import os
import sys
import re
import subprocess
import json
VCC_PATH = 'C:/Program Files/Side Effects Software/Houdini 16.0.600/bin/vcc.exe'
SYN_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
COMP_PATH = os.path.join(SYN_PATH, 'VEX.sublime-completions')
FUNC_PATH = os.path.join(SYN_PATH, 'syntax_lists', 'VexFunctions.txt')
def contexts(vcc_path=VCC_PATH):
"""Return a sorted list of all vex contexts."""
ctxs = subprocess.check_output([vcc_path, '-X'])
ctxs = ctxs.decode('ascii').split('\n')
    return sorted(x for x in ctxs if x)
def context_functions(context, vcc_path=VCC_PATH, as_set=False):
"""Return the sorted list of all function names for a vex context."""
ctx_info = subprocess.check_output([vcc_path, '-X', context])
ctx_info = ctx_info.decode('ascii')
funcs = set()
    for f in re.findall(r'\w+\(', ctx_info):
if len(f) > 1:
funcs.add(f[:-1])
if as_set:
return funcs
else:
return sorted(list(funcs))
def context_function_signatures(context, vcc_path=VCC_PATH):
ctx_info = subprocess.check_output([vcc_path, '-X', context])
ctx_info = ctx_info.decode('ascii')
sigs = []
    for s in re.findall(r'(\w+(\[\])?) (\w+)\((.*)\)', ctx_info):
sig_str = '%s %s(%s)' % (s[0], s[2], s[3])
if s[3] == 'void':
hint_str = ''
else:
            hint_str = '%s\n(%s)' % (s[0], s[3].strip().rstrip(';'))
args = [x.strip() for x in s[3].split(';')]
sigs.append({'returns':s[0], 'name':s[2], 'ctx':context, 'args':args, 'str':sig_str,
'hint':hint_str})
return sigs
def all_functions(vcc_path=VCC_PATH, write_functions=True, function_path=FUNC_PATH):
"""Returns a sorted list of all vex functions in all contexts."""
all_funcs = set()
    for ctx in contexts(vcc_path):
        all_funcs.update(context_functions(ctx, vcc_path=vcc_path, as_set=True))
all_funcs_sorted = sorted(all_funcs)
if write_functions:
with open(function_path, 'w') as f:
for func in all_funcs_sorted:
f.write('{}\n'.format(func))
return all_funcs_sorted
def all_function_signatures(vcc_path=VCC_PATH):
all_sigs = []
sig_strs = set()
    for ctx in contexts(vcc_path):
        ctx_sigs = context_function_signatures(ctx, vcc_path=vcc_path)
for ctx_sig in ctx_sigs:
if ctx_sig['str'] not in sig_strs:
sig_strs.add(ctx_sig['str'])
all_sigs.append(ctx_sig)
return all_sigs
def generate_simple_completions(sublime_completion_path=COMP_PATH):
"""Converts the function signitures generated by vcc into SublimeText compatable completion
JSON files."""
completions = []
for name in all_functions():
completions.append({'trigger' : ('%s\tfunction' % name),
'contents': ('%s(${1})' % name)})
data = {'scope': 'source.vex', 'completions': completions}
with open(sublime_completion_path, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
def generate_completions(sublime_completion_path=COMP_PATH):
"""Converts the function signitures generated by vcc into SublimeText compatable completion
JSON files."""
completions = []
for sig in all_function_signatures():
        if len(sig['args']) == 1 and sig['args'][0] == 'void':
            arg_types = []  # a void signature takes no arguments
        else:
            arg_types = sig['args']
        comp_arg_fmt = ''
        comp_arg_fmt_no_variadic = ''
        c = 1
        for arg_type in arg_types:
            comp_arg_fmt += ('${%d:%s}, ' % (c, arg_type))
            c += 1
            if arg_type != '...':
                comp_arg_fmt_no_variadic = comp_arg_fmt
        comp_arg_fmt = comp_arg_fmt[:-2]  # strip the trailing ', ' before the closing parenthesis
        comp_arg_fmt_no_variadic = comp_arg_fmt_no_variadic[:-2]
        # in the variadic case, we'll generate two completions - one with and
        # one without the variadic argument ellipsis
        if sig['args'][-1] == '...':
            new_hint = sig['hint'][:-4]
            new_hint = new_hint.rstrip().rstrip(';')
            new_hint += ')'
            completions.append({'trigger' : ('%s\t%s' % (sig['name'], new_hint)),
                                'contents': ('%s(%s)' % (sig['name'], comp_arg_fmt_no_variadic))})
        completions.append({'trigger' : ('%s\t%s' % (sig['name'], sig['hint'])),
                            'contents': ('%s(%s)' % (sig['name'], comp_arg_fmt))})
data = {'scope': 'source.vex', 'completions': completions}
with open(sublime_completion_path, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
if __name__ == '__main__':
if len(sys.argv) < 2:
generate_simple_completions()
elif len(sys.argv) == 2:
generate_simple_completions(sys.argv[1])
else:
        raise Exception('Too many arguments.')
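# A hedged illustration (not from the original script): for a vcc signature such
# as "float clamp(float; float; float)", generate_completions() above would emit
# a completion entry of roughly this shape:
#
#   {'trigger': 'clamp\tfloat\n(float; float; float)',
#    'contents': 'clamp(${1:float}, ${2:float}, ${3:float})'}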
|
"""XTF viewer (and converter)"""
import os
import csv
import webbrowser
import re
import sys
from functools import partial
import numpy
from GUI import Application, ScrollableView, Document, Window, Globals, rgb
from GUI import Image, Frame, Font, Model, Label, Menu, Grid, CheckBox, Button
from GUI import BaseAlert, ModalDialog, TextField, application
from GUI.Files import FileType, DirRef
from GUI.FileDialogs import request_old_files, request_new_file
from GUI.Geometry import (pt_in_rect, offset_rect, rects_intersect,
rect_sized, rect_height, rect_size)
from GUI.StdColors import black, red, light_grey, white
from GUI.StdFonts import system_font
from GUI.StdMenus import basic_menus, edit_cmds, pref_cmds, print_cmds
from GUI.StdButtons import CancelButton, DefaultButton
from GUI.Numerical import image_from_ndarray
from GUI.BaseAlertFunctions import present_and_destroy
from GUI.Alerts import confirm, stop_alert
import xtf
def log(*args):
sys.stdout.write(' '.join(args) + '\n')
def app_menu(profiles = None):
menus = basic_menus(
exclude = edit_cmds + pref_cmds + print_cmds + 'revert_cmd' + 'about_cmd',
substitutions = {
'new_cmd': 'New Project',
'open_cmd': 'Open Project...',
'save_cmd': 'Save Project',
'save_as_cmd': 'Save Project As...'})
menus.append(Menu('Profile', [('Import XTF files...', 'import_cmd'),
'-',
(profiles or [], 'profiles_cmd')]))
menus.append(Menu('Tools', [('Export trace headers to CSV (Excel)...',
'export_csv_cmd'),
('Preferences...', 'preferences_cmd')
]))
menus.append(Menu('Help', [('Help...', 'help_cmd'),
('About XTF Surveyor...', 'about_cmd')]))
return menus
class XTFApp(Application):
def __init__(self, **kw):
Application.__init__(self, **kw)
self.proj_type = FileType(name = "XTF Project", suffix = "project")
self.file_type = self.proj_type
self.menus = []
self.utm_params = None
def open_app(self):
self.new_cmd()
def make_document(self, fileref):
return Project(file_type = self.proj_type)
def make_window(self, document):
ProjectWindow(document).show()
def help_cmd(self):
path = os.path.join(os.path.dirname(sys.argv[0]), 'README_ru.html')
webbrowser.open('file:///' + path)
def about_cmd(self):
present_and_destroy(AboutBox())
def preferences_cmd(self):
pw = PreferencesWindow(self.utm_params)
self.utm_params = pw.present()
pw.destroy()
class PreferencesWindow(ModalDialog):
def __init__(self, utm_params):
self.old_utm_params = utm_params
utm = '%d%s' % utm_params if utm_params else ''
ModalDialog.__init__(self, title = 'Preferences')
label = Label(text = 'UTM zone and hemisphere for SEG-Y export\n'
'(1 <= zone <= 60, hemisphere: S or N):\n')
self.place(label, left = 20, top = 20)
self.utm_field = TextField(text = utm)
self.place(self.utm_field, left = 20, top = label, right = -20)
self.utm_field.select_all()
label2 = Label(text = 'e.g. 17S, 43N\n')
self.place(label2, left = 20, top = self.utm_field)
default_button = DefaultButton()
cancel_button = CancelButton()
self.place(default_button, right = -20, top = label2)
self.place(cancel_button, left = 20, top = label2)
self.shrink_wrap()
self.center()
def ok(self):
text = self.utm_field.text
if not text or text.isspace():
self.dismiss(None)
return
        m = re.match(r'^\s*(\d+)\s*([SN])\s*$', text, re.I)
if m and 1 <= int(m.group(1)) <= 60:
utm_params = int(m.group(1)), m.group(2).upper()
self.dismiss(utm_params)
else:
stop_alert('Incorrect UTM zone. Allowed values:\n1N - 60N or 1S - 60S.')
self.utm_field.select_all()
def cancel(self):
self.dismiss(self.old_utm_params)
def request_old_directory(prompt, default_dir = None):
# GUI.Win32.BaseFileDialogs._request_old_dir, but with BIF_NEWDIALOGSTYLE
from win32com.shell import shell as sh
import win32com.shell.shellcon as sc
import win32api as api
import win32gui as gui
from GUI.BaseFileDialogs import win_fix_prompt
BIF_NEWDIALOGSTYLE = 0x0040
win_bif_flags = sc.BIF_RETURNONLYFSDIRS | BIF_NEWDIALOGSTYLE
if default_dir:
def callback(hwnd, msg, lp, data):
if msg == sc.BFFM_INITIALIZED:
api.SendMessage(hwnd, sc.BFFM_SETSELECTION, True, default_dir.path)
else:
callback = None
(idl, name, images) = sh.SHBrowseForFolder(None, None,
win_fix_prompt(prompt), win_bif_flags, callback)
if idl:
return DirRef(sh.SHGetPathFromIDList(idl))
XTF_TYPE = FileType(name = 'XTF file', suffix = 'xtf')
SEGY_TYPE = FileType(name = 'SEG-Y file', suffix = 'seg')
class ProjectWindow(Window):
def __init__(self, document):
self.current_file = None
self.xtf_dir = None
self.segy_dir = None
Window.__init__(self, size = (500, 400), document = document)
self.project_changed(document)
def close_cmd(self):
Window.close_cmd(self)
if not application().windows:
            # force close: the remaining console window stops the app from quitting
application()._quit()
def setup_menus(self, m):
Window.setup_menus(self, m)
m.about_cmd.enabled = True
m.help_cmd.enabled = True
m.import_cmd.enabled = True
m.preferences_cmd.enabled = True
if self.current_file is not None:
m.export_csv_cmd.enabled = True
m.profiles_cmd.enabled = True
m.profiles_cmd.checked = False
if self.current_file is not None:
m.profiles_cmd[self.current_file].checked = True
def import_cmd(self):
refs = request_old_files('Import XTF files', file_types = [XTF_TYPE])
if refs is not None:
self.document.add_files([os.path.join(r.dir.path, r.name)
for r in refs])
def profiles_cmd(self, i):
self.current_file = i
self.project_changed(self.document)
def export_csv_cmd(self):
self.xtf_file.export_csv()
def xtf_cmd(self):
ref = request_new_file('Save XTF file', file_type = XTF_TYPE)
if ref is not None:
numbers = [i for i, cb in enumerate(self.checkboxes) if cb.value]
filename = os.path.join(ref.dir.path, ref.name)
xtf.export_XTF(self.xtf_file.filename, filename, numbers)
def segy_cmd(self):
ref = request_new_file('Save SEG-Y file', file_type = SEGY_TYPE)
if ref is not None:
numbers = [i for i, cb in enumerate(self.checkboxes) if cb.value]
filename = os.path.join(ref.dir.path, ref.name)
xtf.export_SEGY(self.xtf_file.filename, filename, numbers,
utm_params = application().utm_params)
def xtf_all_cmd(self):
default_dir = self.document.file.dir if self.document.file else None
ref = request_old_directory('Save XTF files to folder',
self.xtf_dir or default_dir)
if self.batch_export(ref, xtf.export_XTF, '.xtf'):
self.xtf_dir = ref # remember selected dir for next time
def segy_all_cmd(self):
default_dir = self.document.file.dir if self.document.file else None
ref = request_old_directory('Save SEG-Y files to folder',
self.segy_dir or default_dir)
export = partial(xtf.export_SEGY, utm_params = application().utm_params)
if self.batch_export(ref, export, '.seg'):
self.segy_dir = ref # remember selected dir for next time
def batch_export(self, out_dir, export_function, ext):
"""Run export_function on all project files. Return True on success"""
ext_re = re.compile(r'\.xtf$', re.I)
if out_dir is not None:
numbers = [i for i, cb in enumerate(self.checkboxes) if cb.value]
src = self.document.abspaths()
dst = [ext_re.sub('', os.path.split(p)[1]) + ext for p in src]
dstf = [os.path.join(out_dir.path, d) for d in dst]
existing = [d for d, df in zip(dst, dstf) if os.path.exists(df)]
if (not existing or confirm('%s already has files: %s. Overwrite?'
% (out_dir.path, ', '.join(existing)))):
for i, (s, d, df) in enumerate(zip(src, dst, dstf)):
log('[%d/%d] %s -> %s' % (i+1, len(dst), s, d))
try:
export_function(s, df, numbers)
except xtf.BadDataError, e:
msg = 'Aborted! %s.' % (e,)
log(msg)
stop_alert(msg)
break
else:
log('Finished!')
return True
def project_changed(self, model, recent_filename = None):
doc = self.document
self.menus = app_menu([f.replace('/', '\\')
for f in sorted(doc.files)])
for c in list(self.contents):
self.remove(c)
c.destroy()
if doc.files:
if self.current_file is None:
self.current_file = 0
if recent_filename is not None:
self.current_file = doc.files.index(recent_filename)
filename = doc.abspaths()[self.current_file]
try:
self.xtf_file = XTFFile(filename)
except xtf.BadDataError, e:
self.place(Label(text = 'Error in %s (%s)' % (filename, e),
font = Font(system_font.family, 15, 'normal')),
top = 20, left = 20)
else:
panel = Frame()
checks = [CheckBox(', '.join(w for w in
['channel %d' % (c+1),
self.xtf_file.types.get(c),
'traces: %d' % n] if w),
enabled = n > 0, value = n > 0,
action = 'setup_buttons')
for c, n in enumerate(self.xtf_file.ntraces)]
xtf_btn = Button('Save to XTF...', action = 'xtf_cmd')
xtf_btn2 = Button('Save all to XTF...', action = 'xtf_all_cmd')
segy_btn = Button('Save to SEG-Y...', action = 'segy_cmd')
segy_btn2 = Button('Save all to SEG-Y...',
action = 'segy_all_cmd')
xtf_btn.width = segy_btn.width = \
max(xtf_btn.width, segy_btn.width)
xtf_btn2.width = segy_btn2.width = \
max(xtf_btn2.width, segy_btn2.width)
buttons = Grid([[xtf_btn, xtf_btn2], [segy_btn, segy_btn2]],
row_spacing = 10)
self.label = Label(width = buttons.width)
panel.place_column(checks + [buttons], top = 10, left = 10)
panel.place(self.label, top = buttons+3, left = 20)
panel.shrink_wrap(padding = (20, 20))
self.place(panel, top = 0, bottom = 0, right = 0,
sticky = 'nse')
self.checkboxes = checks
self.xtf_btn = xtf_btn
self.xtf_all_btn = xtf_btn2
self.segy_btn = segy_btn
self.segy_all_btn = segy_btn2
self.setup_buttons()
file_view = FileView(self.xtf_file)
self.place(file_view, top = 0, bottom = 0, left = 0,
right = panel, sticky = 'nesw')
else:
self.place(Label(text = 'Open project or import XTF files.',
font = Font(system_font.family, 30, 'normal')),
top = 20, left = 20)
self.update_title()
# make sure .setup_menus() gets called
# (it usually does, except after toggle-some-control-then-change-file)
self.become_target()
def setup_buttons(self):
self.xtf_btn.enabled = self.xtf_all_btn.enabled = \
any(cb.value for cb in self.checkboxes)
self.segy_btn.enabled = self.segy_all_btn.enabled = \
len([cb.value for cb in self.checkboxes if cb.value]) == 1
if self.segy_btn.enabled:
self.label.text = ''
else:
self.label.text = '(select exactly one channel to enable SEG-Y)'
def update_title(self):
doc = self.document
if self.current_file is None:
self.set_title(doc.title)
else:
self.set_title('%s - %s' %
(doc.files[self.current_file], doc.title))
def normalize(a):
    """Normalize array values to the 0.0...255.0 range"""
    a = a.astype(float)
    lo, hi = a.min(), a.max()
    a -= lo
    if hi > lo:  # guard against division by zero for a constant-valued array
        a *= 255.0 / (hi - lo)
    return a.round()
def rgb_array(gray):
a = normalize(gray)
# make grayscale RGB image: [x, y, ...] => [[x, x, x], [y, y, y], ...]
g = numpy.empty(a.shape + (3,), dtype=numpy.uint8)
g[...] = a[..., numpy.newaxis]
return g
def image_from_rgb_array(array):
# based on image_from_ndarray and (buggy) GDIPlus.Bitmap.from_data
from GUI import GDIPlus as gdi
from ctypes import c_void_p, byref
height, width, bytes_per_pixel = array.shape
assert bytes_per_pixel == 3
assert array.dtype == numpy.uint8
format = gdi.PixelFormat24bppRGB
    # pad the image width to a multiple of 4 so the stride is a multiple of 4 (GDI+ requirement)
pad = -width % 4
stride = (width + pad) * bytes_per_pixel
if pad:
p = numpy.empty((height, pad, bytes_per_pixel), dtype = numpy.uint8)
array = numpy.hstack((array, p))
# create and fill GDI+ bitmap
bitmap = gdi.Bitmap.__new__(gdi.Bitmap)
ptr = c_void_p()
data = array.tostring() # FIXME works only for gray images...
if gdi.wg.GdipCreateBitmapFromScan0(width, height, stride, format, data,
byref(ptr)) != 0:
raise Exception('GDI+ Error')
bitmap.ptr = ptr
# create Image object
image = Image.__new__(Image)
image._win_image = bitmap
image._data = data # is it really needed? (image_from_ndarray does it too)
return image
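# Worked example of the padding above: for width == 10, pad == -10 % 4 == 2, so
# the bitmap is built 12 pixels wide with stride 12 * 3 == 36 bytes, satisfying
# GDI+'s requirement that the stride be a multiple of 4.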
class XTFFile(object):
def __init__(self, filename):
self.filename = filename
header, nchannels, arrays = xtf.read_XTF_as_grayscale_arrays(filename)
log('File %r, header:' % (filename,))
log(' ' + '\n '.join('%s: %r' % (k.replace('_', ' '), v)
for k, v in header.items()))
self.headers = []
self.channels = []
self.ntraces = [0] * nchannels
self.types = {}
for num, type, headers, a in arrays:
a = rgb_array(a)
self.ntraces[num] = len(headers)
self.types[num] = type
self.headers.extend(headers)
image = image_from_rgb_array(a)
self.channels.append(Channel(image, num))
csv_type = FileType(name = 'CSV file', suffix = 'csv')
def export_csv(self):
ref = request_new_file('Export CSV file', file_type = self.csv_type)
if ref is not None:
outfile = csv.writer(ref.open('wb'), delimiter = ';')
outfile.writerow([n.replace('_', ' ').capitalize()
for n in xtf.TraceHeader._fields])
for header in self.headers:
outfile.writerow(header)
class FileView(Frame):
def __init__(self, xtf_file):
Frame.__init__(self)
for channel in xtf_file.channels:
self.place(ChannelView(model = channel, scrolling = 'h'))
self.resized((0, 0))
def resized(self, delta):
# make sure content components evenly fill all the space
n = len(self.contents)
for i, content in enumerate(self.contents):
W, H = self.content_size
content.bounds = 0, H / n * i, W, H / n * (i + 1)
class Channel(Model):
def __init__(self, image, number):
Model.__init__(self)
self.image = image
self.number = number
class ChannelView(ScrollableView):
def draw(self, canvas, update_rect):
#canvas.erase_rect(update_rect)
# Draw channel image, scaled to fit the view vertically
image = self.model.image
W, H = image.size
h = rect_height(self.viewed_rect())
dst_rect = (0, 0, int(float(h) * W / H), h)
image.draw(canvas, image.bounds, dst_rect)
self.extent = rect_size(dst_rect)
# Draw channel title
canvas.moveto(10, self.height / 2)
canvas.font = Font(system_font.family, 30, 'normal')
canvas.textcolor = rgb(0.2, 0.4, 0.6)
canvas.show_text('channel %d' % (self.model.number+1,))
class Project(Document):
magic = 'XTF PROJECT'
files = None
def abspaths(self):
return [f if os.path.isabs(f) else os.path.join(self.file.dir.path, f)
for f in self.files]
def new_contents(self):
self.files = []
def read_contents(self, file):
if file.next().rstrip() != self.magic:
raise RuntimeError('Bad project file')
files = [filename.rstrip() for filename in file]
self.files = sorted(self.normpath(filename) for filename in files)
def write_contents(self, file):
file.write(self.magic + '\n')
self.files = sorted(self.normpath(f) for f in self.files)
for f in self.files:
file.write(f + '\n')
self.notify_windows('project_changed')
def normpath(self, p):
if self.file:
proj_dir = os.path.abspath(self.file.dir.path)
if os.path.abspath(p).startswith(proj_dir):
p = os.path.relpath(p, proj_dir)
return p.replace('\\', '/')
def add_files(self, filenames):
for f in filenames:
f = self.normpath(f)
if f not in self.files:
self.files.append(f)
self.changed()
self.files.sort()
self.notify_windows('project_changed', self.normpath(filenames[0]))
def notify_windows(self, *event):
for window in self.windows:
getattr(window, event[0])(self, *event[1:])
class AboutBox(BaseAlert):
def __init__(self):
from version import url, __version__ as ver
self.url = url
BaseAlert.__init__(self, 'note', '%s, version %s\n\n%s' %
(Globals.application_name, ver, self.url),
button_labels = ['OK', 'Visit home page'])
def _create_buttons(self, ok_label, home_label):
self.yes_button = DefaultButton(title = ok_label, action = self.yes)
self.home_button = Button(title = home_label, action = self.home)
def _layout_buttons(self):
self.place(self.yes_button,
right = self.label.right,
top = self.label + self._label_button_spacing)
self.place(self.home_button,
left = self.label.left,
top = self.label + self._label_button_spacing)
def home(self):
webbrowser.open(self.url)
self.yes()
XTFApp(title = 'XTF Surveyor').run()
|
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
m = len(matrix)
n = 0 if m == 0 else len(matrix[0])
        # build 2D prefix sums in place: matrix[i][j] becomes the sum of the
        # submatrix spanning (0, 0)..(i, j)
        for i in range(m):
for j in range(n):
if i == 0 and j == 0:
continue
elif i == 0:
matrix[0][j] += matrix[0][j - 1]
elif j == 0:
matrix[i][0] += matrix[i - 1][0]
else:
matrix[i][j] += matrix[i - 1][j] + matrix[i][j - 1] - matrix[i - 1][j - 1]
self.cs_mat = matrix
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
mat = self.cs_mat
m, n = len(mat), len(mat[0])
assert -1 < row1 < m and -1 < row2 < m
assert -1 < col1 < n and -1 < col2 < n
assert row1 <= row2
assert col1 <= col2
if row1 == 0 and col1 == 0:
return mat[row2][col2]
elif row1 == 0:
return mat[row2][col2] - mat[row2][col1 - 1]
elif col1 == 0:
return mat[row2][col2] - mat[row1 - 1][col2]
else:
return mat[row2][col2] + mat[row1 - 1][col1 - 1] - mat[row2][col1 - 1] - mat[row1 - 1][col2]
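# A small self-check sketch (the sample matrix is illustrative, not from the original):
if __name__ == '__main__':
    nm = NumMatrix([[3, 0, 1],
                    [5, 6, 3],
                    [1, 2, 0]])
    assert nm.sumRegion(0, 0, 1, 1) == 14  # 3 + 0 + 5 + 6
    assert nm.sumRegion(1, 1, 2, 2) == 11  # 6 + 3 + 2 + 0
    assert nm.sumRegion(0, 2, 2, 2) == 4   # 1 + 3 + 0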
|
from django.shortcuts import render
from django.http import HttpResponse
from rango.models import Category
from rango.models import Page
from rango.forms import CategoryForm
from rango.forms import PageForm
def index(request):
# Query the database for a list of ALL categories currently stored.
# Order the categories by no. likes in descending order.
# Retrieve the top 5 only - or all if less than 5.
# Place the list in our context_dict dictionary
# that will be passed to the template engine.
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {}
context_dict['categories'] = category_list
context_dict['pages'] = page_list
# Render the response and send it back!
return render(request,'rango/index.html', context=context_dict)
def about(request):
context_dict = {'author':"Max Lai"}
return render(request,'rango/about.html', context=context_dict)
#return HttpResponse("Rango says here is the about page. <br/> <a href='/rango/'>Index</a>")
def show_category(request, category_name_slug):
# Create a context dictionary which we can pass
# to the template rendering engine.
context_dict = {}
try:
# Can we find a category name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# So the .get() method returns one model instance or raises an exception.
print(category_name_slug)
category = Category.objects.get(slug=category_name_slug)
print(category.name)
# Retrieve all of the associated pages.
# Note that filter() will return a list of page objects or an empty list
pages = Page.objects.filter(category=category)
# Adds our results list to the template context under name pages.
context_dict['pages'] = pages
# We also add the category object from
# the database to the context dictionary.
# We'll use this in the template to verify that the category exists.
context_dict['category'] = category
except Category.DoesNotExist:
# We get here if we didn't find the specified category.
# Don't do anything -
# the template will display the "no category" message for us.
context_dict['category'] = None
context_dict['pages'] = None
# Go render the response and return it to the client.
return render(request, 'rango/category.html', context_dict)
def add_category(request):
form = CategoryForm()
# check if HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# check if w/ a valid form?
if form.is_valid():
# Save the new category to the database.
form.save(commit=True)
# Now that the category is saved
# We could give a confirmation message
# But since the most recent category added is on the index page
# Then we can direct the user back to the index page.
return index(request) # question: index just list the most viewed categories?
else:
# The supplied form contained errors -
# just print them to the terminal.
print(form.errors)
# Will handle the bad form, new form, or no form supplied cases.
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
def add_page(request, category_name_slug):
try:
category = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
category = None
form = PageForm()
if request.method =='POST':
form = PageForm(request.POST)
if form.is_valid():
if category:
page = form.save(commit=False)
page.category = category
                page.views = 0
page.save()
return show_category(request, category_name_slug)
else:
print(form.errors)
context_dict = {'form':form, 'category':category}
return render(request, 'rango/add_page.html', context_dict)
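# For reference, a hedged sketch of the URLconf these views assume (patterns and
# names are illustrative, not taken from the original project):
#
# from django.conf.urls import url
# from rango import views
#
# urlpatterns = [
#     url(r'^$', views.index, name='index'),
#     url(r'^about/$', views.about, name='about'),
#     url(r'^category/(?P<category_name_slug>[\w\-]+)/$',
#         views.show_category, name='show_category'),
#     url(r'^add_category/$', views.add_category, name='add_category'),
#     url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$',
#         views.add_page, name='add_page'),
# ]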
|
from IPython import embed
import cv2
import numpy as np
import scipy.ndimage.filters
import math
class Tracker(object):
def __init__(self, **kwargs):
"""Set configuration attributes and create tracked objects.
Objects are instances of inner class _Object. This method creates these
instances using the passed in names and masks."""
settings = {
'color' : None,
'colorTolerance' : None,
'objects': {},
'blurAperture' : 45,
}
for k in settings.keys():
            v = kwargs[k] if kwargs.get(k) is not None else settings[k]
setattr(self, k, v)
self.colorHSV = tuple(cv2.cvtColor(np.uint8([[self.color]]),
cv2.COLOR_BGR2HSV)[0][0])
for name,mask in self.objects.items():
self.objects[name] = Tracker._Object(mask)
setattr(self, name, self.objects[name])
def update(self, frame, time):
"""Update the location attributes of each tracked object.
First the provided frame is thresholded according to the configured color.
A Gaussian blur is then applied to the thresholded frame. The blurred frame
is passed to the objects, which update their own locations."""
self.updateThresholded(frame)
self.updateBlurred()
for obj in self.objects.values():
obj.update(self.blurred, time)
def updateThresholded(self, frame):
"""Update thresholded frame. Use RGB thresholding."""
lower = np.uint8(map( lambda x: max(0, x - self.colorTolerance), self.color))
upper = np.uint8(map(lambda x: min(255, x + self.colorTolerance), self.color))
self.thresholded = cv2.inRange(frame, lower, upper)
def updateBlurred(self):
"""Update blurred frame by applying Gaussian blur to the thresholded."""
self.blurred = cv2.blur(self.thresholded, (self.blurAperture,self.blurAperture))
self.blurred = scipy.ndimage.filters.uniform_filter(self.blurred,
size=10, mode='constant')
class _Object(object):
"""Represents an individual object tracked by the containing tracker."""
def __init__(self, locationMask=None, advancedTracking=True, useMask=False):
"""Set configuration attributes."""
self.locationMask = locationMask
self.advancedTracking = advancedTracking
self.location = self.lastLocation = (0,0)
self.speed = None
self.angle = None
self.counter = 0
self.lastTime = 0
self.refreshInterval = 1
# self.useMask = useMask
def update(self, blurred, time):
"""Update location, angle, and speed of the object.
Angle and speed are only updated if the advancedTracking option is set
for this object.
"""
if self.locationMask is not None:
blurred = (np.ma.masked_array(blurred, self.locationMask))
self.location = np.unravel_index(blurred.argmax(), blurred.shape)
if self.advancedTracking and self.counter % self.refreshInterval == 0:
timeElapsed = time - self.lastTime
self.speed = self.getSpeed(timeElapsed)
self.angle = self.getAngle()
self.lastTime = time
self.lastLocation = self.location
def getSpeed(self, timeElapsed):
"""Calculate and return the speed."""
xs = (self.location[0] - self.lastLocation[0])**2
ys = (self.location[1] - self.lastLocation[1])**2
return round(math.sqrt(xs + ys) / timeElapsed, 2)
def getAngle(self):
"""Calculate and return the angle of motion."""
num = float(self.location[0] - self.lastLocation[0])
den = float(self.location[1] - self.lastLocation[1])
if num == 0 and den == 0:
return None
elif den == 0:
return 90.0
else:
return math.atan(num/den) * 180 / math.pi
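# Hedged usage sketch (the capture loop and settings are illustrative only):
#
#   import time
#   cap = cv2.VideoCapture(0)
#   tracker = Tracker(color=(0, 0, 255),       # BGR target color
#                     colorTolerance=30,
#                     objects={'ball': None})  # no location mask
#   while cap.isOpened():
#       ok, frame = cap.read()
#       if not ok:
#           break
#       tracker.update(frame, time.time())
#       print tracker.ball.location, tracker.ball.speed, tracker.ball.angle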
|
__counter__ = 159
__release__ = ''
__version__ = '0.1.%03d' % __counter__ + __release__
|
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
from nose.tools import eq_, raises
from six.moves import xrange
from smarkets.streaming_api.framing import (
frame_decode_all, frame_encode, IncompleteULEB128, uleb128_decode, uleb128_encode,
)
test_data = (
(0x00000000, b'\x00'),
(0x0000007F, b'\x7F'),
(0x00000080, b'\x80\x01'),
(624485, b'\xE5\x8E\x26'),
(268435202, b'\x82\xFE\xFF\x7F'),
)
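# Worked example for the fourth row above: encoding 624485 as ULEB128 emits
# seven bits at a time, least significant first, setting the high bit on every
# byte except the last:
#   624485 & 0x7F = 0x65 -> emit 0xE5 (continuation bit set); 624485 >> 7 = 4878
#     4878 & 0x7F = 0x0E -> emit 0x8E (continuation bit set);   4878 >> 7 = 38
#       38 < 0x80        -> emit 0x26 (final byte)
# which matches b'\xE5\x8E\x26'.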
def test_dumps():
for value, string in test_data:
yield check_dumps, value, string
def check_dumps(value, string):
eq_(uleb128_encode(value), string)
def test_loads():
for value, string in test_data:
yield check_loads, bytearray(string), value
def check_loads(byte_array, value):
eq_(uleb128_decode(byte_array), (value, len(byte_array)))
def test_loads_and_dumps_are_consistent():
for i in chain(
xrange(2 ** 18),
xrange(2 ** 20, 2 ** 26, 33333),
xrange(2 ** 26, 2 ** 32, 777777),
):
byte_dump = uleb128_encode(i)
eq_(uleb128_decode(byte_dump), (i, len(byte_dump)))
@raises(ValueError)
def test_uleb128_encode_fails_on_negative_number():
uleb128_encode(-1)
def test_uleb128_decode_fails_on_invalid_input():
byte_array = uleb128_encode(12345678)
for i in xrange(len(byte_array)):
yield check_uleb128_decode_fails_on_invalid_input, byte_array[:i]
@raises(IncompleteULEB128)
def check_uleb128_decode_fails_on_invalid_input(input_):
uleb128_decode(input_)
def test_frame_encode():
for input_, output in (
(b'', b'\x00\x00\x00\x00'),
(b'a', b'\x01a\x00\x00'),
(b'ab', b'\x02ab\x00'),
(b'abc', b'\x03abc'),
(b'abcd', b'\x04abcd'),
):
yield check_frame_encode, bytearray(input_), output
def check_frame_encode(byte_array, output):
frame = bytearray()
frame_encode(frame, byte_array)
eq_(frame, output)
def test_frame_decode_all():
for input_, output in (
# frame matches the boundary
(b'', ([], b'')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd', ([b'a', b'ab', b'abc', b'abcd'], b'')),
# ends with complete header but only part of a message
(b'\x03ab', ([], b'\x03ab')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd\x03ab', ([b'a', b'ab', b'abc', b'abcd'], b'\x03ab')),
(b'\x05abcd', ([], b'\x05abcd')),
# ends with incomplete header
(b'\x80', ([], b'\x80')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd\x03ab', ([b'a', b'ab', b'abc', b'abcd'], b'\x03ab')),
# 4(or more)-byte incomplete header is a special case because it reaches the minimum frame size
# so let's make sure decoding doesn't fail at header decoding stage
(b'\x80\x80\x80\x80', ([], b'\x80\x80\x80\x80')),
(b'\x80\x80\x80\x80\x80', ([], b'\x80\x80\x80\x80\x80')),
# regression: if the second frame is shorter, we still want to decode both...
(b'\x05abcde\x03abc', ([b'abcde', b'abc'], b'')),
):
yield check_frame_decode_all, bytearray(input_), output
def check_frame_decode_all(byte_array, output):
eq_(frame_decode_all(byte_array), output)
|
"""Codec for quoted-printable encoding.
Like base64 and rot13, this returns Python strings, not Unicode.
"""
import codecs, quopri
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def quopri_encode(input, errors='strict'):
"""Encode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
    # use str() to avoid cStringIO's undesired Unicode behavior.
f = StringIO(str(input))
g = StringIO()
quopri.encode(f, g, 1)
output = g.getvalue()
return (output, len(input))
def quopri_decode(input, errors='strict'):
"""Decode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
f = StringIO(str(input))
g = StringIO()
quopri.decode(f, g)
output = g.getvalue()
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return quopri_encode(input, errors)
def decode(self, input, errors='strict'):
return quopri_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return quopri_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return quopri_decode(input, self.errors)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(
name='quopri',
encode=quopri_encode,
decode=quopri_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
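# A hedged example of the round trip (exact escaping may vary slightly):
#
#   >>> quopri_encode('caf\xe9=')[0]
#   'caf=E9=3D'
#   >>> quopri_decode('caf=E9=3D')[0]
#   'caf\xe9='
#
# The length consumed is always len(input), since the codec is stateless.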
|
from ..core import Provider
from ..core import Response
from ..exceptions import NotifierException
from ..utils import requests
class Zulip(Provider):
"""Send Zulip notifications"""
name = "zulip"
site_url = "https://zulipchat.com/api/"
api_endpoint = "/api/v1/messages"
base_url = "https://{domain}.zulipchat.com"
path_to_errors = ("msg",)
__type = {
"type": "string",
"enum": ["stream", "private"],
"title": "Type of message to send",
}
_required = {
"allOf": [
{"required": ["message", "email", "api_key", "to"]},
{
"oneOf": [{"required": ["domain"]}, {"required": ["server"]}],
"error_oneOf": "Only one of 'domain' or 'server' is allowed",
},
]
}
_schema = {
"type": "object",
"properties": {
"message": {"type": "string", "title": "Message content"},
"email": {"type": "string", "format": "email", "title": "User email"},
"api_key": {"type": "string", "title": "User API Key"},
"type": __type,
"type_": __type,
"to": {"type": "string", "title": "Target of the message"},
"subject": {
"type": "string",
"title": "Title of the stream message. Required when using stream.",
},
"domain": {"type": "string", "minLength": 1, "title": "Zulip cloud domain"},
"server": {
"type": "string",
"format": "uri",
"title": "Zulip server URL. Example: https://myzulip.server.com",
},
},
"additionalProperties": False,
}
@property
def defaults(self) -> dict:
return {"type": "stream"}
def _prepare_data(self, data: dict) -> dict:
base_url = (
self.base_url.format(domain=data.pop("domain"))
if data.get("domain")
else data.pop("server")
)
data["url"] = base_url + self.api_endpoint
data["content"] = data.pop("message")
# A workaround since `type` is a reserved word
if data.get("type_"):
data["type"] = data.pop("type_")
return data
def _validate_data_dependencies(self, data: dict) -> dict:
if data["type"] == "stream" and not data.get("subject"):
raise NotifierException(
provider=self.name,
message="'subject' is required when 'type' is 'stream'",
data=data,
)
return data
def _send_notification(self, data: dict) -> Response:
url = data.pop("url")
auth = (data.pop("email"), data.pop("api_key"))
response, errors = requests.post(
url, data=data, auth=auth, path_to_errors=self.path_to_errors
)
return self.create_response(data, response, errors)
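# Hedged usage sketch (values are illustrative; assumes the notifiers-style
# provider interface where notify(**data) validates against _schema):
#
#   zulip = Zulip()
#   zulip.notify(message='build passed', email='bot@example.com',
#                api_key='...', to='ci', type='stream',
#                subject='nightly', domain='myorg')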
|
import json
from handlers.exceptions import HandlerError
from handlers.base import BaseHandler
from models.users import ContactModel
class Contacts(BaseHandler):
model = ContactModel()
def post(self):
try:
search_payload = json.loads(self.request.body) if self.request.body else None
size = self.get_argument("size", default_value=10, argument_type=int)
data = self.model.match(search_payload=search_payload, size=size)
except HandlerError as e:
self.set_error(e.message)
self.set_status(e.code)
except KeyError as e:
self.set_error(str(e))
self.set_status(400)
else:
self.set_data(data)
self.write_response()
|
from django.shortcuts import render, get_object_or_404,redirect
from django.views import View, generic
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from star_ratings.models import Rating
from django.core.exceptions import PermissionDenied
from django.db.models import F
from django.contrib.gis.measure import D
from django.contrib.gis.geos import Point
from .models import Places
from .forms import PostForm
import magic
@login_required(login_url='/accounts/login/')
def post_create(request):
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        messages.success(request, 'Successfully Created')
        return HttpResponseRedirect('/')
    context = {
        'form': form,
    }
    return render(request, 'location/post_form.html', context)
def post_detail(request, id=None):
    instance = get_object_or_404(Places, id=id)
    context = {
        'title': instance.title,
        'instance': instance,
    }
    return render(request, 'location/post_detail.html', context)
def post_update(request, id=None):
    instance = get_object_or_404(Places, id=id)
    form = PostForm(request.POST or None, request.FILES or None, instance=instance)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        messages.success(request, 'Saved')
        return HttpResponseRedirect(instance.get_absolute_url())
    context = {
        'title': instance.title,
        'instance': instance,
        'form': form,
    }
    return render(request, 'location/post_form.html', context)
def post_delete(request, id=None):
    instance = get_object_or_404(Places, id=id)
    if request.user.username == instance.usersave:
        instance.delete()
        messages.success(request, 'Success')
    else:
        raise PermissionDenied()
    return redirect('posts:list')
def fetch_places(request):
    finder_location = Point(-83, 33)
    nearby = Places.objects.filter(
        location__distance_lte=(
            finder_location,
            D(km=40))).distance(finder_location).order_by('distance')[:10]
    context = {
        'object_listboy': nearby,
        'title': 'wall',
    }
    return render(request, 'location/wall.html', context)
def fetch_places_loc(request):
    lat = request.GET['latitude']
    lon = request.GET['longitude']
    finder_location = Point(float(lon), float(lat))
    # chaining order_by() replaces the previous ordering, so order by rating
    # first and fall back to distance in a single call
    nearby = Places.objects.filter(
        location__distance_lte=(
            finder_location,
            D(km=40))).distance(finder_location).order_by('-rating__average', 'distance')[:10]
    context = {
        'object_listboy': nearby,
        'title': 'wall',
    }
    return render(request, 'location/wall.html', context)
|
import numpy
def test(p, parameters):
qc = numpy.zeros(p.n_levels(), dtype=bool)
# this spike test only makes sense for 3 or more levels
if p.n_levels() < 3:
return qc
t = p.t()
for i in range(2, p.n_levels()-2):
qc[i] = spike(t[i-2:i+3])
qc[1] = spike(t[0:3])
qc[-2] = spike(t[-3:])
return qc
def spike(t):
# generic spike check for a masked array of an odd number of consecutive temperature measurements
    if numpy.ma.is_masked(t):
        # missing data, decline to flag
        return False
    centralTemp = t[int(len(t)/2)]
    medianDiff = round(abs(centralTemp - numpy.median(t)), 2)
if medianDiff != 0:
t = numpy.delete(t, int(len(t)/2))
spikeCheck = round(abs(centralTemp-numpy.mean(t)), 2)
if spikeCheck > 0.3:
return True
return False
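# Worked example: t = [10.0, 10.0, 10.5, 10.0, 10.0] with no masked values.
# centralTemp = 10.5 and the median is 10.0, so medianDiff = 0.5 != 0; dropping
# the central value leaves a mean of 10.0, so spikeCheck = 0.5 > 0.3 and the
# level is flagged as a spike.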
|
class KerviPlugin(object):
def __init__(self, name, config, manager):
from kervi.config.configuration import _Configuration
self._config = _Configuration()
from kervi.config import Configuration
self._global_config = Configuration
plugin_config = {}
if config:
plugin_config = config.as_dict()
self._config._load(config_user=plugin_config, config_base=self.get_default_config())
self._name = name
self._manager = manager
from kervi.spine import Spine
self.spine = Spine()
from kervi.core.utility.bind_decorators import bind_decorators_to_class
bind_decorators_to_class(self)
@property
def manager(self):
return self._manager
@property
def plugin_config(self):
return self._config
@property
def global_config(self):
return self._global_config
@property
def name(self):
return self._name
def first_process_step(self):
pass
def process_step(self):
pass
def terminate_process(self):
pass
def get_default_config(self):
return {}
|
class Dummy:
"""
Implements a dummy class that is completely inert.
"""
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, item):
return self.dummy_function
def __setattr__(self, key, value):
pass
def dummy_function(*args, **kwargs):
pass
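# Usage sketch: every attribute access, call, and assignment is a no-op.
if __name__ == '__main__':
    d = Dummy(1, two=2)
    d.anything('ignored', key='ignored')  # resolves to dummy_function, returns None
    d.x = 5                               # silently discarded by __setattr__
    assert d.missing() is None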
|
"""
Application settings module.
"""
import os
from xdg import XDG_CACHE_HOME
from bootstrap import PROJECT_NAME
OS_IMAGES = {
'raspbian-lite': 'https://downloads.raspberrypi.org/raspbian_lite_latest',
}
CACHE = os.path.join(XDG_CACHE_HOME, PROJECT_NAME)
|
from datetime import datetime
import os
import sys
if len(sys.argv) != 2:
    print "usage: %s <log file>" % sys.argv[0]
    sys.exit()
pattern = '%Y-%m-%d %H:%M:%S,%f'
def parseCommand(command):
return command.split(" ")[0]
command_timing = {}
indent = 0
start_time = None
for ext in [".5", ".4", ".3", ".2", ".1", ""]:
if not os.path.exists(sys.argv[1] + ".out" + ext):
continue
with open(sys.argv[1] + ".out" + ext) as fp:
for line in fp:
try:
[time, command] = map(lambda x: x.strip(), line.split(":apyec:INFO:"))
except ValueError:
continue
if start_time is None:
elapsed = "{0:6.2f}".format(0.0)
start_time = datetime.strptime(time, pattern)
else:
cur_time = datetime.strptime(time, pattern)
elapsed = "{0:6.2f}".format((cur_time - start_time).total_seconds())
# Command start.
if (" started" in line):
print elapsed, " " * indent, command
indent += 2
# Command end.
if (" ended" in line):
indent -= 2
if (indent < 0):
indent = 0
print elapsed, " " * indent, command
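# Hedged note: the parser above assumes log lines of the form
#   2012-03-19 12:00:00,123:apyec:INFO:some_command arg1 arg2 started
# i.e. a timestamp matching `pattern`, a literal ":apyec:INFO:" separator, and a
# trailing " started"/" ended" marker (format inferred from the parsing code).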
|
"""
Streaming server
Accept incoming data from the streaming clients,
and store the data in Db
"""
class StreamingServer(object):
""" Streaming server class """
def __init__(self):
pass
def run(self):
pass
|
from . import tag
from .exif import ExifReader
from .projection import rectify
__all__ = ['ExifReader', 'rectify', 'tag']
|
import getpass
import tempfile
from datetime import datetime
from unittest.mock import call
import pytest
from dateutil.tz import tzutc
from typer.testing import CliRunner
from cli.webapp import app
runner = CliRunner()
@pytest.fixture
def mock_webapp(mocker):
mock_webapp = mocker.patch("cli.webapp.Webapp")
mock_webapp.return_value.get_log_info.return_value = {
"access": [0, 1, 2],
"error": [0, 1, 2],
"server": [0, 1, 2],
}
return mock_webapp
@pytest.fixture
def domain_name():
return "foo.bar.baz"
@pytest.fixture(name="file_with_content")
def fixture_file_with_content():
    def file_with_content(content):
        # NamedTemporaryFile is used here only to generate a unique path; the
        # file is recreated below with the desired content
        filename = tempfile.NamedTemporaryFile(mode="w", encoding="utf8").name
with open(filename, "w") as f:
f.write(content)
return filename
return file_with_content
def test_create_calls_all_stuff_in_right_order(mocker):
mock_project = mocker.patch("cli.webapp.Project")
result = runner.invoke(
app,
[
"create",
"-d",
"www.domain.com",
"-p",
"python.version",
"--nuke",
],
)
assert mock_project.call_args == call("www.domain.com", "python.version")
assert mock_project.return_value.method_calls == [
call.sanity_checks(nuke=True),
call.virtualenv.create(nuke=True),
call.create_webapp(nuke=True),
call.add_static_file_mappings(),
call.webapp.reload(),
]
assert "All done! Your site is now live at https://www.domain.com" in result.stdout
assert (
f"https://www.pythonanywhere.com/user/{getpass.getuser().lower()}/webapps/www_domain_com"
in result.stdout
)
def test_delete_all_logs(mock_webapp, domain_name):
result = runner.invoke(
app,
[
"delete-logs",
"-d",
domain_name,
],
)
mock_webapp.assert_called_once_with(domain_name)
assert mock_webapp.return_value.delete_log.call_args_list == [
call("access", 0),
call("access", 1),
call("access", 2),
call("error", 0),
call("error", 1),
call("error", 2),
call("server", 0),
call("server", 1),
call("server", 2),
]
assert "All done!" in result.stdout
def test_delete_all_server_logs(mock_webapp, domain_name):
result = runner.invoke(
app,
[
"delete-logs",
"-d",
domain_name,
"-t",
"server",
],
)
mock_webapp.assert_called_once_with(domain_name)
assert mock_webapp.return_value.delete_log.call_args_list == [
call("server", 0),
call("server", 1),
call("server", 2),
]
assert "All done!" in result.stdout
def test_delete_one_server_logs(mock_webapp, domain_name):
result = runner.invoke(
app, ["delete-logs", "-d", "foo.bar.baz", "-t", "server", "-i", "2"]
)
mock_webapp.assert_called_once_with(domain_name)
mock_webapp.return_value.delete_log.assert_called_once_with("server", 2)
assert "All done!" in result.stdout
def test_delete_all_current_logs(mock_webapp, domain_name):
result = runner.invoke(app, ["delete-logs", "-d", "foo.bar.baz", "-i", "0"])
mock_webapp.assert_called_once_with(domain_name)
assert mock_webapp.return_value.delete_log.call_args_list == [
call("access", 0),
call("error", 0),
call("server", 0),
]
assert "All done!" in result.stdout
def test_validates_log_number(mock_webapp):
result = runner.invoke(
app, ["delete-logs", "-d", "foo.bar.baz", "-t", "server", "-i", "10"]
)
assert "Invalid value" in result.stdout
assert "log_index has to be 0 for current" in result.stdout
def test_install_ssl_with_default_reload(mock_webapp, domain_name, file_with_content):
mock_webapp.return_value.get_ssl_info.return_value = {
"not_after": datetime(2018, 8, 24, 17, 16, 23, tzinfo=tzutc())
}
certificate = "certificate"
certificate_file = file_with_content(certificate)
private_key = "private_key"
private_key_file = file_with_content(private_key)
result = runner.invoke(
app,
["install-ssl", domain_name, certificate_file, private_key_file],
)
mock_webapp.assert_called_once_with(domain_name)
mock_webapp.return_value.set_ssl.assert_called_once_with(certificate, private_key)
mock_webapp.return_value.reload.assert_called_once()
assert f"for {domain_name}" in result.stdout
assert "2018-08-24," in result.stdout
def test_install_ssl_with_reload_suppressed(
mock_webapp, domain_name, file_with_content
):
certificate = "certificate"
certificate_file = file_with_content(certificate)
private_key = "private_key"
private_key_file = file_with_content(private_key)
runner.invoke(
app,
[
"install-ssl",
domain_name,
certificate_file,
private_key_file,
"--suppress-reload",
],
)
mock_webapp.assert_called_once_with(domain_name)
mock_webapp.return_value.set_ssl.assert_called_once_with(certificate, private_key)
mock_webapp.return_value.reload.assert_not_called()
def test_reload(mock_webapp, domain_name):
result = runner.invoke(app, ["reload", "-d", domain_name])
assert f"{domain_name} has been reloaded" in result.stdout
mock_webapp.assert_called_once_with(domain_name)
assert mock_webapp.return_value.method_calls == [call.reload()]
|
'''
Created on 19 March 2012
@author: tcezard
'''
import sys, os
from utils import utils_logging, utils_commands,\
longest_common_substr_from_start, utils_param, sort_bam_file_per_coordinate
import logging, threading, re
from optparse import OptionParser
from glob import glob
import command_runner
from utils.FastaFormat import FastaReader
import time
from RAD_merge_read1_and_read2 import merge_2_contigs
from utils.parameters import Config_file_error
from utils.utils_commands import get_output_stream_from_command
from collections import Counter, defaultdict
from IO_interface.vcfIO import VcfReader
import shutil
import copy
iupac_alphabet_list=['A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r', 't', 'w', 'v', 'y']
def parse_RG_line(line):
    rg_dict = {}
    sp_line = line.split('\t')
    for element in sp_line[1:]:
        tmp = element.split(':')
        rg_dict[tmp[0]] = ':'.join(tmp[1:])
    return rg_dict
def generate_readgroup_exclusion_file_per_samples(bam_file):
directory = os.path.dirname(os.path.abspath(bam_file))
command = 'samtools view -H %s | grep @RG'%(bam_file)
stream, process = get_output_stream_from_command(command)
all_samples=set()
all_samples2id=defaultdict(list)
for line in stream:
RG_dict = parse_RG_line(line)
all_samples.add(RG_dict.get('SM'))
all_samples2id[RG_dict.get('SM')].append(RG_dict.get('ID'))
all_samples2exclusion_id_file={}
for sample in all_samples:
exclusion_id = []
exclusion_samples = all_samples.difference(set([sample]))
for exclusion_sample in exclusion_samples:
exclusion_id.extend(all_samples2id.get(exclusion_sample))
sample_exclusion_file=os.path.join(directory,'exclusion_id_for_%s.txt'%sample)
        with open(sample_exclusion_file, 'w') as open_file:
            open_file.write('\n'.join(exclusion_id))
all_samples2exclusion_id_file[sample]= sample_exclusion_file
return all_samples2exclusion_id_file
def calculate_base_frequency_for_snps(directory, name):
bam_file = os.path.join(directory, name+'_corrected_sorted_mrk_dup_fixed.bam')
vcf_file = os.path.join(directory, name+'_corrected_sorted_mrk_dup_fixed_samtools_filterd20q60.vcf')
samples=[]
if os.path.exists(vcf_file):
list_snp_position=[]
with open(vcf_file) as open_vcf:
reader=VcfReader(open_vcf)
list_snp_position = ['%s\t%s'%(rec.get_reference(),rec.get_position()) for rec in reader]
if len(list_snp_position)>0:
snp_positions_file=os.path.join(directory, 'snps_positions.txt')
            with open(snp_positions_file, 'w') as open_file:
                open_file.write('\n'.join(list_snp_position))
all_samples2exclusion_id_file = generate_readgroup_exclusion_file_per_samples(bam_file)
samples=all_samples2exclusion_id_file.keys()
for sample in all_samples2exclusion_id_file.keys():
exclusion_id_file = all_samples2exclusion_id_file.get(sample)
output_file=os.path.join(directory, name+'_corrected_sorted_mrk_dup_fixed_samtools_%s.allelefreq'%sample)
allele_freq_from_bam_and_list_pos(output_file, bam_file, snp_positions_file, list_snp_position, exclusion_id_file)
return samples
def replace_number_base(match_object):
s=match_object.group()
m=re.search('[0-9]+',s)
if m:
return s[m.end()+int(m.group()):]
else: return ''
def get_base_frequency_from_line(line, bas_qual_threshold, map_qual_threshold, test_mapping_qual=True):
ATCG={'A':0,'T':0,'C':0,'G':0}
ATCG_filtered={'A':0,'T':0,'C':0,'G':0}
sp_line = line.strip().split()
## do not process line specifying the deletion
if sp_line[2]=='*':
return
##remove the insertion
    iupac_alphabet = ''.join(iupac_alphabet_list)
    bases = re.sub(r'\+[0-9]+[' + iupac_alphabet + ']+', replace_number_base, sp_line[4])
    ##remove the deletion
    bases = re.sub(r'\-[0-9]+[' + iupac_alphabet + ']+', replace_number_base, bases)
    ##remove weird characters for start and end of the read
    bases = re.sub(r'\^.', '', bases)
    bases = re.sub(r'\$', '', bases)
    sp_bases = re.findall('[' + iupac_alphabet + '.,*><]', bases)  # sp_line[4] holds the base calls
if len(sp_line)<=6:
test_mapping_qual=False
if len(sp_bases)==len(sp_line[5]) and (not test_mapping_qual or len(sp_bases)==len(sp_line[6])):
for i, base in enumerate(sp_bases):
bas_qual=ord(sp_line[5][i])-33
if test_mapping_qual:
map_qual=ord(sp_line[6][i])-33
else:
map_qual=40
if bas_qual>=bas_qual_threshold and map_qual>=map_qual_threshold:
if base=='.' or base==',':
base=sp_line[2]
if base.upper() in ATCG.keys():
ATCG[base.upper()]+=1
else:
if base=='.' or base==',':
base=sp_line[2]
if base.upper() in ATCG.keys():
ATCG_filtered[base.upper()]+=1
else:
print 'problem in line %s'%line
print '%s (%s) and %s (%s) and %s (%s) have different length'%(''.join(sp_bases), len(sp_bases) ,sp_line[-2],
len(sp_line[-2]), sp_line[-1], len(sp_line[-1]))
return None
return ATCG,ATCG_filtered
def format_base_freq_line(chr, position, ref_base, consensus_base, ATCG, ATCG_filtered, coverage):
out=[]
out.append(chr)
out.append('%s'%position)
out.append(ref_base)
out.append(consensus_base)
out.append('%s'%(coverage))
out.append('A:%s:%s'%(ATCG['A'],ATCG_filtered['A']))
out.append('T:%s:%s'%(ATCG['T'],ATCG_filtered['T']))
out.append('C:%s:%s'%(ATCG['C'],ATCG_filtered['C']))
out.append('G:%s:%s'%(ATCG['G'],ATCG_filtered['G']))
return '\t'.join(out)
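# Illustrative output of format_base_freq_line (values hypothetical):
#   Contig12    184    A    G    25    A:20:1    T:0:0    C:0:0    G:5:2
# i.e. chromosome, position, reference base, consensus base, coverage (bases
# passing the quality thresholds), then per-base "passing:filtered-out" counts.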
def get_mpileup_from_bam(bam_file, options=''):
try:
pipeline_parm=utils_param.get_pipeline_parameters()
samtools_bin=os.path.join(pipeline_parm.get_samtools_dir(),'samtools')
    except Config_file_error, e:
        logging.warning("Can't find the configuration file; you'll need to have samtools in your path.")
        samtools_bin = 'samtools'
    if bam_file == 'PIPE':
        bam_file = '-'
    command = '%s mpileup -A %s %s' % (samtools_bin, bam_file, options)
    stream, process = utils_commands.get_output_stream_from_command(command, logger_name=None)
    return stream
def allele_freq_from_bam_and_list_pos(output_file, input_file, list_position_file, all_positions_loaded, exclusion_id_file, bas_qual_threshold=20,
map_qual_threshold=10, coverage_threshold=6):
input_stream = get_mpileup_from_bam(input_file, options='-s -l %s -G %s'%(list_position_file,exclusion_id_file))
all_positions_loaded=copy.copy(all_positions_loaded)
if input_stream is not None:
open_output=open(output_file,'w')
for line in input_stream:
sp_line = line.strip().split()
position = '%s\t%s'%(sp_line[0],sp_line[1])
            if position in all_positions_loaded:
all_positions_loaded.remove(position)
else:
continue
info=get_base_frequency_from_line(line, bas_qual_threshold, map_qual_threshold)
if info:
ATCG,ATCG_filtered=info
else:
ATCG={'A':0,'T':0,'C':0,'G':0}
ATCG_filtered={'A':0,'T':0,'C':0,'G':0}
##Calculate overall coverage
coverage=ATCG['A']+ATCG['C']+ATCG['G']+ATCG['T']
#if coverage<coverage_threshold:
# continue
## Get the most frequent base
sorted_list=sorted(ATCG, key=lambda x: ATCG[x], reverse=True)
if sorted_list[0]!=sp_line[2].upper():
snp_base=sorted_list[0]
else:
snp_base=sorted_list[1]
output_line=format_base_freq_line(chr=sp_line[0], position=sp_line[1], ref_base=sp_line[2].upper(),
consensus_base=snp_base, ATCG=ATCG, ATCG_filtered=ATCG_filtered,
coverage=coverage)
open_output.write('%s\n'%(output_line))
input_stream.close()
for position in all_positions_loaded:
reference, coordinate = position.split('\t')
ATCG={'A':0,'T':0,'C':0,'G':0}
ATCG_filtered={'A':0,'T':0,'C':0,'G':0}
output_line=format_base_freq_line(chr=reference, position=coordinate, ref_base='N',
consensus_base='N', ATCG=ATCG, ATCG_filtered=ATCG_filtered,
coverage=0)
open_output.write('%s\n'%(output_line))
open_output.close()
def run_all_fastq_files(directory):
directory=os.path.abspath(directory)
all_dirs = glob(os.path.join(directory,'*_dir'))
all_samples=set()
for sub_dir in all_dirs:
print sub_dir
name=os.path.basename(sub_dir)[:-len("_dir")]
samples=calculate_base_frequency_for_snps(sub_dir, name)
all_samples.update(set(samples))
for sample in all_samples:
#concatenate the allele frequency file per samples
merged_file = os.path.join(directory,'samtools_snps_%s.allelefreq'%sample)
command = 'cat %s/*_dir/*_%s.allelefreq > %s'%(directory, sample, merged_file)
command_runner.run_command(command)
return
def main():
#initialize the logging
utils_logging.init_logging(logging.INFO)
#utils_logging.init_logging(logging.CRITICAL)
#Setup options
optparser=_prepare_optparser()
(options,args) = optparser.parse_args()
#verify options
arg_pass=_verifyOption(options)
if not arg_pass:
logging.warning(optparser.get_usage())
logging.critical("Non valid arguments: exit")
sys.exit(1)
if options.debug:
utils_logging.init_logging(logging.DEBUG)
if not options.print_command:
command_runner.set_command_to_run_localy()
run_all_fastq_files(options.consensus_dir)
def _prepare_optparser():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = """usage: %prog <-b bam_file> [ -o output_file]"""
description = """This script will take aligned RAD read to the consensuses and calculate per consensus coverage."""
optparser = OptionParser(version="None",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="show this help message and exit.")
optparser.add_option("-d","--consensus_dir",dest="consensus_dir",type="string",
help="Path to a directory containing fastq file (only extension .fastq will be processed). Default: %default")
optparser.add_option("--print",dest="print_command",action='store_true',default=False,
help="print the commands instead of running them. Default: %default")
optparser.add_option("--debug",dest="debug",action='store_true',default=False,
help="Output debug statment. Default: %default")
return optparser
def _verifyOption(options):
"""Check if the mandatory option are present in the options objects.
@return False if any argument is wrong."""
arg_pass=True
return arg_pass
if __name__=="__main__":
main()
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iiits', '0005_auto_20160228_1200'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(db_index=True, max_length=200)),
('content', models.TextField(default='NA')),
('fileupload', models.FileField(blank=True, null=True, upload_to='/static/iiits/files/news/')),
('image', models.ImageField(blank=True, null=True, upload_to='/static/iiits/images/news/')),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='visitingfaculty',
name='photo',
field=models.ImageField(upload_to='/static/iiits/images/faculty/'),
),
]
|
from django.core.urlresolvers import reverse
from django.shortcuts import Http404, HttpResponseRedirect
from django.views.generic import CreateView, UpdateView, DeleteView, \
ListView
from django_tables2 import SingleTableMixin
from core.views import LoginRequiredMixin
from ..forms import MetricForm
from ..models import Metric
from ..tables import MetricTable
class MetricViewMixin(object):
'''This is a helper mixin providing generic support for all of the
Metric views'''
model = Metric
form_class = MetricForm
def get_object(self, queryset=None):
obj = super(MetricViewMixin, self).get_object()
if obj.creator != self.request.user:
raise Http404
return obj
def get_success_url(self):
return self.success_url or reverse("metrics:list")
class MetricListView(MetricViewMixin, LoginRequiredMixin, SingleTableMixin, ListView):
template_name = "metric/list.html"
table_class = MetricTable
class MetricDeleteView(MetricViewMixin, LoginRequiredMixin, DeleteView):
template_name = 'metric/confirm_delete.html'
class MetricFormMixin(object):
def check_save_add_another(self, form):
'''Check if someone clicked "save and add another" and redirect
when they do'''
if '_save_and_add_another' in form.data:
self.success_url = reverse("metrics:create")
return HttpResponseRedirect(self.get_success_url())
class MetricCreateView(MetricViewMixin, MetricFormMixin, LoginRequiredMixin, CreateView):
template_name = "metric/form.html"
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.creator = self.request.user
self.object.save()
return self.check_save_add_another(form)
class MetricUpdateView(MetricViewMixin, MetricFormMixin, LoginRequiredMixin, UpdateView):
template_name = "metric/form.html"
def form_valid(self, form):
return self.check_save_add_another(form)
|
import os
import logging
import logging.config
import sqlalchemy as sa
from alembic import context
from wuffi.conf import settings
from wuffi.core.db import DEFAULT_DATABASE_ALIAS
__all__ = (
'target_url',
'target_metadata',
'run',
'run_offline',
'run_online',
)
MIGRATIONS_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'generic': {
'format': '%(levelname)-5.5s [%(name)s] %(message)s',
'datefmt': '%H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'generic',
},
},
'loggers': {
'root': {
'handlers': ('console',),
'level': 'WARN',
},
'sqlalchemy': {
'handlers': ('console',),
'level': 'WARN',
'qualname': 'sqlalchemy.engine',
},
'alembic': {
'handlers': ('console',),
'level': 'INFO',
'qualname': 'alembic',
},
},
}
def target_url():
target_settings = settings.DATABASES[DEFAULT_DATABASE_ALIAS]
return sa.engine.url.URL(
target_settings['BACKEND'].split('.')[-1],
username=target_settings['USER'], password=target_settings['PASSWORD'],
host=target_settings['HOST'], port=target_settings['PORT'],
database=target_settings['DATABASE']
)
def target_metadata():
from wuffi.helpers.module_loading import import_module
apps_dir = os.path.join(settings.BASE_DIR, 'apps')
apps = [
x for x in os.listdir(apps_dir)
if os.path.isdir(os.path.join(apps_dir, x)) and x not in ('__pycache__',)
]
modules = [
import_module('apps.{app}.tables'.format(app=app))
for app in apps
]
metadatas = [
x for x in sum(
(list(module.__dict__.values()) for module in modules), []
) if isinstance(x, sa.MetaData)
]
m = sa.MetaData()
for metadata in metadatas:
for t in metadata.tables.values():
t.tometadata(m)
return m
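# For illustration (hypothetical app layout): target_metadata() above expects each
# app under apps/ to expose one or more module-level sa.MetaData objects in its
# tables module, e.g. in apps/blog/tables.py:
#
#   import sqlalchemy as sa
#
#   metadata = sa.MetaData()
#   posts = sa.Table(
#       'posts', metadata,
#       sa.Column('id', sa.Integer, primary_key=True),
#       sa.Column('title', sa.String(200)),
#   )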
def run():
"""
Run migrations.
"""
logging.config.dictConfig(MIGRATIONS_LOGGING)
if context.is_offline_mode():
run_offline(context)
else:
run_online(context)
def run_offline(context):
"""
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
:param context: Alembic context
"""
context.configure(url=target_url(), target_metadata=target_metadata(),
literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_online(context):
"""
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
:param context: Alembic context
"""
connectable = sa.create_engine(target_url(), poolclass=sa.pool.NullPool)
with connectable.connect() as connection:
context.configure(connection=connection,
target_metadata=target_metadata())
with context.begin_transaction():
context.run_migrations()
|
"""aiohttp request argument parsing module.
Example: ::
import asyncio
from aiohttp import web
from webargs import fields
from webargs.aiohttpparser import use_args
hello_args = {
'name': fields.Str(required=True)
}
@asyncio.coroutine
@use_args(hello_args)
def index(request, args):
return web.Response(
body='Hello {}'.format(args['name']).encode('utf-8')
)
app = web.Application()
app.router.add_route('GET', '/', index)
"""
import asyncio
import json
from aiohttp import web
from aiohttp import web_exceptions
from webargs import core
from webargs.async import AsyncParser
def is_json_request(req):
content_type = req.content_type
return core.is_json(content_type)
class HTTPUnprocessableEntity(web.HTTPClientError):
status_code = 422
exception_map = {
422: HTTPUnprocessableEntity
}
def _find_exceptions():
for name in web_exceptions.__all__:
obj = getattr(web_exceptions, name)
try:
is_http_exception = issubclass(obj, web_exceptions.HTTPException)
except TypeError:
is_http_exception = False
if not is_http_exception or obj.status_code is None:
continue
old_obj = exception_map.get(obj.status_code, None)
if old_obj is not None and issubclass(obj, old_obj):
continue
exception_map[obj.status_code] = obj
_find_exceptions()
del _find_exceptions
class AIOHTTPParser(AsyncParser):
"""aiohttp request argument parser."""
__location_map__ = dict(
match_info='parse_match_info',
**core.Parser.__location_map__
)
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.GET, name, field)
@asyncio.coroutine
def parse_form(self, req, name, field):
"""Pull a form value from the request."""
post_data = self._cache.get('post')
if post_data is None:
self._cache['post'] = yield from req.post()
return core.get_value(self._cache['post'], name, field)
@asyncio.coroutine
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
json_data = self._cache.get('json')
if json_data is None:
if not (req.has_body and is_json_request(req)):
return core.missing
self._cache['json'] = json_data = yield from req.json()
return core.get_value(json_data, name, field, allow_many_nested=True)
def parse_headers(self, req, name, field):
"""Pull a value from the header data."""
return core.get_value(req.headers, name, field)
def parse_cookies(self, req, name, field):
"""Pull a value from the cookiejar."""
return core.get_value(req.cookies, name, field)
def parse_files(self, req, name, field):
raise NotImplementedError(
'parse_files is not implemented. You may be able to use parse_form for '
'parsing upload data.'
)
def parse_match_info(self, req, name, field):
"""Pull a value from the request's ``match_info``."""
return core.get_value(req.match_info, name, field)
def get_request_from_view_args(self, view, args, kwargs):
"""Get request object from a handler function or method. Used internally by
``use_args`` and ``use_kwargs``.
"""
if len(args) > 1:
req = args[1]
else:
req = args[0]
assert isinstance(req, web.Request), 'Request argument not found for handler'
return req
def handle_error(self, error):
"""Handle ValidationErrors and return a JSON response of error messages to the client."""
error_class = exception_map.get(error.status_code)
if not error_class:
raise LookupError('No exception for {0}'.format(error.status_code))
raise error_class(
body=json.dumps(error.messages).encode('utf-8'),
content_type='application/json'
)
parser = AIOHTTPParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
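# For illustration (hypothetical route and handler): the 'match_info' location
# registered in __location_map__ above lets a handler pull values out of the URL:
#
#   user_args = {'user_id': fields.Int(required=True)}
#
#   @asyncio.coroutine
#   @use_args(user_args, locations=('match_info',))
#   def user_detail(request, args):
#       return web.Response(body=str(args['user_id']).encode('utf-8'))
#
#   app.router.add_route('GET', '/users/{user_id}', user_detail)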
|
import ViewPresenter as vp
import ui
import shelve
class ItemError(Exception):
pass
class Item(object):
def __init__(self, name: str, amount: float, units: str):
self.name = name
self.amount = amount
self.units = units
return
def __eq__(self, other):
return self.name == other.name
def merge(self, other: 'Item'):
if not self == other:
raise ItemError('Can\'t merge unlike items')
new_amount = self.amount + other.amount
return Item(self.name, new_amount, self.units)
def __str__(self):
return '{}: {} {}'.format(self.name, self.amount, self.units)
class ItemRepository(object):
def __init__(self, shelf_name):
self.name = shelf_name
self.key = 'item_list'      # must be set before load(), which reads it
self.closed = True          # open_shelf() checks this before reopening
self.items = self.load()
self.has_changed = True
return
def open_shelf(self):
if self.closed:
self.closed = False
return shelve.open(self.name)
else:
return self.shelf
def load(self):
self.shelf = self.open_shelf()
return self.shelf.get(self.key, [])
def save(self):
self.shelf = self.open_shelf()
self.shelf[self.key] = self.items
self.has_changed = False
return
def delete_item(self, index):
try:
# delete highest indices first so earlier deletions don't shift later ones
for indx in sorted(index, reverse=True):
del self.items[indx]
except TypeError:
del self.items[index]
self.has_changed = True
return self.count
def add_item(self, item_name, item_quantity, item_unit):
item = Item(item_name, item_quantity, item_unit)
try:
indx = self.items.index(item)
self.items[indx] = self.items[indx].merge(item)  # merge() returns a new Item
except ValueError:
self.items.append(item)
self.has_changed = True
return self.count
def close(self):
self.shelf.close()
self.closed = True
return
@property
def count(self):
return len(self.items)
def __str__(self):
item_strings = [str(item) for item in self.items]
return '\n'.join(item_strings)
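# A minimal usage sketch (hypothetical shelf name and items):
#
#   pantry = ItemRepository('pantry')
#   pantry.add_item('flour', 2.0, 'kg')
#   pantry.add_item('flour', 1.0, 'kg')   # merges with the existing entry -> 3.0 kg
#   pantry.save()
#   pantry.close()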
class PantryInventory(object):
def __init__(self):
self.view_names = ['PantryInventory', 'detail_subview', 'add_item_popup']
self.vm = vp.MultipleViewModel(self.view_names)
self.items = ItemRepository('pantry')
main_actions = {'save_button':self.save_button_tapped,
'quit_button':self.quit_button_tapped,
'add_item_button':self.add_item_tapped,
'remove_item_button':self.remove_item_tapped}
self.vm.bind_actions(main_actions)
return
def save_button_tapped(self, sender):
pass
def quit_button_tapped(self, sender):
pass
def add_item_tapped(self, sender):
pass
def remove_item_tapped(self, sender):
pass
|
"""
meetbot.py - Willie meeting logger module
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
This module is an attempt to implement at least some of the functionality of Debian's meetbot
"""
from __future__ import unicode_literals
import time
import os
from willie.web import quote
from willie.modules.url import find_title
from willie.module import example, commands, rule, priority
from willie.tools import Ddict, Identifier
import codecs
def configure(config):
"""
| [meetbot] | example | purpose |
| --------- | ------- | ------- |
| meeting_log_path | /home/willie/www/meetings | Path to meeting logs storage directory (should be an absolute path, accessible on a webserver) |
| meeting_log_baseurl | http://example.com/~willie/meetings | Base URL for the meeting logs directory |
"""
if config.option('Configure meetbot', False):
config.interactive_add('meetbot', 'meeting_log_path', "Path to meeting logs storage directory (should be an absolute path, accessible on a webserver)")
config.interactive_add('meetbot', 'meeting_log_baseurl', "Base URL for the meeting logs directory (eg. http://example.com/logs)")
meetings_dict = Ddict(dict) # Saves metadata about currently running meetings
"""
meetings_dict is a 2D dict.
Each meeting should have:
channel
time of start
head (can stop the meeting, plus all abilities of chairs)
chairs (can add infolines to the logs)
title
current subject
comments (what people who aren't voiced want to add)
Using channel as the meeting ID as there can't be more than one meeting in a channel at the same time.
"""
meeting_log_path = '' # To be defined on meeting start as part of sanity checks, used by the logging functions so we don't have to pass the bot to them
meeting_log_baseurl = '' # To be defined on meeting start as part of sanity checks, used by the logging functions so we don't have to pass the bot to them
meeting_actions = {} # A dict of channels to the actions that have been created in them. This way we can have .listactions spit them back out later on.
def figure_logfile_name(channel):
if meetings_dict[channel]['title'] == 'Untitled meeting':
name = 'untitled'
else:
name = meetings_dict[channel]['title']
# Real simple sluggifying. This bunch of characters isn't exhaustive, but
# whatever. It's close enough for most situations, I think.
for c in ' ./\\:*?"<>|&*`':
name = name.replace(c, '-')
timestring = time.strftime('%Y-%m-%d-%H:%M', time.gmtime(meetings_dict[channel]['start']))
filename = timestring + '_' + name
return filename
def logHTML_start(channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
timestring = time.strftime('%Y-%m-%d %H:%M', time.gmtime(meetings_dict[channel]['start']))
title = '%s at %s, %s' % (meetings_dict[channel]['title'], channel, timestring)
logfile.write('<!doctype html>\n<html>\n<head>\n<meta charset="utf-8">\n<title>%TITLE%</title>\n</head>\n<body>\n<h1>%TITLE%</h1>\n'.replace('%TITLE%', title))
logfile.write('<h4>Meeting started by %s</h4><ul>\n' % meetings_dict[channel]['head'])
logfile.close()
def logHTML_listitem(item, channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
logfile.write('<li>' + item + '</li>\n')
logfile.close()
def logHTML_end(channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
current_time = time.strftime('%H:%M:%S', time.gmtime())
logfile.write('</ul>\n<h4>Meeting ended at %s UTC</h4>\n' % current_time)
plainlog_url = meeting_log_baseurl + quote(channel + '/' + figure_logfile_name(channel) + '.log')
logfile.write('<a href="%s">Full log</a>' % plainlog_url)
logfile.write('\n</body>\n</html>')
logfile.close()
def logplain(item, channel):
current_time = time.strftime('%H:%M:%S', time.gmtime())
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.log', 'a', encoding='utf-8')
logfile.write('[' + current_time + '] ' + item + '\r\n')
logfile.close()
def ismeetingrunning(channel):
try:
return bool(meetings_dict[channel]['running'])
except KeyError:
return False
def ischair(nick, channel):
try:
if nick.lower() == meetings_dict[channel]['head'] or nick.lower() in meetings_dict[channel]['chairs']:
return True
else:
return False
except KeyError:
return False
@commands('startmeeting')
@example('.startmeeting title or .startmeeting')
def startmeeting(bot, trigger):
"""
Start a meeting.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, there is already a meeting in progress here!')
return
if trigger.is_privmsg:
bot.say('Can only start meetings in channels')
return
if not bot.config.has_section('meetbot'):
bot.say('Meetbot not configured, make sure meeting_log_path and meeting_log_baseurl are defined')
return
#Start the meeting
meetings_dict[trigger.sender]['start'] = time.time()
if not trigger.group(2):
meetings_dict[trigger.sender]['title'] = 'Untitled meeting'
else:
meetings_dict[trigger.sender]['title'] = trigger.group(2)
meetings_dict[trigger.sender]['head'] = trigger.nick.lower()
meetings_dict[trigger.sender]['running'] = True
meetings_dict[trigger.sender]['comments'] = []
global meeting_log_path
meeting_log_path = bot.config.meetbot.meeting_log_path
if not meeting_log_path.endswith('/'):
meeting_log_path = meeting_log_path + '/'
global meeting_log_baseurl
meeting_log_baseurl = bot.config.meetbot.meeting_log_baseurl
if not meeting_log_baseurl.endswith('/'):
meeting_log_baseurl = meeting_log_baseurl + '/'
if not os.path.isdir(meeting_log_path + trigger.sender):
try:
os.makedirs(meeting_log_path + trigger.sender)
except Exception as e:
bot.say("Can't create log directory for this channel, meeting not started!")
meetings_dict[trigger.sender] = Ddict(dict)
raise
return
#Okay, meeting started!
logplain('Meeting started by ' + trigger.nick.lower(), trigger.sender)
logHTML_start(trigger.sender)
meeting_actions[trigger.sender] = []
bot.say('Meeting started! Use .action, .agreed, .info, .chairs, .subject and .comments to control the meeting. To end the meeting, type .endmeeting')
bot.say('Users without speaking permission can use .comment ' +
trigger.sender + ' followed by their comment in a PM with me to '
'make themselves heard.')
@commands('subject')
@example('.subject roll call')
def meetingsubject(bot, trigger):
"""
Change the meeting subject.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('what is the subject?')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
meetings_dict[trigger.sender]['current_subject'] = trigger.group(2)
logfile = codecs.open(meeting_log_path + trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html', 'a', encoding='utf-8')
logfile.write('</ul><h3>' + trigger.group(2) + '</h3><ul>')
logfile.close()
logplain('Current subject: ' + trigger.group(2) + ', (set by ' + trigger.nick + ')', trigger.sender)
bot.say('Current subject: ' + trigger.group(2))
@commands('endmeeting')
@example('.endmeeting')
def endmeeting(bot, trigger):
"""
End a meeting.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
meeting_length = time.time() - meetings_dict[trigger.sender]['start']
#TODO: Humanize time output
bot.say("Meeting ended! total meeting length %d seconds" % meeting_length)
logHTML_end(trigger.sender)
htmllog_url = meeting_log_baseurl + quote(trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html')
logplain('Meeting ended by %s, total meeting length %d seconds' % (trigger.nick, meeting_length), trigger.sender)
bot.say('Meeting minutes: ' + htmllog_url)
meetings_dict[trigger.sender] = Ddict(dict)
del meeting_actions[trigger.sender]
@commands('chairs')
@example('.chairs Tyrope Jason elad')
def chairs(bot, trigger):
"""
Set the meeting chairs.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('Who are the chairs?')
return
if trigger.nick.lower() == meetings_dict[trigger.sender]['head']:
meetings_dict[trigger.sender]['chairs'] = trigger.group(2).lower().split(' ')
chairs_readable = trigger.group(2).lower().replace(' ', ', ')
logplain('Meeting chairs are: ' + chairs_readable, trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Meeting chairs are: </span>' + chairs_readable, trigger.sender)
bot.say('Meeting chairs are: ' + chairs_readable)
else:
bot.say("Only meeting head can set chairs")
@commands('action')
@example('.action elad will develop a meetbot')
def meetingaction(bot, trigger):
"""
Log an action in the meeting log
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .action someone will do something')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('ACTION: ' + trigger.group(2), trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Action: </span>' + trigger.group(2), trigger.sender)
meeting_actions[trigger.sender].append(trigger.group(2))
bot.say('ACTION: ' + trigger.group(2))
@commands('listactions')
@example('.listactions')
def listactions(bot, trigger):
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
for action in meeting_actions[trigger.sender]:
bot.say('ACTION: ' + action)
@commands('agreed')
@example('.agreed Bowties are cool')
def meetingagreed(bot, trigger):
"""
Log an agreement in the meeting log.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .agreed Bowties are cool')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('AGREED: ' + trigger.group(2), trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Agreed: </span>' + trigger.group(2), trigger.sender)
bot.say('AGREED: ' + trigger.group(2))
@commands('link')
@example('.link http://example.com')
def meetinglink(bot, trigger):
"""
Log a link in the meeting log.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .link http://example.com')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
link = trigger.group(2)
if not link.startswith("http"):
link = "http://" + link
try:
title = find_title(link)
except Exception:
title = ''
logplain('LINK: %s [%s]' % (link, title), trigger.sender)
logHTML_listitem('<a href="%s">%s</a>' % (link, title), trigger.sender)
bot.say('LINK: ' + link)
@commands('info')
@example('.info all board members present')
def meetinginfo(bot, trigger):
"""
Log an informational item in the meeting log
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .info some informative thing')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('INFO: ' + trigger.group(2), trigger.sender)
logHTML_listitem(trigger.group(2), trigger.sender)
bot.say('INFO: ' + trigger.group(2))
@rule('(.*)')
@priority('low')
def log_meeting(bot, trigger):
if not ismeetingrunning(trigger.sender):
return
if trigger.startswith(('.endmeeting', '.chairs', '.action', '.info', '.startmeeting', '.agreed', '.link', '.subject')):
return
logplain('<' + trigger.nick + '> ' + trigger, trigger.sender)
@commands('comment')
def take_comment(bot, trigger):
"""
Log a comment, to be shown with other comments when a chair uses .comments.
Intended to allow commentary from those outside the primary group of people
in the meeting.
Used in private message only, as `.comment <#channel> <comment to add>`
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not trigger.sender.is_nick():
return
if not trigger.group(4): # fewer than two arguments were given
bot.say('Usage: .comment <#channel> <comment to add>')
return
target, message = trigger.group(2).split(None, 1)
target = Identifier(target)
if not ismeetingrunning(target):
bot.say("There's not currently a meeting in that channel.")
else:
meetings_dict[target]['comments'].append((trigger.nick, message))
bot.say("Your comment has been recorded. It will be shown when the"
" chairs tell me to show the comments.")
bot.msg(meetings_dict[target]['head'], "A new comment has been recorded.")
@commands('comments')
def show_comments(bot, trigger):
"""
Show the comments that have been logged for this meeting with .comment.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
comments = meetings_dict[trigger.sender]['comments']
if comments:
msg = 'The following comments were made:'
bot.say(msg)
logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
for comment in comments:
msg = '<%s> %s' % comment
bot.say(msg)
logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
meetings_dict[trigger.sender]['comments'] = []
else:
bot.say('No comments have been logged.')
|
from . import *
SECRET_KEY = env.str('KOBRA_SECRET_KEY',
'Unsafe_development_key._Never_use_in_production.')
DEBUG = env.bool('KOBRA_DEBUG_MODE', True)
DATABASES = {
'default': env.db_url('KOBRA_DATABASE_URL', 'sqlite:///db.sqlite3')
}
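# These settings read from the environment with safe development defaults.
# Example production overrides (hypothetical values):
#   export KOBRA_SECRET_KEY='long-random-secret'
#   export KOBRA_DEBUG_MODE=false
#   export KOBRA_DATABASE_URL='postgres://user:pass@db.example.com/kobra'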
|
class Counter:
"""
Counts up to a limit, and signals if that limit is reached.
"""
def __init__(self):
self.limit = None
self.counter = 0
def increment(self, amt=1):
self.counter += amt
return self.limit is not None and self.counter >= self.limit
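# A minimal usage sketch (assuming the caller assigns `limit` directly, since
# __init__ leaves it as None and increment() never signals in that case):
#
#   c = Counter()
#   c.limit = 3
#   c.increment()     # -> False (counter == 1)
#   c.increment(2)    # -> True  (counter == 3, limit reached)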
|
import idiokit
from .. import handlers
from . import Transformation
class TransformationBot(Transformation):
handler = handlers.HandlerParam()
def __init__(self, *args, **keys):
Transformation.__init__(self, *args, **keys)
self.handler = handlers.load_handler(self.handler)
@idiokit.stream
def transform_keys(self, **keys):
yield idiokit.send((self.handler(log=self.log),))
def transform(self, handler):
return handler.transform()
if __name__ == "__main__":
TransformationBot.from_command_line().execute()
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from cribs.models import Crib
from cribs.models import Comment
from cribs.forms import CribForm, CommentForm
from assignments.models import Assignment
@login_required
def createCrib(request, assignmentID):
assignment = get_object_or_404(Assignment, pk=assignmentID)
if request.method == 'POST':
form = CribForm(request.POST)
if form.is_valid():
Crib.objects.get_or_create(
assignment=assignment,
created_by=request.user,
defaults=form.cleaned_data
)
return HttpResponseRedirect(reverse('cribs.views.myCribs', kwargs={'assignmentID': assignment.id}))
else:
try:
Crib.objects.get(assignment=assignment, created_by=request.user) # checking if there is already a crib.
return HttpResponseRedirect(reverse('cribs.views.myCribs', kwargs={'assignmentID': assignment.id}))
except Crib.DoesNotExist:
form = CribForm() # render empty form if crib was not registered.
return render_to_response(
'cribs/create_crib.html',
{'form': form, 'assignment': assignment},
context_instance=RequestContext(request)
)
@login_required
def myCribs(request, assignmentID):
assignment = get_object_or_404(Assignment, pk=assignmentID)
try:
crib = Crib.objects.get(assignment=assignment, created_by=request.user)
except Crib.DoesNotExist:
crib = None
comments = Comment.objects.filter(crib=crib)
form = CommentForm()
return render_to_response(
'cribs/my_crib.html',
{'crib': crib, 'form': form, 'comments': comments,
'assignment': assignment,},
context_instance=RequestContext(request)
)
@login_required
def allCribs(request, assignmentID):
assignment = get_object_or_404(Assignment, pk=assignmentID)
allCribs = Crib.objects.filter(assignment=assignment)
return render_to_response(
'cribs/all_cribs.html',
{'assignment': assignment, 'allcribs': allCribs},
context_instance=RequestContext(request)
)
@login_required
def cribDetail(request, cribID):
crib = get_object_or_404(Crib, pk=cribID)
comments = Comment.objects.filter(crib=crib)
form = CommentForm()
return render_to_response(
'cribs/my_crib.html',
{'crib': crib, 'form': form, 'comments': comments},
context_instance=RequestContext(request)
)
@login_required
def editCrib(request, cribID):
pass
@login_required
def closeCrib(request, cribID):
pass
@login_required
def reopenCrib(request, cribID):
pass
@login_required
def postComment(request, cribID):
# TODO: Implement in Ajax.
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = Comment(**form.cleaned_data)
comment.posted_by = request.user
comment.crib = get_object_or_404(Crib, pk=cribID)
comment.save()
return HttpResponse("Saved! (Later this will be implemented with Ajax. Hit back button and refresh page to see your comment.)")
@login_required
def editComment(request, commentID):
pass
|
from .default import DefaultPool
from .interruptible import InterruptiblePool
from .jl import JoblibPool
__all__ = ["DefaultPool", "InterruptiblePool", "JoblibPool"]
|
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from django_executor import settings as de_settings
class ExecutorConfig(AppConfig):
name = 'django_executor'
verbose_name = _("Django Executor")
def ready(self):
de_settings.patch_all()
|
import sys
import json
from httplib import HTTPConnection
from optparse import OptionParser
def fetchMetrics(host, port, url):
connection = HTTPConnection(host, port)
connection.request("GET", url)
response = connection.getresponse()
if response.status == 200:
try:
data = response.read()
except Exception as uhoh:
print "unknown error, possibly an empty response?: %s" % uhoh
sys.exit(1)
elif response.status == 401:
print "Invalid username or password."
sys.exit(1)
elif response.status == 404:
print "Web service not found."
sys.exit(1)
else:
print "Web service error (%d): %s" % (response.status, response.reason)
sys.exit(1)
return data
def processResponse(body, nulls):
data = json.loads(body)
if not data:
print "no data returned"
sys.exit(1)
for metric, timestamp in data[0]['datapoints']:
if nulls and not metric:
return 0
else:
return metric
def parse(range):
invert = False
if range.startswith('@'):
range = range.strip('@')
invert = True
if ':' in range:
start, end = range.split(':')
else:
start, end = '', range
if start == '~':
start = float('-inf')
else:
start = parse_atom(start, 0)
end = parse_atom(end, float('inf'))
return start, end, invert
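# Examples of the threshold range grammar accepted above, as (start, end, invert):
#   parse('10')     -> (0, 10, False)      alert when the metric is outside 0..10
#   parse('5:10')   -> (5, 10, False)
#   parse('~:10')   -> (-inf, 10, False)
#   parse('@5:10')  -> (5, 10, True)       inverted: alert when inside 5..10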
def parse_atom(atom, default):
if atom == '':
return default
if '.' in atom:
return float(atom)
return int(atom)
def makeNagios(metric, warning, critical):
severity = "OK"
code = 0
min = ''
max = ''
if warning:
wstart, wend, winvert = parse(warning)
if winvert:
if wstart <= metric <= wend:
severity = "WARNING"
code = 1
else:
if metric > wend or metric < wstart:
severity = "WARNING"
code = 1
min = wstart
max = wend
else: warning = ''
if critical:
cstart, cend, cinvert = parse(critical)
if cinvert:
if cstart <= metric <= cend:
severity = "CRITICAL"
code = 2
else:
if metric > cend or metric < cstart:
severity = "CRITICAL"
code = 2
min = cstart
max = cend
else: critical = ''
print "%s - %s|%s=%s;%s;%s;%s;%s; " % (severity, carbonCache, carbonCache, metric, warning, critical, min, max )
sys.exit(code)
def main():
parser = OptionParser()
parser.add_option('-H', '--host', dest='host',
help='Short hostname of the graphite server')
parser.add_option('-p', '--port', dest='port',
type='int', default=80,
help='Port to connect to on the web server')
parser.add_option('-u', '--url', dest='url',
help='URL to retrieve data from')
parser.add_option('-N', '--cache', dest='carboncache',
help='name of carbon cache')
parser.add_option('-n', '--none', dest='nulls',
action='store_true', default=False,
help='set null values to zero')
parser.add_option('-c', '--critical', dest='critical',
default=False,
help='set range for critical threshold. [@][start:][end]')
parser.add_option('-w', '--warning', dest='warning',
default=False,
help='set range for warning threshold. [@][start:][end]')
options, args = parser.parse_args()
if not options.host:
print >> sys.stderr, "You must specify the host."
sys.exit(1)
elif not options.url:
print >> sys.stderr, "You must specify the url."
sys.exit(1)
global carbonCache
carbonCache = options.carboncache
url = options.url + '&format=json&maxDataPoints=1'
data = fetchMetrics(options.host, options.port, url)
metrics = processResponse(data, options.nulls)
makeNagios(metrics, options.warning, options.critical)
if __name__ == '__main__':
main()
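# Example check command (hypothetical host, URL and thresholds):
#   python check_graphite.py -H graphite.example.com \
#       -u '/render?target=carbon.agents.host-a.cache.size' \
#       -N cache_size -w 500000 -c 1000000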
|
def minimum_reductions(s):
"""Count the total character decrements needed to make s a palindrome."""
n = len(s)
count = 0
for i in range(n // 2):
left = ord(s[i])
right = ord(s[(n - 1) - i])
count += abs(left - right)
return count
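# Worked example: s = 'abcd' pairs up ('a','d') and ('b','c');
# |97 - 100| + |98 - 99| = 3 + 1 = 4 reductions in total.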
T = int(input())
for _ in range(T):
s = input()
print(minimum_reductions(s))
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('treatment_sheets', '0006_auto_20160324_0247'),
]
operations = [
migrations.AlterField(
model_name='txitem',
name='dose',
field=models.FloatField(default=0),
),
]
|
"""
Implementation of paper: Deep Recurrent Attentive Writer (DRAW) network architecture introduced by
K. Gregor, I. Danihelka, A. Graves and D. Wierstra. The original paper can be found at:
http://arxiv.org/pdf/1502.04623
Reference implementations on GitHub:
1. https://github.com/jbornschein/draw
2. https://github.com/ikostrikov/TensorFlow-VAE-GAN-DRAW
"""
from tartist.core import get_env, get_logger
from tartist.core.utils.naming import get_dump_directory, get_data_directory
from tartist.nn import opr as O, optimizer, summary
import draw_opr
logger = get_logger(__file__)
__envs__ = {
'dir': {
'root': get_dump_directory(__file__),
'data': get_data_directory('WellKnown/mnist')
},
'trainer': {
'learning_rate': 0.001,
'batch_size': 100,
'epoch_size': 500,
'nr_epochs': 100,
},
'inference': {
'batch_size': 256,
'epoch_size': 40
},
'demo': {
'is_reconstruct': False,
'mode': 'draw',
'draw': {
'grid_desc': ('4v', '4h')
}
}
}
def make_network(env):
with env.create_network() as net:
h, w, c = 28, 28, 1
nr_glimpse = 16
att_dim = 5
code_length = 128
is_reconstruct = get_env('demo.is_reconstruct', False)
is_train = env.phase is env.Phase.TRAIN
dpc = env.create_dpcontroller()
with dpc.activate():
def inputs():
if is_train:
img = O.placeholder('img', shape=(None, h, w, c))
return [img]
return []
def forward(img=None):
encoder = O.BasicLSTMCell(256)
decoder = O.BasicLSTMCell(256)
batch_size = img.shape[0] if is_train else 1
canvas = O.zeros(shape=O.canonize_sym_shape([batch_size, h, w, c]), dtype='float32')
enc_state = encoder.zero_state(batch_size, dtype='float32')
dec_state = decoder.zero_state(batch_size, dtype='float32')
enc_h, dec_h = enc_state[1], dec_state[1]
def encode(x, state, reuse):
with env.variable_scope('read_encoder', reuse=reuse):
return encoder(x, state)
def decode(x, state, reuse):
with env.variable_scope('write_decoder', reuse=reuse):
return decoder(x, state)
all_sqr_mus, all_vars, all_log_vars = 0., 0., 0.
for step in range(nr_glimpse):
reuse = (step != 0)
if is_reconstruct or env.phase is env.Phase.TRAIN:
img_hat = draw_opr.image_diff(img, canvas) # eq. 3
# Note: here the input should be dec_h
with env.variable_scope('read', reuse=reuse):
read_param = O.fc('fc_param', dec_h, 5)
with env.name_scope('read_step{}'.format(step)):
cx, cy, delta, var, gamma = draw_opr.split_att_params(h, w, att_dim, read_param)
read_inp = O.concat([img, img_hat], axis=3) # of shape: batch_size x h x w x (2c)
read_out = draw_opr.att_read(att_dim, read_inp, cx, cy, delta, var) # eq. 4
enc_inp = O.concat([gamma * read_out.flatten2(), dec_h], axis=1)
enc_h, enc_state = encode(enc_inp, enc_state, reuse) # eq. 5
with env.variable_scope('sample', reuse=reuse):
_ = enc_h
sample_mu = O.fc('fc_mu', _, code_length)
sample_log_var = O.fc('fc_sigma', _, code_length)
with env.name_scope('sample_step{}'.format(step)):
sample_var = O.exp(sample_log_var)
sample_std = O.sqrt(sample_var)
sample_epsilon = O.random_normal([batch_size, code_length])
z = sample_mu + sample_std * sample_epsilon # eq. 6
# accumulate for losses
all_sqr_mus += sample_mu ** 2.
all_vars += sample_var
all_log_vars += sample_log_var
else:
z = O.random_normal([1, code_length])
# z = O.callback_injector(z)
dec_h, dec_state = decode(z, dec_state, reuse) # eq. 7
with env.variable_scope('write', reuse=reuse):
write_param = O.fc('fc_param', dec_h, 5)
write_in = O.fc('fc', dec_h, (att_dim * att_dim * c)).reshape(-1, att_dim, att_dim, c)
with env.name_scope('write_step{}'.format(step)):
cx, cy, delta, var, gamma = draw_opr.split_att_params(h, w, att_dim, write_param)
write_out = draw_opr.att_write(h, w, write_in, cx, cy, delta, var) # eq. 8
canvas += write_out
if env.phase is env.Phase.TEST:
dpc.add_output(O.sigmoid(canvas), name='canvas_step{}'.format(step))
canvas = O.sigmoid(canvas)
if env.phase is env.Phase.TRAIN:
with env.variable_scope('loss'):
img, canvas = img.flatten2(), canvas.flatten2()
content_loss = O.raw_cross_entropy_prob('raw_content', canvas, img)
content_loss = content_loss.sum(axis=1).mean(name='content')
# distrib_loss = 0.5 * (O.sqr(mu) + O.sqr(std) - 2. * O.log(std + 1e-8) - 1.0).sum(axis=1)
distrib_loss = -0.5 * (float(nr_glimpse) + all_log_vars - all_sqr_mus - all_vars).sum(axis=1)
distrib_loss = distrib_loss.mean(name='distrib')
summary.scalar('content_loss', content_loss)
summary.scalar('distrib_loss', distrib_loss)
loss = content_loss + distrib_loss
dpc.add_output(loss, name='loss', reduce_method='sum')
dpc.add_output(canvas, name='output')
dpc.set_input_maker(inputs).set_forward_func(forward)
net.add_all_dpc_outputs(dpc, loss_name='loss')
if env.phase is env.Phase.TRAIN:
summary.inference.scalar('loss', net.loss)
def make_optimizer(env):
wrapper = optimizer.OptimizerWrapper()
wrapper.set_base_optimizer(optimizer.base.AdamOptimizer(get_env('trainer.learning_rate'), beta1=0.75, beta2=0.5))
wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
('*/b', 2.0),
]))
# wrapper.append_grad_modifier(optimizer.grad_modifier.WeightDecay([
# ('*/W', 0.0005)
# ]))
env.set_optimizer(wrapper)
from data_provider_vae_mnist import *
def main_train(trainer):
from tartist.plugins.trainer_enhancer import summary
summary.enable_summary_history(trainer)
summary.enable_echo_summary_scalar(trainer)
from tartist.plugins.trainer_enhancer import progress
progress.enable_epoch_progress(trainer)
from tartist.plugins.trainer_enhancer import snapshot
snapshot.enable_snapshot_saver(trainer)
# from tartist.plugins.trainer_enhancer import inference
# inference.enable_inference_runner(trainer, make_dataflow_inference)
trainer.train()
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import serial
# NOTE: the original snippet used `arduino` below without ever opening a serial
# port; the port and baud rate here are assumptions and may need adjusting.
arduino = serial.Serial('/dev/ttyACM0', 9600)
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 50
camera.hflip = True
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
blur = cv2.blur(image, (3,3))
lower = np.array([76,31,4],dtype="uint8")
upper = np.array([210,90,70], dtype="uint8")
thresh = cv2.inRange(blur, lower, upper)
thresh2 = thresh.copy()
image, contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
best_cnt = None
for cnt in contours:
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
best_cnt = cnt
if best_cnt is None:
# no contour found in this frame; skip it
rawCapture.truncate(0)
continue
M = cv2.moments(best_cnt)
if M['m00'] == 0:
# degenerate contour, centroid undefined; skip this frame
rawCapture.truncate(0)
continue
cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
#outer and inner circle marking the center in a different color
cv2.circle(blur,(cx,cy),12,(45,235,252),2)
cv2.circle(blur,(cx,cy),10,(0,0,255),-1)
#capture the coordinates and print them to the screen
print("x= ")
print(cx)
print(" y= ")
print(cy)
# show the frame
cv2.imshow("Frame", blur)
#cv2.imshow('thresh',thresh2)
key = cv2.waitKey(1) & 0xFF
if cx > 320:
arduino.write(b'SH' + str(cx - 320).encode())  # bytes + str would raise TypeError in Python 3
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
|
"""
compute_accuracy.py
Usage:
compute_accuracy.py model.joblib
where model.joblib is a file created by cleverhans.serial.save containing
a picklable cleverhans.model.Model instance.
This script will run the model on a variety of types of data and print out
the accuracy for each data type.
clean : Clean data
semantic : Semantic adversarial examples
pgd: PGD adversarial examples
This script works by running a single attack on each example.
This is useful for quick evaluation during development, but for final
publication it would be better to use attack bundling.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
import tensorflow as tf
from cleverhans.attacks import ProjectedGradientDescent, Semantic
from cleverhans.compat import flags
from cleverhans.evaluation import accuracy
from cleverhans.serial import load
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import infer_devices
from cleverhans.utils_tf import silence
silence()
devices = infer_devices()
num_devices = len(devices)
BATCH_SIZE = 128
TRAIN_START = 0
TRAIN_END = 60000
TEST_START = 0
TEST_END = 10000
WHICH_SET = 'test'
NB_ITER = 40
BASE_EPS_ITER = None # Differs by dataset
FLAGS = flags.FLAGS
def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER):
"""
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(20181014)
set_log_level(logging.INFO)
sess = tf.Session()
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
x_data, y_data = dataset.get_set(which_set)
impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter)
def impl(sess, model, dataset, factory, x_data, y_data,
base_eps_iter=BASE_EPS_ITER, nb_iter=NB_ITER,
batch_size=BATCH_SIZE):
"""
The actual implementation of the evaluation.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param dataset: cleverhans.dataset.Dataset
:param factory: the dataset factory corresponding to `dataset`
:param x_data: numpy array of input examples
:param y_data: numpy array of class labels
:param base_eps_iter: step size for PGD if data were in [0, 1]
:param nb_iter: number of PGD iterations
:returns: dict mapping string adversarial example names to accuracies
"""
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
pgd_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
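# Example of the scaling above (hypothetical dataset kwargs): data centered in
# [-1, 1] (center=1, max_val=1) gives value_range = 2, so base_eps = 8/255
# becomes eps = 16/255 in the data's own units.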
semantic = Semantic(model, center, max_val, sess)
pgd = ProjectedGradientDescent(model, sess=sess)
jobs = [('clean', None, None, None),
('semantic', semantic, None, None),
('pgd', pgd, pgd_params, None)]
out = {}
for job in jobs:
name, attack, attack_params, job_batch_size = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
acc = accuracy(sess, model, x_data, y_data, batch_size=job_batch_size,
devices=devices, attack=attack, attack_params=attack_params)
t2 = time.time()
out[name] = acc
print("Accuracy on " + name + " examples: ", acc)
print("Evaluation took", t2 - t1, "seconds")
return out
def main(argv=None):
"""
Print accuracies
"""
try:
_name_of_script, filepath = argv
except ValueError:
raise ValueError(argv)
print_accuracies(filepath=filepath, test_start=FLAGS.test_start,
test_end=FLAGS.test_end, which_set=FLAGS.which_set,
nb_iter=FLAGS.nb_iter, base_eps_iter=FLAGS.base_eps_iter,
batch_size=FLAGS.batch_size)
if __name__ == '__main__':
flags.DEFINE_integer('train_start', TRAIN_START, 'Starting point (inclusive) '
'of range of train examples to use')
flags.DEFINE_integer('train_end', TRAIN_END, 'Ending point (non-inclusive) '
'of range of train examples to use')
flags.DEFINE_integer('test_start', TEST_START, 'Starting point (inclusive) '
'of range of test examples to use')
flags.DEFINE_integer('test_end', TEST_END, 'End point (non-inclusive) of '
'range of test examples to use')
flags.DEFINE_integer('nb_iter', NB_ITER, 'Number of iterations of PGD')
flags.DEFINE_string('which_set', WHICH_SET, '"train" or "test"')
flags.DEFINE_integer('batch_size', BATCH_SIZE,
'Batch size for most jobs')
flags.DEFINE_float('base_eps_iter', BASE_EPS_ITER,
'epsilon per iteration, if data were in [0, 1]')
tf.app.run()
|
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import AppsOperations, BindingsOperations, CertificatesOperations, ConfigServersOperations, CustomDomainsOperations, DeploymentsOperations, MonitoringSettingsOperations, Operations, RuntimeVersionsOperations, ServicesOperations, SkusOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient:
"""REST API for Azure Spring Cloud.
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
:ivar config_servers: ConfigServersOperations operations
:vartype config_servers:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
:ivar monitoring_settings: MonitoringSettingsOperations operations
:vartype monitoring_settings:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
:ivar apps: AppsOperations operations
:vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
:ivar bindings: BindingsOperations operations
:vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
:ivar runtime_versions: RuntimeVersionsOperations operations
:vartype runtime_versions:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppPlatformManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
self.monitoring_settings = MonitoringSettingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.runtime_versions = RuntimeVersionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AppPlatformManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.core.urlresolvers import reverse, reverse_lazy, resolve
from django.core.exceptions import PermissionDenied
from django.template import RequestContext
from layerindex.models import Branch, LayerItem, LayerMaintainer, LayerBranch, LayerDependency, LayerNote, Recipe, Machine, BBClass, BBAppend, RecipeChange, RecipeChangeset, ClassicRecipe
from datetime import datetime
from django.views.generic import TemplateView, DetailView, ListView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.base import RedirectView
from layerindex.forms import EditLayerForm, LayerMaintainerFormSet, EditNoteForm, EditProfileForm, RecipeChangesetForm, AdvancedRecipeSearchForm, BulkChangeEditFormSet, ClassicRecipeForm, ClassicRecipeSearchForm
from django.db import transaction
from django.contrib.auth.models import User, Permission
from django.db.models import Q, Count
from django.core.mail import EmailMessage
from django.template.loader import get_template
from django.template import Context
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from reversion.models import Revision
import simplesearch
import settings
from django.dispatch import receiver
import reversion
def edit_layernote_view(request, template_name, slug, pk=None):
layeritem = get_object_or_404(LayerItem, name=slug)
if layeritem.classic:
raise Http404
if not (request.user.is_authenticated() and (request.user.has_perm('layerindex.publish_layer') or layeritem.user_can_edit(request.user))):
raise PermissionDenied
if pk:
# Edit mode
layernote = get_object_or_404(LayerNote, pk=pk)
else:
# Add mode
layernote = LayerNote()
layernote.layer = layeritem
if request.method == 'POST':
form = EditNoteForm(request.POST, instance=layernote)
if form.is_valid():
form.save()
return HttpResponseRedirect(layeritem.get_absolute_url())
else:
form = EditNoteForm(instance=layernote)
return render(request, template_name, {
'form': form,
})
def delete_layernote_view(request, template_name, slug, pk):
layeritem = get_object_or_404(LayerItem, name=slug)
if layeritem.classic:
raise Http404
if not (request.user.is_authenticated() and (request.user.has_perm('layerindex.publish_layer') or layeritem.user_can_edit(request.user))):
raise PermissionDenied
layernote = get_object_or_404(LayerNote, pk=pk)
if request.method == 'POST':
layernote.delete()
return HttpResponseRedirect(layeritem.get_absolute_url())
else:
return render(request, template_name, {
'object': layernote,
'object_type': layernote._meta.verbose_name,
'cancel_url': layeritem.get_absolute_url()
})
def delete_layer_view(request, template_name, slug):
layeritem = get_object_or_404(LayerItem, name=slug)
if layeritem.classic:
raise Http404
if not (request.user.is_authenticated() and request.user.has_perm('layerindex.publish_layer') and layeritem.status == 'N'):
raise PermissionDenied
if request.method == 'POST':
layeritem.delete()
return HttpResponseRedirect(reverse('layer_list', args=('master',)))
else:
return render(request, template_name, {
'object': layeritem,
'object_type': layeritem._meta.verbose_name,
'cancel_url': layeritem.get_absolute_url()
})
def edit_layer_view(request, template_name, branch='master', slug=None):
return_url = None
branchobj = Branch.objects.filter(name=branch)[:1].get()
if slug:
# Edit mode
layeritem = get_object_or_404(LayerItem, name=slug)
if layeritem.classic:
raise Http404
if not (request.user.is_authenticated() and (request.user.has_perm('layerindex.publish_layer') or layeritem.user_can_edit(request.user))):
raise PermissionDenied
layerbranch = get_object_or_404(LayerBranch, layer=layeritem, branch=branchobj)
deplistlayers = LayerItem.objects.exclude(id=layeritem.id).order_by('name')
returnto = request.GET.get('returnto', 'layer_item')
if returnto:
if returnto == 'layer_review':
return_url = reverse_lazy(returnto, args=(layeritem.name,))
else:
return_url = reverse_lazy(returnto, args=(branch, layeritem.name))
else:
# Submit mode
layeritem = LayerItem()
layerbranch = LayerBranch(layer=layeritem, branch=branchobj)
deplistlayers = LayerItem.objects.filter(classic=False).order_by('name')
if request.method == 'POST':
last_vcs_url = layeritem.vcs_url
form = EditLayerForm(request.user, layerbranch, request.POST, instance=layeritem)
maintainerformset = LayerMaintainerFormSet(request.POST, instance=layerbranch)
if form.is_valid() and maintainerformset.is_valid():
with transaction.commit_on_success():
reset_last_rev = False
form.save()
layerbranch.layer = layeritem
new_subdir = form.cleaned_data['vcs_subdir']
if layerbranch.vcs_subdir != new_subdir:
layerbranch.vcs_subdir = new_subdir
reset_last_rev = True
layerbranch.save()
maintainerformset.save()
if slug:
new_deps = form.cleaned_data['deps']
existing_deps = [deprec.dependency for deprec in layerbranch.dependencies_set.all()]
reset_last_rev = False
for dep in new_deps:
if dep not in existing_deps:
deprec = LayerDependency()
deprec.layerbranch = layerbranch
deprec.dependency = dep
deprec.save()
reset_last_rev = True
for dep in existing_deps:
if dep not in new_deps:
layerbranch.dependencies_set.filter(dependency=dep).delete()
reset_last_rev = True
if layeritem.vcs_url != last_vcs_url:
reset_last_rev = True
if reset_last_rev:
layerbranch.vcs_last_rev = ''
layerbranch.save()
else:
# Save dependencies
for dep in form.cleaned_data['deps']:
deprec = LayerDependency()
deprec.layerbranch = layerbranch
deprec.dependency = dep
deprec.save()
# Send email
plaintext = get_template('layerindex/submitemail.txt')
perm = Permission.objects.get(codename='publish_layer')
    users = User.objects.filter(Q(groups__permissions=perm) | Q(user_permissions=perm)).distinct()
for user in users:
if user.first_name:
user_name = user.first_name
else:
user_name = user.username
d = Context({
'user_name': user_name,
'layer_name': layeritem.name,
'layer_url': request.build_absolute_uri(reverse('layer_review', args=(layeritem.name,))),
})
subject = '%s - %s' % (settings.SUBMIT_EMAIL_SUBJECT, layeritem.name)
from_email = settings.SUBMIT_EMAIL_FROM
to_email = user.email
text_content = plaintext.render(d)
msg = EmailMessage(subject, text_content, from_email, [to_email])
msg.send()
return HttpResponseRedirect(reverse('submit_layer_thanks'))
messages.success(request, 'Layer %s saved successfully.' % layeritem.name)
if return_url:
return HttpResponseRedirect(return_url)
else:
form = EditLayerForm(request.user, layerbranch, instance=layeritem)
maintainerformset = LayerMaintainerFormSet(instance=layerbranch)
return render(request, template_name, {
'form': form,
'maintainerformset': maintainerformset,
'deplistlayers': deplistlayers,
'return_url': return_url,
})
def bulk_change_edit_view(request, template_name, pk):
changeset = get_object_or_404(RecipeChangeset, pk=pk)
if request.method == 'POST':
formset = BulkChangeEditFormSet(request.POST, queryset=changeset.recipechange_set.all())
if formset.is_valid():
for form in formset:
form.clear_same_values()
formset.save()
return HttpResponseRedirect(reverse('bulk_change_review', args=(changeset.id,)))
else:
formset = BulkChangeEditFormSet(queryset=changeset.recipechange_set.all())
return render(request, template_name, {
'formset': formset,
})
def bulk_change_patch_view(request, pk):
import os
import os.path
import utils
changeset = get_object_or_404(RecipeChangeset, pk=pk)
# FIXME this couples the web server and machine running the update script together,
# but given that it's a separate script the way is open to decouple them in future
try:
ret = utils.runcmd('python bulkchange.py %d %s' % (int(pk), settings.TEMP_BASE_DIR), os.path.dirname(__file__))
if ret:
fn = ret.splitlines()[-1]
if os.path.exists(fn):
if fn.endswith('.tar.gz'):
mimetype = 'application/x-gzip'
else:
mimetype = 'text/x-diff'
                response = HttpResponse(content_type=mimetype)
response['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(fn)
with open(fn, "rb") as f:
data = f.read()
response.write(data)
os.remove(fn)
return response
return HttpResponse('No patch data generated', content_type='text/plain')
except Exception as e:
output = getattr(e, 'output', None)
if output:
if 'timeout' in output:
return HttpResponse('Failed to generate patches: timed out waiting for lock. Please try again shortly.', content_type='text/plain')
return HttpResponse('Failed to generate patches: %s' % e, content_type='text/plain')
# FIXME better error handling
def _check_url_branch(kwargs):
branchname = kwargs['branch']
if branchname:
if branchname == 'oe-classic':
raise Http404
branch = get_object_or_404(Branch, name=branchname)
def publish(request, name):
if not (request.user.is_authenticated() and request.user.has_perm('layerindex.publish_layer')):
raise PermissionDenied
return _statuschange(request, name, 'P')
def _statuschange(request, name, newstatus):
w = get_object_or_404(LayerItem, name=name)
if w.classic:
raise Http404
if w.status != newstatus:
w.change_status(newstatus, request.user.username)
w.save()
return HttpResponseRedirect(w.get_absolute_url())
class RedirectParamsView(RedirectView):
def get_redirect_url(self, *args, **kwargs):
redirect_name = kwargs.pop('redirect_name')
return reverse_lazy(redirect_name, args=args, kwargs=kwargs)
class LayerListView(ListView):
context_object_name = 'layerbranch_list'
def get_queryset(self):
_check_url_branch(self.kwargs)
return LayerBranch.objects.filter(branch__name=self.kwargs['branch']).filter(layer__status='P').order_by('layer__layer_type', 'layer__name')
def get_context_data(self, **kwargs):
context = super(LayerListView, self).get_context_data(**kwargs)
context['url_branch'] = self.kwargs['branch']
context['this_url_name'] = resolve(self.request.path_info).url_name
context['layer_type_choices'] = LayerItem.LAYER_TYPE_CHOICES
return context
class LayerReviewListView(ListView):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('layerindex.publish_layer'):
raise PermissionDenied
return super(LayerReviewListView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return LayerBranch.objects.filter(branch__name='master').filter(layer__status='N').order_by('layer__name')
class LayerDetailView(DetailView):
model = LayerItem
slug_field = 'name'
# This is a bit of a mess. Surely there has to be a better way to handle this...
def dispatch(self, request, *args, **kwargs):
self.user = request.user
res = super(LayerDetailView, self).dispatch(request, *args, **kwargs)
l = self.get_object()
if l:
if l.classic:
raise Http404
if l.status == 'N':
if not (request.user.is_authenticated() and request.user.has_perm('layerindex.publish_layer')):
raise PermissionDenied
return res
def get_context_data(self, **kwargs):
_check_url_branch(self.kwargs)
context = super(LayerDetailView, self).get_context_data(**kwargs)
layer = context['layeritem']
context['useredit'] = layer.user_can_edit(self.user)
layerbranch = layer.get_layerbranch(self.kwargs['branch'])
if layerbranch:
context['layerbranch'] = layerbranch
context['machines'] = layerbranch.machine_set.order_by('name')
context['appends'] = layerbranch.bbappend_set.order_by('filename')
context['classes'] = layerbranch.bbclass_set.order_by('name')
context['url_branch'] = self.kwargs['branch']
context['this_url_name'] = resolve(self.request.path_info).url_name
return context
class LayerReviewDetailView(LayerDetailView):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('layerindex.publish_layer'):
raise PermissionDenied
return super(LayerReviewDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
self.kwargs['branch'] = 'master'
context = super(LayerReviewDetailView, self).get_context_data(**kwargs)
return context
def recipes_preferred_count(qs):
# Add extra column so we can show "duplicate" recipes from other layers de-emphasised
# (it's a bit crude having to do this using SQL but I couldn't find a better way...)
return qs.extra(
select={
'preferred_count': """SELECT COUNT(1)
FROM layerindex_recipe AS recipe2
, layerindex_layerbranch as branch2
, layerindex_layeritem as layer1
, layerindex_layeritem as layer2
WHERE branch2.id = recipe2.layerbranch_id
AND layer2.id = branch2.layer_id
AND layer2.layer_type in ('S', 'A')
AND branch2.branch_id = layerindex_layerbranch.branch_id
AND recipe2.pn = layerindex_recipe.pn
AND recipe2.layerbranch_id <> layerindex_recipe.layerbranch_id
AND layer1.id = layerindex_layerbranch.layer_id
AND layer2.index_preference > layer1.index_preference
"""
},
)
class RecipeSearchView(ListView):
context_object_name = 'recipe_list'
paginate_by = 50
def get_queryset(self):
_check_url_branch(self.kwargs)
query_string = self.request.GET.get('q', '')
init_qs = Recipe.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
if query_string.strip():
entry_query = simplesearch.get_query(query_string, ['pn', 'summary', 'description', 'filename'])
qs = init_qs.filter(entry_query).order_by('pn', 'layerbranch__layer')
else:
if 'q' in self.request.GET:
qs = init_qs.order_by('pn', 'layerbranch__layer')
else:
# It's a bit too slow to return all records by default, and most people
# won't actually want that (if they do they can just hit the search button
# with no query string)
return Recipe.objects.none()
return recipes_preferred_count(qs)
def get_context_data(self, **kwargs):
context = super(RecipeSearchView, self).get_context_data(**kwargs)
searchval = self.request.GET.get('q', '')
context['search_keyword'] = searchval
context['url_branch'] = self.kwargs['branch']
context['this_url_name'] = resolve(self.request.path_info).url_name
if searchval:
context['extra_url_param'] = '?q=%s' % searchval
return context
class DuplicatesView(TemplateView):
def get_recipes(self, layer_ids):
init_qs = Recipe.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
if layer_ids:
init_qs = init_qs.filter(layerbranch__layer__in=layer_ids)
dupes = init_qs.values('pn').annotate(Count('layerbranch', distinct=True)).filter(layerbranch__count__gt=1)
qs = init_qs.all().filter(pn__in=[item['pn'] for item in dupes]).order_by('pn', 'layerbranch__layer', '-pv')
return recipes_preferred_count(qs)
def get_classes(self, layer_ids):
init_qs = BBClass.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
if layer_ids:
init_qs = init_qs.filter(layerbranch__layer__in=layer_ids)
dupes = init_qs.values('name').annotate(Count('layerbranch', distinct=True)).filter(layerbranch__count__gt=1)
qs = init_qs.all().filter(name__in=[item['name'] for item in dupes]).order_by('name', 'layerbranch__layer')
return qs
def get_context_data(self, **kwargs):
layer_ids = [int(i) for i in self.request.GET.getlist('l')]
context = super(DuplicatesView, self).get_context_data(**kwargs)
context['recipes'] = self.get_recipes(layer_ids)
context['classes'] = self.get_classes(layer_ids)
context['url_branch'] = self.kwargs['branch']
context['this_url_name'] = resolve(self.request.path_info).url_name
        context['layers'] = LayerBranch.objects.filter(branch__name=self.kwargs['branch']).filter(layer__status='P').order_by('layer__name')
context['showlayers'] = layer_ids
return context
class AdvancedRecipeSearchView(ListView):
context_object_name = 'recipe_list'
paginate_by = 50
def get_queryset(self):
field = self.request.GET.get('field', '')
if field:
search_form = AdvancedRecipeSearchForm(self.request.GET)
if not search_form.is_valid():
return Recipe.objects.none()
match_type = self.request.GET.get('match_type', '')
if match_type == 'B':
value = ''
else:
value = self.request.GET.get('value', '')
if value or match_type == 'B':
if match_type == 'C' or match_type == 'N':
query = Q(**{"%s__icontains" % field: value})
else:
query = Q(**{"%s" % field: value})
queryset = Recipe.objects.filter(layerbranch__branch__name='master')
layer = self.request.GET.get('layer', '')
if layer:
queryset = queryset.filter(layerbranch__layer=layer)
if match_type == 'N':
# Exclude blank as well
queryset = queryset.exclude(Q(**{"%s" % field: ''})).exclude(query)
else:
queryset = queryset.filter(query)
return queryset.order_by('pn', 'layerbranch__layer')
return Recipe.objects.none()
def get_context_data(self, **kwargs):
context = super(AdvancedRecipeSearchView, self).get_context_data(**kwargs)
if self.request.GET.get('field', ''):
searched = True
search_form = AdvancedRecipeSearchForm(self.request.GET)
else:
searched = False
search_form = AdvancedRecipeSearchForm()
context['search_form'] = search_form
context['searched'] = searched
return context
class BulkChangeView(CreateView):
model = RecipeChangeset
form_class = RecipeChangesetForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(BulkChangeView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
if not self.request.user.is_authenticated():
raise PermissionDenied
obj = form.save(commit=False)
obj.user = self.request.user
obj.save()
return HttpResponseRedirect(reverse('bulk_change_search', args=(obj.id,)))
def get_context_data(self, **kwargs):
context = super(BulkChangeView, self).get_context_data(**kwargs)
context['changesets'] = RecipeChangeset.objects.filter(user=self.request.user)
return context
class BulkChangeSearchView(AdvancedRecipeSearchView):
def get(self, request, *args, **kwargs):
self.changeset = get_object_or_404(RecipeChangeset, pk=kwargs['pk'])
if self.changeset.user != request.user:
raise PermissionDenied
return super(BulkChangeSearchView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if not request.user.is_authenticated():
raise PermissionDenied
changeset = get_object_or_404(RecipeChangeset, pk=kwargs['pk'])
if changeset.user != request.user:
raise PermissionDenied
def add_recipes(recipes):
for recipe in recipes:
if not changeset.recipechange_set.filter(recipe=recipe):
change = RecipeChange()
change.changeset = changeset
change.recipe = recipe
change.save()
if 'add_selected' in request.POST:
id_list = request.POST.getlist('selecteditems')
id_list = [int(i) for i in id_list if i.isdigit()]
recipes = Recipe.objects.filter(id__in=id_list)
add_recipes(recipes)
elif 'add_all' in request.POST:
add_recipes(self.get_queryset())
elif 'remove_all' in request.POST:
changeset.recipechange_set.all().delete()
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BulkChangeSearchView, self).get_context_data(**kwargs)
context['changeset'] = self.changeset
context['current_branch'] = 'master'
return context
class BaseDeleteView(DeleteView):
def get_context_data(self, **kwargs):
context = super(BaseDeleteView, self).get_context_data(**kwargs)
obj = context.get('object', None)
if obj:
context['object_type'] = obj._meta.verbose_name
cancel = self.request.GET.get('cancel', '')
if cancel:
context['cancel_url'] = reverse_lazy(cancel, args=(obj.pk,))
return context
class BulkChangeDeleteView(BaseDeleteView):
model = RecipeChangeset
success_url = reverse_lazy('bulk_change')
def get_queryset(self):
qs = super(BulkChangeDeleteView, self).get_queryset()
return qs.filter(user=self.request.user)
class MachineSearchView(ListView):
context_object_name = 'machine_list'
paginate_by = 50
def get_queryset(self):
_check_url_branch(self.kwargs)
query_string = self.request.GET.get('q', '')
init_qs = Machine.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
if query_string.strip():
entry_query = simplesearch.get_query(query_string, ['name', 'description'])
return init_qs.filter(entry_query).order_by('name', 'layerbranch__layer')
else:
if 'q' in self.request.GET:
return init_qs.order_by('name', 'layerbranch__layer')
else:
# Be consistent with RecipeSearchView
return Machine.objects.none()
def get_context_data(self, **kwargs):
context = super(MachineSearchView, self).get_context_data(**kwargs)
context['search_keyword'] = self.request.GET.get('q', '')
context['url_branch'] = self.kwargs['branch']
context['this_url_name'] = resolve(self.request.path_info).url_name
return context
class PlainTextListView(ListView):
def render_to_response(self, context):
"Returns a plain text response rendering of the template"
template = get_template(self.template_name)
return HttpResponse(template.render(Context(context)),
content_type='text/plain')
class HistoryListView(ListView):
context_object_name = "revisions"
paginate_by = 50
def get_queryset(self):
return Revision.objects.all().order_by('-date_created')
class EditProfileFormView(UpdateView):
form_class = EditProfileForm
def dispatch(self, request, *args, **kwargs):
self.user = request.user
return super(EditProfileFormView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
return self.user
def get_success_url(self):
return reverse('frontpage')
@receiver(reversion.pre_revision_commit)
def annotate_revision(sender, **kwargs):
ignorefields = ['vcs_last_rev', 'vcs_last_fetch', 'vcs_last_commit', 'updated']
versions = kwargs.pop('versions')
instances = kwargs.pop('instances')
changelist = []
for ver, inst in zip(versions, instances):
currentVersion = ver.field_dict
modelmeta = ver.content_type.model_class()._meta
if ver.type == reversion.models.VERSION_DELETE:
changelist.append("Deleted %s: %s" % (modelmeta.verbose_name.lower(), ver.object_repr))
else:
pastver = reversion.get_for_object(inst)
if pastver and ver.type != reversion.models.VERSION_ADD:
pastVersion = pastver[0].field_dict
changes = set(currentVersion.items()) - set(pastVersion.items())
changedVars = [var[0] for var in changes]
fieldchanges = []
for field in changedVars:
if field not in ignorefields:
modelfield = modelmeta.get_field(field)
newvalue = currentVersion[field]
if modelfield.choices:
for v in modelfield.choices:
if v[0] == newvalue:
newvalue = v[1]
break
fieldchanges.append("%s to '%s'" % (modelfield.verbose_name.lower(), newvalue))
if fieldchanges:
changelist.append("Changed %s %s %s" % (modelmeta.verbose_name.lower(), ver.object_repr, ", ".join(fieldchanges)))
else:
changelist.append("Added %s: %s" % (modelmeta.verbose_name.lower(), ver.object_repr))
comment = '\n'.join(changelist)
if not comment:
comment = 'No changes'
revision = kwargs.pop('revision')
revision.comment = comment
revision.save()
kwargs['revision'] = revision
class RecipeDetailView(DetailView):
model = Recipe
def get_context_data(self, **kwargs):
context = super(RecipeDetailView, self).get_context_data(**kwargs)
recipe = self.get_object()
if recipe:
verappendprefix = recipe.filename.split('.bb')[0]
appendprefix = verappendprefix.split('_')[0]
#context['verappends'] = BBAppend.objects.filter(layerbranch__branch=recipe.layerbranch.branch).filter(filename='%s.bbappend' % verappendprefix)
context['appends'] = BBAppend.objects.filter(layerbranch__branch=recipe.layerbranch.branch).filter(filename__regex=r'%s(_[^_]*)?\.bbappend' % appendprefix)
verappends = []
for append in context['appends']:
if append.matches_recipe(recipe):
verappends.append(append)
context['verappends'] = verappends
return context
class ClassicRecipeSearchView(RecipeSearchView):
def get_queryset(self):
self.kwargs['branch'] = 'oe-classic'
query_string = self.request.GET.get('q', '')
cover_status = self.request.GET.get('cover_status', None)
cover_verified = self.request.GET.get('cover_verified', None)
category = self.request.GET.get('category', None)
init_qs = ClassicRecipe.objects.filter(layerbranch__branch__name='oe-classic')
if cover_status:
if cover_status == '!':
init_qs = init_qs.filter(cover_status__in=['U', 'N'])
else:
init_qs = init_qs.filter(cover_status=cover_status)
if cover_verified:
init_qs = init_qs.filter(cover_verified=(cover_verified=='1'))
if category:
init_qs = init_qs.filter(classic_category__icontains=category)
if query_string.strip():
entry_query = simplesearch.get_query(query_string, ['pn', 'summary', 'description', 'filename'])
qs = init_qs.filter(entry_query).order_by('pn', 'layerbranch__layer')
else:
if 'q' in self.request.GET:
qs = init_qs.order_by('pn', 'layerbranch__layer')
else:
# It's a bit too slow to return all records by default, and most people
# won't actually want that (if they do they can just hit the search button
# with no query string)
return Recipe.objects.none()
return qs
def get_context_data(self, **kwargs):
context = super(ClassicRecipeSearchView, self).get_context_data(**kwargs)
context['multi_classic_layers'] = LayerItem.objects.filter(classic=True).count() > 1
if 'q' in self.request.GET:
searched = True
search_form = ClassicRecipeSearchForm(self.request.GET)
else:
searched = False
search_form = ClassicRecipeSearchForm()
context['search_form'] = search_form
context['searched'] = searched
return context
class ClassicRecipeDetailView(UpdateView):
model = ClassicRecipe
form_class = ClassicRecipeForm
context_object_name = 'recipe'
def _can_edit(self):
if self.request.user.is_authenticated():
if not self.request.user.has_perm('layerindex.edit_classic'):
user_email = self.request.user.email.strip().lower()
if not LayerMaintainer.objects.filter(email__iexact=user_email):
return False
else:
return False
return True
def post(self, request, *args, **kwargs):
if not self._can_edit():
raise PermissionDenied
return super(ClassicRecipeDetailView, self).post(request, *args, **kwargs)
def get_success_url(self):
return reverse_lazy('classic_recipe_search')
def get_context_data(self, **kwargs):
context = super(ClassicRecipeDetailView, self).get_context_data(**kwargs)
context['can_edit'] = self._can_edit()
return context
class ClassicRecipeStatsView(TemplateView):
def get_context_data(self, **kwargs):
context = super(ClassicRecipeStatsView, self).get_context_data(**kwargs)
# *** Cover status chart ***
statuses = []
status_counts = {}
for choice, desc in ClassicRecipe.COVER_STATUS_CHOICES:
statuses.append(desc)
status_counts[desc] = ClassicRecipe.objects.filter(cover_status=choice).count()
statuses = sorted(statuses, key=lambda status: status_counts[status], reverse=True)
chartdata = {'x': statuses, 'y': [status_counts[k] for k in statuses]}
context['charttype_status'] = 'pieChart'
context['chartdata_status'] = chartdata
context['extra_status'] = {
'x_is_date': False,
'x_axis_format': '',
'tag_script_js': True,
'jquery_on_ready': False,
}
# *** Categories chart ***
categories = ['obsoletedir', 'nonworkingdir']
uniquevals = ClassicRecipe.objects.exclude(classic_category='').values_list('classic_category', flat=True).distinct()
for value in uniquevals:
cats = value.split()
for cat in cats:
if not cat in categories:
categories.append(cat)
categories.append('none')
catcounts = dict.fromkeys(categories, 0)
unmigrated = ClassicRecipe.objects.filter(cover_status='U')
catcounts['none'] = unmigrated.filter(classic_category='').count()
values = unmigrated.exclude(classic_category='').values_list('classic_category', flat=True)
# We gather data this way because an item might be in more than one category, thus
# the categories list must be in priority order
for value in values:
recipecats = value.split()
foundcat = 'none'
for cat in categories:
if cat in recipecats:
foundcat = cat
break
catcounts[foundcat] += 1
# Eliminate categories with zero count
categories = [cat for cat in categories if catcounts[cat] > 0]
categories = sorted(categories, key=lambda cat: catcounts[cat], reverse=True)
chartdata_category = {'x': categories, 'y': [catcounts[k] for k in categories]}
context['charttype_category'] = 'pieChart'
context['chartdata_category'] = chartdata_category
context['extra_category'] = {
'x_is_date': False,
'x_axis_format': '',
'tag_script_js': True,
'jquery_on_ready': False,
}
return context
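# A minimal, self-contained sketch (not part of the original views) of the
# priority-ordered category counting used in ClassicRecipeStatsView above:
# a recipe may list several space-separated categories, and only the first
# match in the ordered list is counted. The helper name is hypothetical.
def count_by_priority(category_values, ordered_categories):
    counts = dict.fromkeys(ordered_categories + ['none'], 0)
    for value in category_values:
        recipecats = value.split()
        for cat in ordered_categories:
            if cat in recipecats:
                counts[cat] += 1
                break
        else:
            counts['none'] += 1
    return counts
# e.g. count_by_priority(['obsoletedir perl', 'perl'], ['obsoletedir', 'perl'])
# -> {'obsoletedir': 1, 'perl': 1, 'none': 0}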
|
import os
import sys
import time
from time import gmtime, strftime
class buffer(object):
def __init__(self):
self.lines = {0:"" ,1:"" ,2:"" ,3:"" ,4:"" ,5:"" ,6:"" ,7:"" ,8:"" }
def print_lines(self):
for i in self.lines:
print(self.lines[i])
def clean(self):
for i in self.lines:
self.lines[i] = ""
class large_number:
def __init__(self,n):
self.n = n
self.lines = {0:"", 1:"", 2:"", 3:"", 4:"", 5:"", 6:"", 7:"", 8:""}
def add_str(self,i,stg):
self.lines[i] = stg
def add_to_buffer(self):
for i in range(0,9):
buf.lines[i] = buf.lines[i] + self.lines[i]
def get_big_chars():
global big_chars
    with open("digits.txt", "r") as digits:
        dig_in = digits.read().split('\n')
curr = 0
j = 0
big_chars = {}
for i in dig_in:
if i[:1] == "-":
curr = i[1:]
j = 0
big_chars[curr]=large_number(curr)
else:
if j != 0:
big_chars[curr].add_str(j-1,i)
j += 1
def new_separator():
global buf
for i in buf.lines:
buf.lines[i] += " "
def load_to_buf(time_str):
global buf
global big_chars
for char in time_str:
big_chars[char].add_to_buffer()
new_separator()
def get_time_string(H,M,S,typeof):
pm = False
    if H:
h = time.localtime().tm_hour
if h >=12:
if typeof == 12:
h = h-12
pm = True
if len(str(h)) == 1:
h = "0"+str(h)
    if M:
m = time.localtime().tm_min
if len(str(m)) == 1:
m = "0"+str(m)
    if S:
s = time.localtime().tm_sec
if len(str(s)) == 1:
s = "0"+str(s)
    if typeof == 12 and not pm:
        return str(h) + ":" + str(m) + ":" + str(s) + ":AM"
    if typeof == 12 and pm:
        return str(h) + ":" + str(m) + ":" + str(s) + ":PM"
if typeof == 24:
return str(h) + ":" + str(m) + ":" + str(s)
def main(argv):
global buf
global big_chars
time_format = 24
buf = buffer()
get_big_chars()
count = 0
while True:
time.sleep(0.1)
os.system("clear")
        print(strftime("%a, %d %b %Y", gmtime()))
now = get_time_string(True,True,True,time_format)
        load_to_buf(now)
print("\033[1;31m")
buf.print_lines()
print("\033[0m")
buf.clean()
count += 1
if count > 40:
break
if __name__ == '__main__':
main(sys.argv)
|
__author__ = 'adrian'
import sys
import web
import logging
from parking import parking
urls = (
'/parking', 'parking',
'/parking/car', 'carWithdraw'
)
def init_loggers():
# set up logging for the example
logger = logging.getLogger('CylindricalParking')
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler(stream=sys.stdout)
consoleHandler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s%(message)s'))
logger.addHandler(consoleHandler)
def init_webserver():
app = web.application(urls, globals())
app.run()
def main():
init_loggers()
# Initialize the rule engine framework
logging.getLogger('CylindricalParking').debug('Start Parking backend.')
# Initialize the web server and listen to requests
init_webserver()
if __name__ == '__main__':
main()
|
import mock
import unittest
from webtest import TestApp
from webtest.debugapp import debug_app
from raygun4py.middleware.wsgi import Provider
class TestWSGIMiddleware(unittest.TestCase):
def setUp(self):
self.test_middleware = ExceptionMiddleware(debug_app)
self.raygun_middleware = Provider(self.test_middleware, "XXXXXXXXXX")
self.app = TestApp(app=self.raygun_middleware, lint=True)
self.raygun_middleware.sender.send_exception = mock.MagicMock(return_value=True)
def test_basic_exception(self):
with self.assertRaises(Exception):
self.app.get('/?error=t')
self.raygun_middleware.sender.send_exception.assert_called_once()
def test_json_post(self):
self.test_middleware.raise_on_request = True
with self.assertRaises(Exception):
self.app.post_json('/foo/bar', params={
'foo': 'bar'
})
self.raygun_middleware.sender.send_exception.assert_called_once()
class ExceptionMiddleware(object):
def __init__(self, app):
self.app = app
self.raise_on_request = False
def __call__(self, environ, start_response):
appiter = None
try:
appiter = self.app(environ, start_response)
if self.raise_on_request:
raise Exception("test exception")
for item in appiter:
yield item
except Exception:
raise
finally:
if hasattr(appiter, 'close'):
appiter.close()
|
from __future__ import unicode_literals
import re
import sys
import willie.tools
if sys.version_info.major >= 3:
unicode = str
basestring = str
class PreTrigger(object):
"""A parsed message from the server, which has not been matched against
any rules."""
component_regex = re.compile(r'([^!]*)!?([^@]*)@?(.*)')
intent_regex = re.compile('\x01(\\S+) (.*)\x01')
def __init__(self, own_nick, line):
"""own_nick is the bot's nick, needed to correctly parse sender.
line is the full line from the server."""
line = line.strip('\r')
self.line = line
# Break off IRCv3 message tags, if present
self.tags = {}
if line.startswith('@'):
tagstring, line = line.split(' ', 1)
for tag in tagstring[1:].split(';'):
tag = tag.split('=', 1)
if len(tag) > 1:
self.tags[tag[0]] = tag[1]
else:
self.tags[tag[0]] = None
        # A leading ':' introduces the message prefix (the sender's hostmask); split it off
if line.startswith(':'):
self.hostmask, line = line[1:].split(' ', 1)
else:
self.hostmask = None
        # Everything after the first ' :' is the trailing parameter, which may contain spaces
if ' :' in line:
argstr, text = line.split(' :', 1)
self.args = argstr.split(' ')
self.args.append(text)
else:
self.args = line.split(' ')
self.text = self.args[-1]
self.event = self.args[0]
self.args = self.args[1:]
components = PreTrigger.component_regex.match(self.hostmask or '').groups()
self.nick, self.user, self.host = components
self.nick = willie.tools.Identifier(self.nick)
# If we have arguments, the first one is the sender
if self.args:
target = willie.tools.Identifier(self.args[0])
else:
target = None
# Unless we're messaging the bot directly, in which case that second
# arg will be our bot's name.
if target and target.lower() == own_nick.lower():
target = self.nick
self.sender = target
# Parse CTCP into a form consistent with IRCv3 intents
if self.event == 'PRIVMSG' or self.event == 'NOTICE':
intent_match = PreTrigger.intent_regex.match(self.args[-1])
if intent_match:
self.tags['intent'], self.args[-1] = intent_match.groups()
class Trigger(unicode):
"""A line from the server, which has matched a callable's rules.
Note that CTCP messages (`PRIVMSG`es and `NOTICE`es which start and end
with `'\\x01'`) will have the `'\\x01'` bytes stripped, and the command
(e.g. `ACTION`) placed mapped to the `'intent'` key in `Trigger.tags`.
"""
sender = property(lambda self: self._pretrigger.sender)
"""The channel from which the message was sent.
In a private message, this is the nick that sent the message."""
raw = property(lambda self: self._pretrigger.line)
"""The entire message, as sent from the server. This includes the CTCP
\\x01 bytes and command, if they were included."""
is_privmsg = property(lambda self: self._is_privmsg)
"""True if the trigger is from a user, False if it's from a channel."""
hostmask = property(lambda self: self._pretrigger.hostmask)
"""Hostmask of the person who sent the message as <nick>!<user>@<host>"""
user = property(lambda self: self._pretrigger.user)
"""Local username of the person who sent the message"""
nick = property(lambda self: self._pretrigger.nick)
"""The ``Identifier`` of the person who sent the message."""
host = property(lambda self: self._pretrigger.host)
"""The hostname of the person who sent the message"""
event = property(lambda self: self._pretrigger.event)
"""The IRC event (e.g. ``PRIVMSG`` or ``MODE``) which triggered the
message."""
match = property(lambda self: self._match)
"""The regular expression `MatchObject`_ for the triggering line.
.. _MatchObject: http://docs.python.org/library/re.html#match-objects"""
group = property(lambda self: self._match.group)
"""The ``group`` function of the ``match`` attribute.
See Python `re`_ documentation for details."""
groups = property(lambda self: self._match.groups)
"""The ``groups`` function of the ``match`` attribute.
See Python `re`_ documentation for details."""
args = property(lambda self: self._pretrigger.args)
"""
A tuple containing each of the arguments to an event. These are the
strings passed between the event name and the colon. For example,
setting ``mode -m`` on the channel ``#example``, args would be
``('#example', '-m')``
"""
tags = property(lambda self: self._pretrigger.tags)
"""A map of the IRCv3 message tags on the message."""
admin = property(lambda self: self._admin)
"""True if the nick which triggered the command is one of the bot's admins.
"""
owner = property(lambda self: self._owner)
"""True if the nick which triggered the command is the bot's owner."""
def __new__(cls, config, message, match):
self = unicode.__new__(cls, message.args[-1])
self._pretrigger = message
self._match = match
self._is_privmsg = message.sender.is_nick()
def match_host_or_nick(pattern):
pattern = willie.tools.get_hostmask_regex(pattern)
return bool(
pattern.match(self.nick) or
pattern.match('@'.join((self.nick, self.host)))
)
self._admin = any(match_host_or_nick(item)
for item in config.core.get_list('admins'))
self._owner = match_host_or_nick(config.core.owner)
self._admin = self.admin or self.owner
return self
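# A minimal parsing sketch (assumes the willie package is importable) showing
# how PreTrigger splits a raw IRC line into hostmask, event, args and text:
if __name__ == '__main__':
    pt = PreTrigger('mybot', ':nick!user@example.com PRIVMSG #channel :hello world')
    # pt.nick == 'nick', pt.user == 'user', pt.host == 'example.com'
    # pt.event == 'PRIVMSG', pt.sender == '#channel', pt.text == 'hello world'
    print('%s %s %s' % (pt.event, pt.sender, pt.text))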
|
'''Partial class to handle Vultr Regions API calls'''
from .utils import VultrBase, update_params
class VultrRegions(VultrBase):
'''Handles Vultr Regions API calls'''
def __init__(self, api_key):
VultrBase.__init__(self, api_key)
def availability(self, dcid, params=None):
''' /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
        you will need to pass your api_key in order to see them.
        For all other accounts, the API key is optional.
Link: https://www.vultr.com/api/#regions_region_available
'''
params = update_params(params, {'DCID': dcid})
return self.request('/v1/regions/availability', params, 'GET')
def list(self, params=None):
''' /v1/regions/list
GET - public
Retrieve a list of all active regions. Note that just
because a region is listed here, does not mean that
there is room for new servers.
Link: https://www.vultr.com/api/#regions_region_list
'''
params = params if params else dict()
return self.request('/v1/regions/list', params, 'GET')
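# A minimal usage sketch (the key below is a placeholder; request() is provided
# by VultrBase and performs the actual HTTP call):
#
#   regions = VultrRegions('EXAMPLE_API_KEY')
#   active = regions.list()            # all active regions
#   plans = regions.availability(1)    # VPSPLANIDs available in DCID 1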
|
"""graphics primitives"""
__authors__ = ["Ole Herman Schumacher Elgesem"]
__license__ = "MIT"
try:
import pyglet
from pyglet.text import Label
from pyglet.resource import image
except ImportError:
print("Warning: could not import pyglet.")
print("This is acceptable for tests, but rendering will not work.")
from sim_game.geometry import limit, Rectangle, Point
class Color:
def __init__(self, r,g,b,a=255):
self.r = r
self.g = g
self.b = b
self.a = a
def rgba(self):
return (self.r, self.g, self.b, self.a)
def __getitem__(self, key):
if type(key) is not int:
raise TypeError
return self.rgba()[key]
colors = {
"red": (255,0,0,255),
"green": (0,255,0,255),
"blue": (0,0,255,255),
"white": (255,255,255,255),
"black": (0,0,0,255)
}
@classmethod
def get(cls, name):
        return Color(*cls.colors[name])
class Renderer:
@staticmethod
def start(window):
window.clear()
pyglet.gl.glClearColor(255,255,255,255)
class GraphicsObject:
def __init__(self, pos=(0,0)):
GraphicsObject.set_pos(self, pos[0],pos[1])
def set_pos(self, x,y):
self.x = float(x)
self.y = float(y)
def move_pos(self, dx, dy):
self.x += dx
self.y += dy
# Sub class must override this function for drawing to work
def draw(self):
raise NotImplementedError
# Can replace draw(), more optimized:
def batch_add(self, batch):
raise NotImplementedError
def update(self, dt):
raise NotImplementedError
class GraphicsRectangle(GraphicsObject):
def __init__(self, width, height, fill=(128,128,128,255), stroke=(0,0,0,0), pos=(0,0), vel=(0,0), acc=(0,0), centered=False):
self.stroke = Color(*stroke)
self.fill = Color(*fill)
self.centered = centered
self.w = width
self.h = height
super().__init__(pos=pos)
if centered:
GraphicsRectangle.set_pos(self, pos[0], pos[1])
def set_pos(self, x,y):
super().set_pos(x,y)
if self.centered:
self.x -= self.w/2
self.y -= self.h/2
def set_fill(self, fill):
self.fill = Color(*fill)
def draw(self):
pyglet.gl.glLineWidth(4)
rect_vertices = pyglet.graphics.vertex_list(4,
('v2f', (self.x, self.y) +
(self.x+self.w, self.y) +
(self.x+self.w, self.y+self.h) +
(self.x, self.y+self.h)
),
('c4B', self.fill.rgba() * 4)
)
rect_vertices.draw(pyglet.gl.GL_QUADS)
rect_vertices.colors = self.stroke.rgba() * 4
rect_vertices.draw(pyglet.gl.GL_LINE_LOOP)
class ColoredRectangle(Rectangle):
def __init__(self, dimensions, *, pos=(0,0), anchor=(0,0), offset=(0,0), fill=(255,0,0), stroke=(0,0,0)):
super().__init__(pos=pos, dimensions=dimensions, offset=offset, anchor=anchor)
self.stroke = Color(*stroke)
self.fill = Color(*fill)
self.enabled = True
def set_fill(self, fill):
self.fill = Color(*fill)
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def draw(self):
if not self.enabled:
return
pyglet.gl.glLineWidth(4)
points = []
for point in self.points():
points += point.xy()
rect_vertices = pyglet.graphics.vertex_list(4,
('v2f', points),
('c4B', self.fill.rgba() * 4)
)
rect_vertices.draw(pyglet.gl.GL_QUADS)
rect_vertices.colors = self.stroke.rgba() * 4
rect_vertices.draw(pyglet.gl.GL_LINE_LOOP)
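# A minimal rendering sketch (assumes a working pyglet install; the window size
# and colors are arbitrary): clears the window and draws one rectangle per frame.
if __name__ == '__main__':
    window = pyglet.window.Window(320, 240)
    rect = GraphicsRectangle(100, 60, fill=(200, 40, 40, 255), pos=(40, 40))
    @window.event
    def on_draw():
        Renderer.start(window)
        rect.draw()
    pyglet.app.run()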
|
from ast import literal_eval
import os.path
import unittest
from antlr4 import CommonTokenStream, ParseTreeWalker
from antlr4.InputStream import InputStream
from json_database.parser.parser import CommandParser
from json_database.recognizer.DatabaseLexer import DatabaseLexer
from json_database.recognizer.DatabaseParser import DatabaseParser
class CommandsTest(unittest.TestCase):
def setUp(self):
self.db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'database.json')
def tearDown(self):
if os.path.exists(self.db_path):
os.remove(self.db_path)
def test_one_create_statement(self):
command = 'CREATE TABLE Isik'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': []})
def test_multiple_create_statements(self):
command = 'CREATE TABLE Isik; CREATE TABLE Isik1; CREATE TABLE Isik2'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': [], 'Isik1': [], 'Isik2': []})
def test_create_statement_with_existing_table(self):
with open(self.db_path, 'w+') as db_file:
db_file.write('{"Isik":[]}')
command = 'CREATE TABLE Isik'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': []})
def test_insert_statement_with_one_value(self):
command = 'INSERT INTO Isik SET Nimi="John"'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': [{'Nimi': 'John'}]})
def test_insert_statement_with_multiple_values(self):
command = 'INSERT INTO Isik SET Nimi="John", age=25, married=true'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': [{'Nimi': 'John', 'age': 25, 'married': True}]})
def test_insert_statement_with_multiple_statements(self):
command = 'INSERT INTO Isik SET Nimi="John"; INSERT INTO Isik SET Nimi="Paul"'
result = self.execute_command(command, self.db_path)
self.assertEqual(result, {'Isik': [{'Nimi': 'John'}, {'Nimi': 'Paul'}]})
def test_update_statement_without_where_statement(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'UPDATE Cars SET age=2'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
expected['Cars'][0]['age'] = 2
expected['Cars'][1]['age'] = 2
self.assertEqual(result, expected)
def test_update_statement_with_where_statement(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'UPDATE Person SET age=25 WHERE Name="John"'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
expected['Person'][0]['age'] = 25
self.assertEqual(result, expected)
def test_delete_statement_without_where_statement(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'DELETE FROM Person'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
expected['Person'] = []
self.assertEqual(result, expected)
def test_delete_statement_with_where_statement(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'DELETE FROM Person WHERE Name="Carl"'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
del expected['Person'][1]
self.assertEqual(result, expected)
def test_drop_statement_with_one_table_name(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'DROP TABLE Person'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
del expected['Person']
self.assertEqual(result, expected)
def test_drop_statement_with_multiple_table_names(self):
with open(self.db_path, 'w+') as db_file:
db_file.write(self.generate_sample_data())
command = 'DROP TABLE Person, Cars'
result = self.execute_command(command, self.db_path)
expected = literal_eval(self.generate_sample_data())
del expected['Person']
del expected['Cars']
self.assertEqual(result, expected)
# Helper methods
def execute_command(self, command, db_path):
stream = InputStream(command)
lexer = DatabaseLexer(stream)
tokens = CommonTokenStream(lexer)
parser = DatabaseParser(tokens)
tree = parser.commands()
command_parser = CommandParser(db_path)
walker = ParseTreeWalker()
walker.walk(command_parser, tree)
return command_parser.api.data
def generate_sample_data(self):
return """{
"Person": [
{
"Name": "John",
"age": 23,
"married": "false"
},
{
"Name": "Carl",
"Surname": "Paul",
"age": 23,
"married": "false"
}
],
"Cars": [
{
"Mark": "Audi",
"age": 23
},
{
"Mark": "Mercedez-Benz",
"age": 1,
"crashed": "false"
}
]
}"""
|
import random
import numpy as np
import torch
import torch.utils.data
from torch.utils.data.dataset import Dataset
from os.path import join
import torch.utils.data as data
from PIL import Image
from torchvision import transforms
import os
IMG_EXTENSIONS = [
'.jpg',
    '.png'
]
def default_loader(input_path):
input_image = (Image.open(input_path)).convert('RGB')
return input_image
class IcebergCustomDataSet(Dataset):
"""total datasets."""
def __init__(self, data, labels, transform=None):
self.data = data
self.labels = labels
self.transform = transform
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
sample = {'image': self.data[idx, :, :, :], 'labels': np.asarray([self.labels[idx]])}
if self.transform:
sample = self.transform(sample)
return sample
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, labels = sample['image'], sample['labels']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
# image = image.transpose((2, 0, 1))
image = image.astype(float) / 255
return {'image': torch.from_numpy(image.copy()).float(),
'labels': torch.from_numpy(labels).float()
}
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL.Image randomly with a probability of 0.5."""
def __call__(self, sample):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
image, labels = sample['image'], sample['labels']
if random.random() < 0.5:
image = np.flip(image, 1)
return {'image': image, 'labels': labels}
class RandomVerticalFlip(object):
    """Vertically flip the given PIL.Image randomly with a probability of 0.3."""
def __call__(self, sample):
image, labels = sample['image'], sample['labels']
if random.random() < 0.3:
image = np.flip(image, 0)
return {'image': image, 'labels': labels}
class RandomTranspose(object):
def __call__(self, sample):
image, labels = sample['image'], sample['labels']
        if random.random() < 0.7:
            # swap the two spatial axes; assumes an H x W x C image array
            image = image.transpose((1, 0, 2))
return {'image': image, 'labels': labels}
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
# TODO: make efficient
img = tensor['image'].float()
for t, m, s in zip(img, self.mean, self.std):
t.sub_(m).div_(s)
return {'image': img, 'labels': tensor['labels']}
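# A minimal composition sketch (shapes and values are illustrative): wires the
# transforms above into a DataLoader via torchvision's Compose.
if __name__ == '__main__':
    fake_images = np.random.randint(0, 255, (8, 75, 75, 3)).astype(np.uint8)
    fake_labels = np.random.randint(0, 2, 8)
    dataset = IcebergCustomDataSet(fake_images, fake_labels,
                                   transform=transforms.Compose([RandomHorizontalFlip(),
                                                                 ToTensor()]))
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
    batch = next(iter(loader))
    print(batch['image'].shape, batch['labels'].shape)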
|
from django.views.generic import View, ListView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from .forms import CodeRunForm
from .services import run_code
from .models import CodeRun
class ReplIndexView(LoginRequiredMixin, ListView):
template_name = 'repl/index.html'
queryset = CodeRun.objects.order_by('-id')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['code_run_form'] = CodeRunForm()
return context
class ReplCodeRunResultView(LoginRequiredMixin, DetailView):
model = CodeRun
pk_url_kwarg = 'code_run_id'
template_name = 'repl/code_run_detail.html'
class ReplCodeRunView(LoginRequiredMixin, View):
def post(self, request):
form = CodeRunForm(request.POST)
if form.is_valid():
code_run = run_code(**form.cleaned_data)
return redirect('repl:code-run-detail', code_run_id=code_run.id)
return redirect('repl:index')
|
import os
from setuptools import setup
import offlinecdn
github_url = 'https://github.com/gabegaster/django-offlinecdn'
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.readlines()
long_description = "\n".join(read("README.rst"))
dependencies = []
filename = os.path.join("requirements", "python")
for line in read(filename):
package = line.strip().split('#')[0]
if package:
dependencies.append(package)
setup(
name="django-offlinecdn",
version=offlinecdn.VERSION,
description=("A nice way to allow for online-offline development,"
" but also use cdn's for package dependencies."),
long_description=long_description,
url=github_url,
download_url="%s/archives/master" % github_url,
author='Gabe Gaster',
author_email='gabe.gaster@datascopeanalytics.com',
license='MIT',
packages=[
'offlinecdn',
],
install_requires=dependencies,
zip_safe=False,
)
|
import zipfile
import optparse
from threading import Thread
def crackIt(zFile, pw, i):
try:
zFile.extractall(pwd=pw)
print '[+] Password Found: '+pw
except Exception, e:
print str(i)+' Failed: '+pw
pass
def main():
parser = optparse.OptionParser("ZipCrack usage method. zipcracker.py -f <zipfile> -d <dictionary>")
parser.add_option('-f', dest='zname', type='string', help='specify zip file')
parser.add_option('-d', dest='dname', type='string', help='specify dictionary file')
(options, args) = parser.parse_args()
    if (options.zname is None) or (options.dname is None):
print parser.usage
exit(0)
else:
zname = options.zname
dname = options.dname
try:
i=0
zFile = zipfile.ZipFile(zname)
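            # NOTE: every thread shares this one ZipFile; zipfile objects are not
            # thread-safe, so a stricter version would open a ZipFile per thread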
passFile = open(dname)
for line in passFile.readlines():
i=i+1
password = line.strip('\n')
t = Thread(target=crackIt, args=(zFile, password, i))
t.start()
except Exception, e:
print 'Error : '+str(e)
if __name__ == '__main__':
main()
|
"""
Reformat links and page references
Created by Rui Carmo on 2006-09-12.
Published under the MIT license.
"""
import os
import logging
log = logging.getLogger()
import urlparse
import posixpath
from config import settings
from controllers.wiki import WikiController as wc
from utils.core import Singleton
from utils.timekit import time_since
from plugins import plugin
@plugin
class BaseURI:
__metaclass__ = Singleton
category = 'markup'
tags = ['a']
schemas = {
'*' :{'title': u'unknown protocol linking to %(uri)s','class': u'generic'},
'http' :{'title': u'external link to %(uri)s','class': u'http'},
'https' :{'title': u'secure link to %(uri)s','class': u'https'},
'ftp' :{'title': u'file transfer link to %(uri)s','class': u'ftp'},
'gopher' :{'title': u'(probably deprecated) link to %(uri)s','class': u'ftp'},
'sftp' :{'title': u'secure file transfer link to %(uri)s','class': u'ftp'},
'ssh' :{'title': u'secure shell session to %(uri)s','class': u'terminal'},
'telnet' :{'title': u'(probably insecure) terminal session to %(uri)s','class': u'terminal'},
'mailto' :{'title': u'e-mail to %(uri)s','class': u'mail'},
'outlook':{'title': u'MAPI link to %(uri)s','class': u'mail'},
'skype' :{'title': u'call %(uri)s using Skype','class': u'call'},
'sip' :{'title': u'call %(uri)s using SIP','class': u'call'},
'tel' :{'title': u'call %(uri)s using SIP','class': u'call'},
'callto' :{'title': u'call %(uri)s','class': u'call'},
'cid' :{'title': u'link to attached file %(uri)s', 'class': u'linkedfile'},
'attach' :{'title': u'link to attached file %(uri)s', 'class': u'linkedfile'}
}
def __init__(self):
log.debug(self)
def run(self, serial, tag, tagname, pagename, soup, request, response):
try:
uri = tag['href']
except KeyError:
return True
# Try to handle relative URIs
if uri[0] == '.':
uri = posixpath.normpath(os.path.join(pagename, uri))
# Try to handle the uri as a schema/path pair
schema = ''
path = uri
try:
schema, path = uri.split(':',1)
        except ValueError:
pass
known = False
if schema == '':
alias = wc.resolve_alias(path)
if alias and alias != path:
path = tag['href'] = uri = alias
if path in wc.get_page_mtimes().keys():
known = True
if(schema == ''):
if wc.get_attachment(pagename, path):
tag['href'] = unicode(settings.wiki.media + "/" + pagename + "/" + path)
tag['title'] = self.schemas['attach']['title'] % {'uri':os.path.basename(path)}
tag['class'] = self.schemas['attach']['class']
return False
if(known): # this is a known Wiki link, so there is no need to run it through more plugins
if request is False:
# check for a direct outbound link
# TODO: check x-link handling
if path in wc.link_overrides:
uri = wc.link_overrides[path]
(schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(uri)
tag['href'] = uri
tag['title'] = self.schemas[schema]['title'] % {'uri':uri}
tag['class'] = self.schemas[schema]['class']
return False
tag['href'] = settings.wiki.base + '/' + uri
tag['class'] = "wiki"
try: # to use indexed metadata to annotate links
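                # NOTE: 'i' is expected to be the wiki's page index object (not
                # defined in this module); when it is unavailable the except
                # branch below falls back to a generic title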
last = i.page_info[path]['last-modified']
tag['title'] = _('link_update_format') % (path,time_since(last))
except:
tag['title'] = _('link_defined_notindexed_format') % path
elif('#' == uri[0]):
# this is an in-page anchor
if request != False:
tag['href'] = request.path + uri
tag['class'] = "anchor"
else:
if request is False:
# remove unknown wiki links for RSS feeds
tag.replace_with(tag.contents[0])
# format for online viewing
try:
exists = tag['class']
return True #we're done here, but this tag may need handling elsewhere
except:
tag['href'] = settings.wiki.base + '/' + uri
tag['class'] = "wikiunknown"
tag['title'] = _('link_undefined_format') % path
elif(schema in self.schemas.keys()): # this is an external link, so reformat it
tag['title'] = self.schemas[schema]['title'] % {'uri':uri}
tag['class'] = self.schemas[schema]['class']
#tag['target'] = '_blank'
else: # assume this is an interwiki link (i.e., it seems to have a custom schema)
tag['title'] = _('link_interwiki_format') % uri
tag['class'] = "interwiki"
tag['target'] = '_blank'
# Signal that this tag needs further processing
return True
# We're done
return False
|
import argparse
import os
import pandas as pd
import cPickle as pickle
from sklearn.preprocessing import LabelEncoder
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parser.parse_args()
df = pd.read_csv('data/train.csv')
df['whaleID'] = df['whaleID'].apply(lambda x: x.split('_')[1])
encoder = LabelEncoder()
y = df['whaleID'].values.astype(int)
encoder.fit(y)
pickle.dump(encoder, open('models/encoder.pkl', 'wb'))
print 'Wrote encoder to models/encoder.pkl'
print
print encoder.classes_
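# A minimal follow-up sketch (not executed here): reloading the pickled encoder
# and mapping whale ids to dense class indices and back.
#
#   encoder = pickle.load(open('models/encoder.pkl', 'rb'))
#   codes = encoder.transform(y[:5])           # ids -> 0..n_classes-1
#   ids = encoder.inverse_transform(codes)     # and back again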
|
from collections import namedtuple
""" A container for transitions. """
Transition = namedtuple('Transition',
('id', 'state', 'action', 'reward', 'state_', 'done'))
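# A minimal usage sketch: a namedtuple of this shape unzips cleanly into
# per-field batches, the usual trick when sampling minibatches from replay memory.
if __name__ == '__main__':
    steps = [Transition(i, 's%d' % i, 'a', 1.0, 's%d' % (i + 1), False)
             for i in range(3)]
    batch = Transition(*zip(*steps))
    print(batch.reward)  # (1.0, 1.0, 1.0)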
|
from functools import partial
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from tsfresh import defaults
from tsfresh.feature_extraction.settings import from_columns
from tsfresh.transformers.feature_augmenter import FeatureAugmenter
from tsfresh.transformers.feature_selector import FeatureSelector
from tsfresh.utilities.dataframe_functions import (
get_range_values_per_column,
impute_dataframe_range,
)
class RelevantFeatureAugmenter(BaseEstimator, TransformerMixin):
"""
Sklearn-compatible estimator to calculate relevant features out of a time series and add them to a data sample.
    Like many other sklearn estimators, this estimator works in two steps:
    In the fit phase, all possible time series features are calculated using the time series that is set by the
    set_timeseries_container function (if the features are not manually changed by handing in a
feature_extraction_settings object). Then, their significance and relevance to the target is computed using
statistical methods and only the relevant ones are selected using the Benjamini Hochberg procedure. These features
are stored internally.
In the transform step, the information on which features are relevant from the fit step is used and those features
are extracted from the time series. These extracted features are then added to the input data sample.
This estimator is a wrapper around most of the functionality in the tsfresh package. For more information on the
subtasks, please refer to the single modules and functions, which are:
* Settings for the feature extraction: :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters`
* Feature extraction method: :func:`~tsfresh.feature_extraction.extraction.extract_features`
* Extracted features: :mod:`~tsfresh.feature_extraction.feature_calculators`
* Feature selection: :func:`~tsfresh.feature_selection.feature_selector.check_fs_sig_bh`
    This estimator works analogously to the :class:`~tsfresh.transformers.feature_augmenter.FeatureAugmenter` with
    the difference that this estimator only outputs and calculates the relevant features,
    whereas the other outputs all features.
Also for this estimator, two datasets play a crucial role:
1. the time series container with the timeseries data. This container (for the format see
:mod:`~tsfresh.feature_extraction.extraction`) contains the data which is used for calculating the
features. It must be groupable by ids which are used to identify which feature should be attached to which row
in the second dataframe:
2. the input data, where the features will be added to.
Imagine the following situation: You want to classify 10 different financial shares and you have their development
in the last year as a time series. You would then start by creating features from the metainformation of the
shares, e.g. how long they were on the market etc. and filling up a table - the features of one stock in one row.
>>> # Fill in the information of the stocks and the target
>>> X_train, X_test, y_train = pd.DataFrame(), pd.DataFrame(), pd.Series()
You can then extract all the relevant features from the time development of the shares, by using this estimator:
>>> train_time_series, test_time_series = read_in_timeseries() # get the development of the shares
>>> from tsfresh.transformers import RelevantFeatureAugmenter
>>> augmenter = RelevantFeatureAugmenter()
>>> augmenter.set_timeseries_container(train_time_series)
>>> augmenter.fit(X_train, y_train)
>>> augmenter.set_timeseries_container(test_time_series)
>>> X_test_with_features = augmenter.transform(X_test)
X_test_with_features will then contain the same information as X_test (with all the meta information you have
probably added) plus some relevant time series features calculated on the time series you handed in.
Please keep in mind that the time series you hand in before fit or transform must contain data for the rows that are
present in X.
    If you set filter_only_tsfresh_features to True, your manually-created features that were present in X_train (or
X_test) before using this estimator are not touched. Otherwise, also those features are evaluated and may be
rejected from the data sample, because they are irrelevant.
For a description what the parameters column_id, column_sort, column_kind and column_value mean, please see
:mod:`~tsfresh.feature_extraction.extraction`.
You can control the feature extraction in the fit step (the feature extraction in the transform step is done
automatically) as well as the feature selection in the fit step by handing in settings.
However, the default settings which are used if you pass no flags are often quite sensible.
"""
def __init__(
self,
filter_only_tsfresh_features=True,
default_fc_parameters=None,
kind_to_fc_parameters=None,
column_id=None,
column_sort=None,
column_kind=None,
column_value=None,
timeseries_container=None,
chunksize=defaults.CHUNKSIZE,
n_jobs=defaults.N_PROCESSES,
show_warnings=defaults.SHOW_WARNINGS,
disable_progressbar=defaults.DISABLE_PROGRESSBAR,
profile=defaults.PROFILING,
profiling_filename=defaults.PROFILING_FILENAME,
profiling_sorting=defaults.PROFILING_SORTING,
test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
fdr_level=defaults.FDR_LEVEL,
hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
ml_task="auto",
multiclass=False,
n_significant=1,
multiclass_p_values="min",
):
"""
Create a new RelevantFeatureAugmenter instance.
:param filter_only_tsfresh_features: Whether to touch the manually-created features during feature selection or
not.
:type filter_only_tsfresh_features: bool
:param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
more information.
:type default_fc_parameters: dict
:param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value) will be used instead of the default_fc_parameters. This means that kinds
            for which kind_to_fc_parameters does not have any entries will be ignored by the feature selection.
:type kind_to_fc_parameters: dict
:param column_id: The column with the id. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_id: basestring
:param column_sort: The column with the sort data. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_sort: basestring
:param column_kind: The column with the kind data. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_kind: basestring
:param column_value: The column with the values. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_value: basestring
:param chunksize: The size of one chunk that is submitted to the worker
process for the parallelisation. Where one chunk is defined as a
singular time series for one id and one kind. If you set the chunksize
to 10, then it means that one task is to calculate all features for 10
            time series. If it is set to None, depending on the distributor,
heuristics are used to find the optimal chunksize. If you get out of
memory exceptions, you can try it with the dask distributor and a
smaller chunksize.
:type chunksize: None or int
:param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
:type n_jobs: int
:param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
:type show_warnings: bool
:param disable_progressbar: Do not show a progressbar while doing the calculation.
:type disable_progressbar: bool
:param profile: Turn on profiling during feature extraction
:type profile: bool
:param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
more information)
:type profiling_sorting: basestring
:param profiling_filename: Where to save the profiling results.
:type profiling_filename: basestring
:param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
(currently unused)
:type test_for_binary_target_binary_feature: str
:param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
:type test_for_binary_target_real_feature: str
:param test_for_real_target_binary_feature: Which test to be used for real target, binary feature
(currently unused)
:type test_for_real_target_binary_feature: str
:param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
:type test_for_real_target_real_feature: str
:param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage
of irrelevant features among all created features.
:type fdr_level: float
:param hypotheses_independent: Can the significance of the features be assumed to be independent?
Normally, this should be set to False as the features are never
independent (e.g. mean and median)
:type hypotheses_independent: bool
:param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
Defaults to `'auto'`, meaning the intended task is inferred from `y`.
If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
else regression.
:type ml_task: str
:param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
are selected. Multiclass requires the features to be statistically significant for
predicting n_significant classes.
:type multiclass: bool
:param n_significant: The number of classes for which features should be statistically significant predictors
to be regarded as 'relevant'
:type n_significant: int
:param multiclass_p_values: The desired method for choosing how to display multiclass p-values for each feature.
Either `'avg'`, `'max'`, `'min'`, `'all'`. Defaults to `'min'`, meaning the p-value
with the highest significance is chosen. When set to `'all'`, the attributes
`self.feature_importances_` and `self.p_values` are of type pandas.DataFrame, where
each column corresponds to a target class.
:type multiclass_p_values: str
"""
self.filter_only_tsfresh_features = filter_only_tsfresh_features
self.default_fc_parameters = default_fc_parameters
self.kind_to_fc_parameters = kind_to_fc_parameters
self.column_id = column_id
self.column_sort = column_sort
self.column_kind = column_kind
self.column_value = column_value
self.timeseries_container = timeseries_container
self.chunksize = chunksize
self.n_jobs = n_jobs
self.show_warnings = show_warnings
self.disable_progressbar = disable_progressbar
self.profile = profile
self.profiling_filename = profiling_filename
self.profiling_sorting = profiling_sorting
self.test_for_binary_target_binary_feature = (
test_for_binary_target_binary_feature
)
self.test_for_binary_target_real_feature = test_for_binary_target_real_feature
self.test_for_real_target_binary_feature = test_for_real_target_binary_feature
self.test_for_real_target_real_feature = test_for_real_target_real_feature
self.fdr_level = fdr_level
self.hypotheses_independent = hypotheses_independent
self.ml_task = ml_task
self.multiclass = multiclass
self.n_significant = n_significant
self.multiclass_p_values = multiclass_p_values
# attributes
self.feature_extractor = None
self.feature_selector = None
def set_timeseries_container(self, timeseries_container):
"""
Set the timeseries, with which the features will be calculated. For a format of the time series container,
please refer to :mod:`~tsfresh.feature_extraction.extraction`. The timeseries must contain the same indices
as the later DataFrame, to which the features will be added (the one you will pass to :func:`~transform` or
:func:`~fit`). You can call this function as often as you like, to change the timeseries later
(e.g. if you want to extract for different ids).
:param timeseries_container: The timeseries as a pandas.DataFrame or a dict. See
:mod:`~tsfresh.feature_extraction.extraction` for the format.
:type timeseries_container: pandas.DataFrame or dict
:return: None
:rtype: None
"""
self.timeseries_container = timeseries_container
def fit(self, X, y):
"""
        Use the given timeseries from :func:`~set_timeseries_container`, calculate features from it and add them
        to the data sample X (which can contain other manually-designed features).
Then determine which of the features of X are relevant for the given target y.
Store those relevant features internally to only extract them in the transform step.
        If filter_only_tsfresh_features is True, only the newly, automatically added features are candidates for
        rejection. If it is False, the features that are already present in the DataFrame are considered as well.
:param X: The data frame without the time series features. The index rows should be present in the timeseries
and in the target vector.
:type X: pandas.DataFrame or numpy.array
        :param y: The target vector used to define which features are relevant.
:type y: pandas.Series or numpy.array
        :return: the fitted estimator with the information about which features are relevant.
:rtype: RelevantFeatureAugmenter
"""
self._fit_and_augment(X, y)
return self
def transform(self, X):
"""
        After the fit step, it is known which features are relevant. This step extracts only those from the time
        series handed in with the function :func:`~set_timeseries_container`.
If filter_only_tsfresh_features is False, also delete the irrelevant,
already present features in the data frame.
:param X: the data sample to add the relevant (and delete the irrelevant) features to.
:type X: pandas.DataFrame or numpy.array
:return: a data sample with the same information as X, but with added relevant time series features and
deleted irrelevant information (only if filter_only_tsfresh_features is False).
:rtype: pandas.DataFrame
"""
if self.timeseries_container is None:
raise RuntimeError(
"You have to provide a time series using the set_timeseries_container function before."
)
if self.feature_selector is None:
raise RuntimeError("You have to call fit before calling transform.")
if self.feature_selector.relevant_features is None:
raise RuntimeError("You have to call fit before calling transform.")
self.feature_extractor.set_timeseries_container(self.timeseries_container)
relevant_time_series_features = set(
self.feature_selector.relevant_features
) - set(pd.DataFrame(X).columns)
relevant_extraction_settings = from_columns(relevant_time_series_features)
# Set imputing strategy
impute_function = partial(
impute_dataframe_range,
col_to_max=self.col_to_max,
col_to_min=self.col_to_min,
col_to_median=self.col_to_median,
)
relevant_feature_extractor = FeatureAugmenter(
kind_to_fc_parameters=relevant_extraction_settings,
default_fc_parameters={},
column_id=self.feature_extractor.column_id,
column_sort=self.feature_extractor.column_sort,
column_kind=self.feature_extractor.column_kind,
column_value=self.feature_extractor.column_value,
chunksize=self.feature_extractor.chunksize,
n_jobs=self.feature_extractor.n_jobs,
show_warnings=self.feature_extractor.show_warnings,
disable_progressbar=self.feature_extractor.disable_progressbar,
impute_function=impute_function,
profile=self.feature_extractor.profile,
profiling_filename=self.feature_extractor.profiling_filename,
profiling_sorting=self.feature_extractor.profiling_sorting,
)
relevant_feature_extractor.set_timeseries_container(
self.feature_extractor.timeseries_container
)
X_augmented = relevant_feature_extractor.transform(X)
if self.filter_only_tsfresh_features:
return X_augmented.copy().loc[
:, self.feature_selector.relevant_features + X.columns.tolist()
]
else:
return X_augmented.copy().loc[:, self.feature_selector.relevant_features]
def fit_transform(self, X, y):
"""
Equivalent to :func:`~fit` followed by :func:`~transform`; however, this is faster than performing those steps
separately, because it avoids re-extracting relevant features for training data.
:param X: The data frame without the time series features. The index rows should be present in the timeseries
and in the target vector.
:type X: pandas.DataFrame or numpy.array
        :param y: The target vector used to define which features are relevant.
:type y: pandas.Series or numpy.array
:return: a data sample with the same information as X, but with added relevant time series features and
deleted irrelevant information (only if filter_only_tsfresh_features is False).
:rtype: pandas.DataFrame
"""
X_augmented = self._fit_and_augment(X, y)
selected_features = X_augmented.copy().loc[
:, self.feature_selector.relevant_features
]
if self.filter_only_tsfresh_features:
selected_features = pd.merge(
selected_features, X, left_index=True, right_index=True, how="left"
)
return selected_features
def _fit_and_augment(self, X, y):
"""
Helper for the :func:`~fit` and :func:`~fit_transform` functions, which does most of the work described in
:func:`~fit`.
:param X: The data frame without the time series features. The index rows should be present in the timeseries
and in the target vector.
:type X: pandas.DataFrame or numpy.array
        :param y: The target vector used to define which features are relevant.
:type y: pandas.Series or numpy.array
        :return: a data sample with the extracted time series features. If filter_only_tsfresh_features is False,
            the data sample will also include the information in X.
:rtype: pandas.DataFrame
"""
if self.timeseries_container is None:
raise RuntimeError(
"You have to provide a time series using the set_timeseries_container function before."
)
self.feature_extractor = FeatureAugmenter(
default_fc_parameters=self.default_fc_parameters,
kind_to_fc_parameters=self.kind_to_fc_parameters,
column_id=self.column_id,
column_sort=self.column_sort,
column_kind=self.column_kind,
column_value=self.column_value,
timeseries_container=self.timeseries_container,
chunksize=self.chunksize,
n_jobs=self.n_jobs,
show_warnings=self.show_warnings,
disable_progressbar=self.disable_progressbar,
profile=self.profile,
profiling_filename=self.profiling_filename,
profiling_sorting=self.profiling_sorting,
)
self.feature_selector = FeatureSelector(
test_for_binary_target_binary_feature=self.test_for_binary_target_binary_feature,
test_for_binary_target_real_feature=self.test_for_binary_target_real_feature,
test_for_real_target_binary_feature=self.test_for_real_target_binary_feature,
test_for_real_target_real_feature=self.test_for_real_target_real_feature,
fdr_level=self.fdr_level,
hypotheses_independent=self.hypotheses_independent,
n_jobs=self.n_jobs,
chunksize=self.chunksize,
ml_task=self.ml_task,
multiclass=self.multiclass,
n_significant=self.n_significant,
multiclass_p_values=self.multiclass_p_values,
)
if self.filter_only_tsfresh_features:
# Do not merge the time series features to the old features
X_tmp = pd.DataFrame(index=X.index)
else:
X_tmp = X
X_augmented = self.feature_extractor.transform(X_tmp)
(
self.col_to_max,
self.col_to_min,
self.col_to_median,
) = get_range_values_per_column(X_augmented)
X_augmented = impute_dataframe_range(
X_augmented,
col_to_max=self.col_to_max,
col_to_median=self.col_to_median,
col_to_min=self.col_to_min,
)
self.feature_selector.fit(X_augmented, y)
return X_augmented
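
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): because the augmenter
# follows the scikit-learn transformer API, it can be used inside a Pipeline.
# `df_ts`, `X_train` and `y_train` below are hypothetical placeholders.
#
# >>> from sklearn.pipeline import Pipeline
# >>> from sklearn.ensemble import RandomForestClassifier
# >>> ppl = Pipeline([
# ...     ("augmenter", RelevantFeatureAugmenter(column_id="id", column_value="value")),
# ...     ("classifier", RandomForestClassifier()),
# ... ])
# >>> ppl.set_params(augmenter__timeseries_container=df_ts)
# >>> ppl.fit(X_train, y_train)
# ---------------------------------------------------------------------------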
|
class GradientDescent:
"""
Adapt the weights in the opposite direction of the gradient to reduce the
error.
"""
def __call__(self, weights, gradient, learning_rate=0.1):
return weights - learning_rate * gradient
class Momentum:
"""
    Smooth out changes of direction in the gradient by accumulating a decaying
    sum of previous gradients and adding it to the current one.
"""
def __init__(self):
self.previous = None
def __call__(self, gradient, rate=0.9):
gradient = gradient.copy()
if self.previous is None:
self.previous = gradient.copy()
else:
assert self.previous.shape == gradient.shape
gradient += rate * self.previous
self.previous = gradient.copy()
return gradient
class WeightDecay:
"""
Slowly moves each weight closer to zero for regularization. This can help
the model to find simpler solutions.
"""
def __call__(self, weights, rate=1e-4):
return (1 - rate) * weights
class WeightTying:
"""
    Constrain groups of slices of the gradient to have the same value by
    averaging them. Should be applied to the initial weights and each gradient.
"""
def __init__(self, *groups):
for group in groups:
assert group and hasattr(group, '__len__')
assert all([isinstance(x[0], int) for x in group])
assert all([isinstance(y, (slice, int)) for x in group for y in x])
self.groups = groups
def __call__(self, matrices):
matrices = matrices.copy()
for group in self.groups:
slices = [matrices[slice_] for slice_ in group]
assert all([x.shape == slices[0].shape for x in slices]), (
'All slices within a group must have the same shape. '
'Shapes are ' + ', '.join(str(x.shape) for x in slices) + '.')
average = sum(slices) / len(slices)
assert average.shape == slices[0].shape
for slice_ in group:
matrices[slice_] = average
return matrices
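
# Illustrative usage sketch (not part of the original module): composing the
# optimizers above to fit a simple quadratic; assumes numpy arrays are used
# for weights and gradients.
if __name__ == '__main__':
    import numpy as np
    sgd, momentum, decay = GradientDescent(), Momentum(), WeightDecay()
    target = np.array([1.0, 2.0, 3.0])
    weights = np.zeros(3)
    for _ in range(100):
        gradient = 2 * (weights - target)  # gradient of the squared error
        weights = decay(sgd(weights, momentum(gradient)))
    print(weights)  # converges towards [1, 2, 3]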
|
'''
URL patterns for the 'enseignants' (teachers) application
@author: hal
'''
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='base.html')),
)
|
import matplotlib
matplotlib.use('TkAgg') # THIS MAKES IT FAST!
import numpy
import scipy
import struct
import pyaudio
import threading
import pylab
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
def __init__(self):
"""minimal garb is executed when class is loaded."""
self.RATE=48100
self.BUFFERSIZE=2**12 #1024 is a good buffer size
self.secToRecord=.1
self.threadsDieNow=False
self.newAudio=False
def setup(self):
"""initialize sound card."""
#TODO - windows detection vs. alsa or something for linux
#TODO - try/except for sound card selection/initiation
self.buffersToRecord=int(self.RATE*self.secToRecord/self.BUFFERSIZE)
if self.buffersToRecord==0: self.buffersToRecord=1
self.samplesToRecord=int(self.BUFFERSIZE*self.buffersToRecord)
self.chunksToRecord=int(self.samplesToRecord/self.BUFFERSIZE)
self.secPerPoint=1.0/self.RATE
self.p = pyaudio.PyAudio()
self.inStream = self.p.open(format=pyaudio.paInt16,channels=1,
rate=self.RATE,input=True,frames_per_buffer=self.BUFFERSIZE)
self.xsBuffer=numpy.arange(self.BUFFERSIZE)*self.secPerPoint
self.xs=numpy.arange(self.chunksToRecord*self.BUFFERSIZE)*self.secPerPoint
self.audio=numpy.empty((self.chunksToRecord*self.BUFFERSIZE),dtype=numpy.int16)
def close(self):
"""cleanly back out and release sound card."""
self.p.close(self.inStream)
### RECORDING AUDIO ###
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString=self.inStream.read(self.BUFFERSIZE)
        return numpy.frombuffer(audioString,dtype=numpy.int16)
def record(self,forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow: break
for i in range(self.chunksToRecord):
self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
self.newAudio=True
if forever==False: break
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow=True
### MATH ###
def downsample(self,data,mult):
"""Given 1D data, return the binned average."""
overhang=len(data)%mult
if overhang: data=data[:-overhang]
        data=numpy.reshape(data,(len(data)//mult,mult))
data=numpy.average(data,1)
return data
def fft(self,data=None,trimBy=10,logScale=False,divBy=100):
        if data is None:
data=self.audio.flatten()
left,right=numpy.split(numpy.abs(numpy.fft.fft(data)),2)
ys=numpy.add(left,right[::-1])
if logScale:
ys=numpy.multiply(20,numpy.log10(ys))
xs=numpy.arange(self.BUFFERSIZE/2,dtype=float)
if trimBy:
i=int((self.BUFFERSIZE/2)/trimBy)
ys=ys[:i]
xs=xs[:i]*self.RATE/self.BUFFERSIZE
if divBy:
ys=ys/float(divBy)
return xs,ys
### VISUALIZATION ###
def plotAudio(self):
"""open a matplotlib popup window showing audio data."""
pylab.plot(self.audio.flatten())
pylab.show()
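
# Illustrative usage sketch (not part of the original class): record for one
# second in the background, then plot; requires a working microphone.
#
# SR = SwhRecorder()
# SR.setup()
# SR.continuousStart()
# import time; time.sleep(1)  # let the background thread fill self.audio
# SR.continuousEnd()
# SR.plotAudio()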
|
import os
import sys
import numpy as np
from collections import Counter
from tqdm import tqdm
from moviepy.video.io.ffmpeg_reader import ffmpeg_read_image
from moviepy.video.io.ffmpeg_writer import ffmpeg_write_image
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.fx.resize import resize
PARAMETERS = {"VERBOSE":True}
"""
These lines make it possible to change the library's parameters, for example:
>>> from pompei import set_parameter
>>> set_parameter("VERBOSE", False)
"""
def set_parameter(parameter, value):
PARAMETERS[parameter] = value
def verbose_print(s):
if PARAMETERS['VERBOSE']:
sys.stdout.write(s)
sys.stdout.flush()
def vtqdm(l, **kw):
""" applies tqdm (progress bar) to the list to iterate, only if
VERBOSE=True. """
return tqdm(l, **kw) if PARAMETERS['VERBOSE'] else l
def split_array(arr, nh, nw):
""" Splits a 2D-ish array into a mosaic of smaller arrays.
In short, this array:
[ [ A ] ]
    is split like this:
[ [ [A11] [A12] ]
[ [A21] [A22] ] ]
"""
h,w,_ = arr.shape
dh, dw = 1.0*h/nh, 1.0*w/nw
return [[ arr[ int(np.round(dh*i)): int(np.round(dh*(i+1))),
int(np.round(dw*j)): int(np.round(dw*(j+1)))]
for j in range(int(nw))] for i in range(int(nh))]
def stack_array(arr):
""" Reconstitutes a splitted array. Kind of the inverse of split-array.
In short, this array
[ [ [A11] [A12] ]
[ [A21] [A22] ] ]
becomes this:
[ [ A11 A12 ]
[ A21 A22 ] ]
"""
return np.vstack([np.hstack(line) for line in arr])
def map_array(fun, arr):
""" applies a function to each element of a 2D-ish array.
So this array:
[ [ [A11] [A12] ]
[ [A21] [A22] ] ]
becomes this:
[ [ [fun( A11 )] [fun( A12 )] ]
[ [fun( A21 )] [fun( A22 )] ] ]
"""
return np.array([[fun(e) for e in line] for line in arr])
def mean_color(arr):
""" Returns [R,G,B], mean color of the WxHx3 numpy image ``arr``. """
return arr.mean(axis=0).mean(axis=0)
def colors_signature(image, nh, nw):
""" Returns the signature of the image.
[ [ RGB1, RGB2, RGB3]
[ RGB4, RGB5, RGB6] ]
and returns [R1 G1 B1 R2 G2 B2 R3 G3 B3... R6 G6 B6]
"""
return map_array(mean_color, split_array(image, nh, nw)).flatten()
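
# Quick sanity sketch (not in the original source): stack_array inverts
# split_array, and colors_signature flattens the per-region mean colors.
#
# >>> arr = np.arange(24).reshape(4, 2, 3).astype(float)  # a 4x2 RGB "image"
# >>> np.array_equal(stack_array(split_array(arr, 2, 1)), arr)
# True
# >>> colors_signature(arr, 2, 1).shape
# (6,)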
class MovieFrames:
"""
    Base class in Pompei. Objects represent a series of thumbnails, which are
    produced from a movie with MovieFrames.from_movie(), are analyzed with
    MovieFrames.find_mosaicable_frames(), and are used to reconstitute mosaics
    with MovieFrames.make_mosaic().
"""
def __init__(self, folder):
filename = lambda s: os.path.join(folder,s)
self.folder = folder
self.signatures = np.loadtxt(filename("signatures.txt"))
self.signatures_dim = np.loadtxt(filename("signatures_dim.txt"))
self.times = np.loadtxt(filename("times.txt"))
self.imagestack = ffmpeg_read_image(filename("all_frames.png"),
with_mask=False)
self.im_h, self.dw, _ = self.imagestack.shape
self.sig_h, self.sig_w = self.signatures.shape
        self.dh = self.im_h // self.sig_h
def __getitem__(self, index):
return self.imagestack[index*self.dh:(index+1)*self.dh]
def __len__(self):
return self.sig_h
def __iter__(self):
for i in range(self.sig_h):
yield self[i]
    def extract(self, index, filename=None):
        if hasattr(index, '__iter__'):
            for ind in index:
                self.extract(ind, filename)
            return
        if filename is None:
            filename = os.path.join(self.folder, "extracted", "%05d.png" % index)
        ffmpeg_write_image(filename, self[index])
@staticmethod
def from_movie(filename, foldername=None, tt=None, fps=None,
crop=(0,0), thumbnails_width= 120, sig_dim=(2,2)):
"""
Extracts frames from a movie and turns them into thumbnails.
Parameters
==========
filename
Name of the legally obtained video file (batman.avi, superman.mp4,
etc.)
foldername
The extracted frames and more infos will be stored in that directory.
tt
          An array of times [t1, t2...] where to extract the frames. Optional if
          ``fps`` is provided instead.
crop
          Number of seconds to crop at the beginning and the end of the video,
          to avoid opening and end credits:
          (seconds_cropped_at_the_beginning, seconds_cropped_at_the_end)
thumbnails_width
Width in pixels of the thumbnails obtained by resizing the frames of
the movie.
sig_dim
          Number of pixels to consider when reducing the frames and thumbnails
          to simple (representative) signatures.
          sig_dim=(3,2) means 3x2 (WxH) pixels.
"""
if foldername is None:
name, ext = os.path.splitext(filename)
foldername = name
if not os.path.exists(foldername):
os.mkdir(foldername)
clip = VideoFileClip(filename).fx( resize, width=thumbnails_width)
if not os.path.exists(foldername):
os.mkdir(foldername)
if tt is None:
tt = np.arange(0,clip.duration, 1.0/fps)
t1, t2 = crop[0], clip.duration-crop[1]
tt = tt[ (tt>=t1)*(tt<=t2)]
signatures = []
result = np.zeros((clip.h*len(tt), clip.w, 3))
for i,t in vtqdm(enumerate(sorted(tt)), total=len(tt)):
frame= clip.get_frame(t)
result[i*clip.h:(i+1)*clip.h] = frame
signatures.append(colors_signature(frame, sig_dim[0], sig_dim[1]))
target = os.path.join(foldername, "all_frames.png")
ffmpeg_write_image(target, result)
for (obj, name) in [(signatures, 'signatures'), (tt, 'times'),
(sig_dim, 'signatures_dim')]:
target = os.path.join(foldername, name+'.txt')
np.savetxt(target, np.array(obj))
return MovieFrames(foldername)
def find_mosaicable_frames(self, nclusters = 8, luminosity_threshold=50):
""" Finds the frames whose colors resemble most the main colors of the
movie, and thus would make good candidates for a mosaic. This is
highly experimental.
Requires Scikit-learn installed.
Returns [(frame1,score1), (frame2, score2)...] where the lowest
scores indicates frames better suited for a mosaic.
"""
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError("sklearn.Cluster not found. You must install"
" Scikit-learn to be able to use 'find_mosaicable_frames'.")
        movie_colors = np.vstack(self.signatures.reshape((self.sig_h, self.sig_w // 3, 3)))
movie_kmeans = (KMeans(n_clusters=nclusters, random_state=0).fit(movie_colors))
def score(im):
""" Returns the sum of the squared distance of each pixel to the
nearest color in the movie_kmeans set."""
imcolors = np.vstack(im).astype(float)
return sum(movie_kmeans.transform(imcolors).min(axis=1)**2)
scores = [score(im) for im in vtqdm(self)]
sorted_scores= sorted([(i,score) for (i,score) in enumerate(scores)
if self[i].mean()>luminosity_threshold],
key=lambda s:s[1])
return sorted_scores
def _score(self, best_matches, image_signatures):
""" Returns the squared distance (matching error) between a frame and
a signature. """
f = lambda ind: self.signatures[ind]
matches_signatures =np.vstack( map_array(f, best_matches) )
return np.sqrt(np.sum((matches_signatures - image_signatures)**2))
def _best_match_index(self, frame_signature, forbidden=None):
""" Finds the movie frame that matches best the given signature. Will
try all movie frames except the ones designated as forbidden (which
are frames already too frequent in the mosaic). """
# an array of all the matching distances frame-signature
diffs = np.mean((1.0*frame_signature-self.signatures)**2, axis=1)
if forbidden is not None:
for ind in forbidden:
# Next line eliminates "de facto" the index from the possible
# candidates for a best match.
diffs[ind] = len(frame_signature)*500000
ind = np.argmin(diffs)
return ind
def _minimize_occurences(self, best_matches, image_signatures, maxiter,
max_occurences='auto', matching_quality=.7):
""" Diversifies the mosaic by replacing the overused frames by different
(and non optimal) frames.
Parameters
==========
best_matches
          The current 'solution': a 2D array of frame numbers corresponding to
          the frames currently chosen to represent the corresponding region
          in the mosaic.
image_signatures
The 2D array of signatures of the image to turn into a mosaic.
maxiter
Number of loops after which to stop (to avoid the program running
indefinitely).
max_occurences
          The algorithm will stop when the most frequent frame is represented
          max_occurences times. If left to 'auto', the algorithm stops
          after `maxiter` operations.
matching_quality
          The program will stop when the current total matching error exceeds
          initial_score / matching_quality. This prevents the program from
          degrading the quality of the mosaic too much.
"""
shape = best_matches.shape
initial_score = self._score(best_matches, image_signatures)
if max_occurences == 'auto':
max_occurences = -1
verbose_print("Minimizing occurences...")
best_matches = best_matches.flatten()
forbidden = []
npass, number, ratio = 0, 1000, 2
while ((npass < maxiter) and (number > max_occurences) and
(ratio > matching_quality)):
ind, number = Counter(best_matches).most_common(1)[0]
forbidden.append(ind)
def score(i):
_score = lambda index : np.sum( (image_signatures[i] -
self.signatures[index])**2)
score_before = _score(best_matches[i])
new_ind = self._best_match_index( image_signatures[i], forbidden)
score_after = _score(new_ind)
return np.sqrt(score_before) - np.sqrt(score_after)
indices = sorted ( (best_matches==ind).nonzero()[0], key=score)
            ix = max_occurences if (max_occurences != -1) else len(indices) // 2
indices_to_change = indices[ix:]
for ind in indices_to_change:
best_matches[ind] = self._best_match_index(
image_signatures[ind], forbidden)
if (max_occurences == -1):
                new_score = self._score(best_matches.reshape(shape),
                                        image_signatures)
ratio = 1.0*initial_score/new_score
npass += 1
new_score = self._score(best_matches.reshape(shape),
image_signatures)
ratio = 1.0*initial_score/new_score
verbose_print("Done. Max occurence:%d, quality:%.04f.\n"%(number, ratio))
return best_matches.reshape(shape), number
def _eliminate_similar_neighbors(self, best_matches, image_signatures,
max_occurences, spatial_window, time_window):
""" Replaces some frames to avoid that frames too close in the movie end
up too close in the mosaic.
"""
counter = Counter(best_matches.flatten())
h,w = best_matches.shape
max_inds = len(self)
verbose_print("Eliminating similar neighbours...")
ctr=0
for i in vtqdm(range(h)):
for j in range(w):
ind = best_matches[i,j]
x1, x2 = max(0, j-spatial_window), min(w, j+spatial_window)
y1, y2 = max(0, i-spatial_window), min(h, i+spatial_window)
spatial_indices = list(best_matches[y1:y2, x1:x2].flatten())
spatial_indices.pop(spatial_indices.index(ind))
                forbidden_indices = sum([list(range(max(0, k - time_window),
                                                    min(max_inds, k + time_window + 1)))
                                         for k in spatial_indices], [])
# frames that are forbidden for the given position, i.e. frames
# that are too near in time from the neighbouring frames.
ff = [k for (k,v) in counter.items()
if (v>=max_occurences) and (k!=ind)]
forbidden_indices = ff+list(set(forbidden_indices))
new_ind = self._best_match_index( image_signatures[w*i+j],
forbidden_indices)
if new_ind != ind:
ctr +=1
counter[new_ind] += 1
best_matches[i,j] = new_ind
verbose_print("Done. %d frames changed\n"%ctr)
return best_matches
def _find_best_matches(self, image, frames_in_width=50, max_occurences='auto',
maxiter=500, time_window=2, spatial_window=2,
matching_quality=0.7):
"""
See method .make_mosaic
"""
w = frames_in_width
im_h, im_w, _ = image.shape
h = int(np.round(w* (1.0*im_h / im_w) * (1.0*self.dw/ self.dh)) )
signature = lambda im: colors_signature(im, *self.signatures_dim)
image_signatures = map_array(signature, split_array(image, h, w))
# first guess, will be refined just after
image_signatures = np.vstack(image_signatures)
# FIRST PASS: WE CHOOSE THE BEST FRAME FOR EVERY IMAGE REGION
best_matches = np.array([self._best_match_index(sig)
for sig in image_signatures]).reshape((h,w))
# SECOND PASS: MINIMIZE OCCURENCES
best_matches, max_occurences = self._minimize_occurences(best_matches,
image_signatures, maxiter, max_occurences,
matching_quality)
        # THIRD PASS: ELIMINATE SIMILAR NEIGHBORS
if spatial_window:
best_matches = self._eliminate_similar_neighbors(best_matches,
image_signatures, max_occurences, spatial_window,
time_window)
return best_matches, self._score(best_matches, image_signatures)
def make_mosaic(self, image, outputfile, frames_in_width=50,
max_occurences='auto',
maxiter=500, time_window=2, spatial_window=2,
matching_quality=0.7):
"""
Makes a mosaic file from the best_matches found.
Finds the right frames to reconstitute the given image, in three steps:
1. Find the optimal frame for each subregion of the given image.
2. Change the frames so as to avoid the same frames being used too many
times.
3. Change the frames so that frames that are near in time in the original
movie will not appear near from each other in the final mosaic.
Parameters
-----------
image
The (RGB WxHx3 numpy array) image to reconstitute.
frames_in_width
How many frames are in the width of the picture (determines the
'resolution' of the final mosaic)
max_occurences
          How many occurrences of the same frame are allowed in step 2. If 'auto',
          this is not taken into account and the algorithm will try to reduce
          the number of occurrences until it reaches maxiter iterations or bad
          overall matching quality (see below).
maxiter
Number of iterations in step 2 when reducing the number of occurence
of the most frequent frames.
matching_quality
          The program will stop when the current total matching error exceeds
          (initial_score / matching_quality). This prevents the program from
          degrading the quality of the mosaic too much.
time_window
          The frames will be changed in step 3 so that frames within time_window
          of each other in the movie do not end up next to each other in the
          mosaic.
"""
best_matches, score = self._find_best_matches(image, frames_in_width,
max_occurences, maxiter, time_window,
spatial_window, matching_quality)
nframes = len(list(set(best_matches.flatten())))
verbose_print("Assembling final picture (%d different frames)."%nframes)
h, w = best_matches.shape
dh,dw = self.dh, self.dw
final_picture = np.zeros((h*dh, w*dw, 3))
for i in range(h):
for j in range(w):
ind = best_matches[i,j]
final_picture[dh*i:dh*(i+1), dw*j:dw*(j+1)] = self[ind]
ffmpeg_write_image(outputfile, final_picture.astype('uint8'))
verbose_print("Finished.\n")
|
from skimage.transform.pyramids import pyramid_laplacian,resize
def multiscale_saliency(image, method, min_image_area=10000):
'''
Runs any saliency method as a multiscale method.
method is run for each image downsized until its area is lower than min_image_area.
The final result is an image with the same size as the original.
'''
    sals = None
    count = 0
    for img in pyramid_laplacian(image, 1):
        # stop once the downsized image gets too small, as documented above
        # (always keep at least one scale so the average is well defined)
        if count and img.shape[0] * img.shape[1] < min_image_area:
            break
        print('calculating for shape = %s, %s' % img.shape[:2])
        s = method(img)  # run the saliency method on the downsized image
        # 'nearest' is not a valid skimage resize mode; 'edge' is the closest
        s = resize(s, image.shape[:2], mode='edge')
        if sals is not None:
            sals = sals + s
        else:
            sals = s
        count += 1
    return sals / count
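
# Illustrative usage sketch (an assumption, not from the original source):
# any callable mapping an image to a 2D saliency map can serve as `method`,
# e.g. a trivial intensity-based one:
#
# sal = multiscale_saliency(image, lambda im: im.mean(axis=2))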
def _ispar_shape(img):
    # True when both image dimensions are even
    par = img.shape[0] % 2 == 0 and img.shape[1] % 2 == 0
    return par
|
from flask import request
import redis
import config
import datetime
import json
redis_client = redis.StrictRedis(host=config.redis_config["host"], port=config.redis_config["port"])
def handle_app_install(request):
print "Handling app install", request.json
userID = request.json["userId"]
token = request.json["token"]
redis_client.set(userID, token)
def handle_app_uninstall(request):
print "Handling app uninstall", request.json
userID = request.json["userId"]
redis_client.delete(userID)
def date_key():
return "{}-{:02d}".format(datetime.date.today().year, datetime.date.today().month)
def add_report_time(response):
response["reportTime"] = datetime.datetime.today().isoformat()
return response
def handle_message_action(request):
print "Handling message action", request.json
messagesUIDs = request.json["messageUids"]
for messageUID in messagesUIDs:
if not redis_client.exists(messageUID):
print "Saving UID", messageUID
key = date_key()
redis_client.rpush(key, messageUID)
redis_client.set(messageUID, json.dumps(add_report_time(request.json)))
else:
print "Ignoring UID", messageUID
def messageDetailsForUID(UID):
if redis_client.exists(UID):
details = redis_client.get(UID)
print "Got UID", UID, details
return json.loads(details)
return None
def UIDsForMonth(month):
if redis_client.exists(month):
details = redis_client.lrange(month, 0, -1)
print "Got UID", month, details
return details
return None
|
import inspect
import json
import traceback
import os
import types
'''
This prints the full stack trace from the current point in the code
(from where "stack = inspect.stack()", below, is called).
It can be useful, e.g., to understand code that makes many calls
or to get information for debugging exceptions.
'''
def print_stack_history(limit=-1):
stack = inspect.stack()
# TODO: Turn into an optional parameter on this function parameters list
# removes the first element of the stack, which is the current function
stack.pop(0)
# reverse the stack trace so the most recent is at the bottom of the stack
# (the same order of the calls were made :)
stack.reverse()
# TODO: make relevant_stack be stack sliced
# to just the last "limit" elements
relevant_stack = stack
    stack_list = {}
    debugging = None  # ensure a defined return value even if inspection fails
    try:
for idx, s in enumerate(relevant_stack):
current_function_arguments = inspect.getargvalues(s.frame)
_, filename, line_no, func_name, code_list, index_in_code_list = s
code_str = code_list[index_in_code_list]
for name, data in inspect.getmembers(s.frame):
if name == 'f_locals':
break
params_and_vars_dict = {}
for item in data:
if not item.startswith('__'):
if isinstance(data[item], str) or isinstance(data[item], int) or isinstance(data[item], dict) or isinstance(data[item], list) or isinstance(data[item], set) or (data[item] is None):
params_and_vars_dict[item] = data[item]
current_stack_dict = {'filename': filename,
'line_number': line_no,
'function_name': func_name,
'params_and_vars': params_and_vars_dict,
'code': code_str}
stack_list[idx] = current_stack_dict
debugging = json.dumps(stack_list)
finally:
# avoid memory leak issues
del stack
return debugging
def func_one():
var_first = 'var1'
print('func 1')
func_two(f2param2=99)
def func_two(f2param1='teste', f2param2=15, f2param3=['el1']):
var_second = ['var2']
print('func 2')
func_three()
def func_three():
var_three = {'key3': 'value3'}
var_three_another = [("field3A", "value3A"),("field3B", "value3B")]
print('func 3')
func_four()
def func_four():
var_four = 4
print('func 4')
history = print_stack_history()
print('To get to the current point on the code, the calls order (stack) '
'was the following: \n {}'.format(history))
func_one()
|
import binascii
from lbryum.hashing import Hash
from lbryum.errors import InvalidProofError
def height_to_vch(n):
r = [0 for i in range(8)]
r[4] = n >> 24
r[5] = n >> 16
r[6] = n >> 8
r[7] = n % 256
# need to reset each value mod 256 because for values like 67784
# 67784 >> 8 = 264, which is obviously larger then the maximum
# value input into chr()
return ''.join([chr(x % 256) for x in r])
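# Worked example (illustrative): for n = 67784 = 0x108C8 the resulting
# big-endian bytes are [0, 0, 0, 0, 0, 1, 8, 200], since 67784 >> 16 == 1,
# (67784 >> 8) % 256 == 8 and 67784 % 256 == 200.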
def get_hash_for_outpoint(txhash, nOut, nHeightOfLastTakeover):
txhash_hash = Hash(txhash)
nOut_hash = Hash(str(nOut))
height_of_last_takeover_hash = Hash(height_to_vch(nHeightOfLastTakeover))
outPointHash = Hash(txhash_hash + nOut_hash + height_of_last_takeover_hash)
return outPointHash
def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = ''
previous_child_character = None
for child in node['children']:
if child['character'] < 0 or child['character'] > 255:
raise InvalidProofError("child character not int between 0 and 255")
            if previous_child_character is not None:
                if previous_child_character >= child['character']:
raise InvalidProofError("children not in increasing order")
previous_child_character = child['character']
to_hash += chr(child['character'])
if 'nodeHash' in child:
if len(child['nodeHash']) != 64:
raise InvalidProofError("invalid child nodeHash")
to_hash += binascii.unhexlify(child['nodeHash'])[::-1]
else:
if previous_computed_hash is None:
raise InvalidProofError("previous computed hash is None")
if found_child_in_chain is True:
raise InvalidProofError("already found the next child in the chain")
found_child_in_chain = True
reverse_computed_name += chr(child['character'])
to_hash += previous_computed_hash
if not found_child_in_chain:
if i != 0:
raise InvalidProofError("did not find the alleged child")
if i == 0 and 'txhash' in proof and 'nOut' in proof and 'last takeover height' in proof:
if len(proof['txhash']) != 64:
raise InvalidProofError("txhash was invalid: {}".format(proof['txhash']))
if not isinstance(proof['nOut'], (long, int)):
raise InvalidProofError("nOut was invalid: {}".format(proof['nOut']))
if not isinstance(proof['last takeover height'], (long, int)):
raise InvalidProofError(
'last takeover height was invalid: {}'.format(proof['last takeover height']))
to_hash += get_hash_for_outpoint(
binascii.unhexlify(proof['txhash'])[::-1],
proof['nOut'],
proof['last takeover height']
)
verified_value = True
elif 'valueHash' in node:
if len(node['valueHash']) != 64:
raise InvalidProofError("valueHash was invalid")
to_hash += binascii.unhexlify(node['valueHash'])[::-1]
previous_computed_hash = Hash(to_hash)
if previous_computed_hash != binascii.unhexlify(rootHash)[::-1]:
raise InvalidProofError("computed hash does not match roothash")
    if 'txhash' in proof and 'nOut' in proof:
        if not verified_value:
            raise InvalidProofError("mismatch between proof claim and outcome")
        if name != reverse_computed_name[::-1]:
            raise InvalidProofError("name did not match proof")
if not name.startswith(reverse_computed_name[::-1]):
raise InvalidProofError("name fragment does not match proof")
return True
|
from django import forms
from apps.hypervisor.models import Hypervisor
class HypervisorForm(forms.ModelForm):
class Meta:
model = Hypervisor
exclude = ('status',)
|
""" Recording modules
"""
import sys
import wave
import datetime as dt
import pyaudio
from scipy.io import wavfile as wav
RATE = 48000
CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1
if sys.platform == 'darwin':
CHANNELS = 1
def time_now():
""" Get current time
"""
return dt.datetime.now().strftime("%Y%m%d%H%M%S")
def kayurecord_save(filename, frames, container):
""" Save audio recording to wav file
"""
wave_file = wave.open(filename, 'wb')
wave_file.setnchannels(CHANNELS)
wave_file.setsampwidth(container.get_sample_size(FORMAT))
wave_file.setframerate(RATE)
wave_file.writeframes(b''.join(frames))
wave_file.close()
def kayurecord(woodname, duration):
""" Record audio and save to wav file
"""
filename = time_now() + "_" + woodname + ".wav"
container = pyaudio.PyAudio()
stream = container.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* start recording...")
data = []
frames = []
for i in range(0, int(RATE / CHUNK * duration)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
container.terminate()
print("* done recording!")
kayurecord_save(filename, frames, container)
return filename
def kayuopen(woodname):
""" Open wav file
"""
rate, data = wav.read(woodname)
data = data / (2.**15)
try:
ch1 = data[:, 0]
except IndexError:
ch1 = data
return rate, ch1
|
import copy
import itertools
import re
SET_ORDERING_RE = re.compile(r'''
\s*
(?P<order>[\d\s,-]+)\)
\s*
''', re.X)
SET_RE = re.compile(r'''
(\[(?P<rest>\d+)\])?
\s*
((?P<work>\d+) \s* x \s*)?
\s*
(?P<reps>\d+)
''', re.X)
class Set:
def __init__(self,
work=0,
reps=0,
rest=0,
order=0) -> None:
self.work = work or 0
assert isinstance(self.work, int)
self.reps = reps or 0
assert isinstance(self.reps, int)
self.rest = rest or 0
assert isinstance(self.rest, int)
self.order = order or 0
assert isinstance(self.order, int)
@classmethod
def parse(cls, string):
# parse ordering groups
order_groups, set_str = parse_ordering(string)
# parse set information
sets = parse_set_body(set_str)
final = []
# Many-to-one order-to-set notation
# 4-6) [30] 114 x 8
if len(sets) == 1 and len(order_groups) >= 1:
base = sets[0]
for o in itertools.chain(*order_groups):
s = copy.copy(base)
s.order = o
final.append(s)
# One-to-one order-to-set notation
# 1-3,5-7) 100 x 8, 110x9
elif len(order_groups) == len(sets):
for i, og in enumerate(order_groups): # [(1,2,3), (5,6,7)]
for o in og: # (1,2,3)
s = copy.copy(sets[i])
s.order = o
final.append(s)
# One-to-Many notation
# 1-3) 100x8, 110x7, 120x 6
elif (len(order_groups) == 1 and
len(order_groups[0]) == len(sets)):
for i, o in enumerate(order_groups[0]):
sets[i].order = o
final.append(sets[i])
else:
raise ValueError("Set notation mismatch")
return final
@classmethod
    def parse_sets(cls, lines):
        """Parse .wkt-formatted lines, each containing one or more Sets.

        Returns the parsed Sets and the lines that were not consumed.
        """
        sets = []
        consumed = 0  # count consumed lines; one line may yield several Sets
        for l in lines:
            try:
                ret = cls.parse(l)
            except ValueError:
                break
            if not ret:
                break
            sets.extend(ret)
            consumed += 1
        if not sets:
            raise ValueError("No sets parsed")
        return sets, lines[consumed:]
def to_json(self):
d = {}
for attr in ['work', 'reps', 'rest', 'order']:
v = getattr(self, attr)
if v:
d[attr] = v
return d
@classmethod
def from_json(cls, d):
"""Build a Set from a JSON object (dict)."""
return cls(**d)
def __lt__(self, other):
"""Sets are sorted by their workout order.
It is invalid to compare Sets outside of the same Workout.
"""
if not isinstance(other, Set):
return NotImplemented
return self.order < other.order
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return (self.order == other.order and
self.work == other.work and
self.reps == other.reps and
self.rest == other.rest)
def __str__(self):
s = ''
if self.rest:
s += '[' + str(self.rest) + '] '
if self.work:
s += str(self.work) + ' x '
return s + str(self.reps)
def __repr__(self):
return ("Set(work={set.work}, "
"reps={set.reps}, rest={set.rest}, "
"order={set.order})").format(set=self)
def parse_ordering(string):
m = SET_ORDERING_RE.match(string)
if not m:
raise ValueError(string + " isn't a recognized set string")
parts = m.groupdict()['order'].strip(', ')
ordering = []
for s in parts.split(','):
val = s.strip()
try:
ordering.append((int(val),))
except ValueError:
n = re.match(r'(\d+)-(\d+)', val)
if not n:
raise
# 'a-d) ...' => (a,b,c,d)
ordering.append(tuple(range(int(n.groups()[0]),
int(n.groups()[1])+1)))
return ordering, m.string[m.end():]
def parse_set_body(string):
sets = []
for m in SET_RE.finditer(string):
gd = m.groupdict()
# Pass it through the Set Constructor to filter out values
vals = {}
for attr in ['work', 'reps', 'rest']:
v = gd.get(attr)
if v:
vals[attr] = int(v)
sets.append(Set(**vals))
return sets
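
# Illustrative examples (not part of the original module) of the notations
# handled by Set.parse:
#
# >>> [s.order for s in Set.parse('4-6) [30] 114 x 8')]          # many-to-one
# [4, 5, 6]
# >>> [(s.order, s.work) for s in Set.parse('1-3) 100x8, 110x7, 120x 6')]
# [(1, 100), (2, 110), (3, 120)]                                 # one-to-many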
|
t = int(raw_input())
for tt in range(t):
n, m = map(int, raw_input().split())
mice = [int(x) for x in raw_input().split(' ')]
holes = [int(x) for x in raw_input().split(' ')]
ret = -1
mice.sort()
holes.sort()
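    # Greedy argument: after sorting, pairing the i-th smallest mouse with the
    # i-th smallest hole minimizes the maximum distance, since any "crossing"
    # pair can be swapped without increasing the worst-case distance.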
    for mouse, hole in zip(mice, holes):
        ret = max(ret, abs(mouse - hole))
print ret
|
from lampost.gameops.script import Scriptable
from lampmud.comm.broadcast import BroadcastMap
from lampost.meta.auto import TemplateField
from lampost.db.dbofield import DBOField, DBOTField
from lampmud.model.article import Article, ArticleTemplate
from lampmud.mud.action import mud_action
class ArticleTemplateLP(ArticleTemplate):
class_id = 'article'
remove_msg = BroadcastMap(s="You unequip {N}", e="{n} unequips {N}")
equip_msg = BroadcastMap(s="You wear {N}", e="{n} wears {N}")
wield_msg = BroadcastMap(s="You wield {N}", e="{n} wields {N}")
def _on_loaded(self):
if self.art_type == 'weapon':
self.equip_msg = self.wield_msg
class ArticleLP(Article, Scriptable):
equip_slot = DBOTField()
current_slot = DBOField()
weapon_type = DBOTField('mace')
damage_type = DBOTField('blunt')
delivery = DBOTField('melee')
equip_msg = TemplateField()
remove_msg = TemplateField()
def on_equipped(self, equipper):
equipper.broadcast(target=self, broadcast_map=self.equip_msg)
def on_removed(self, remover):
remover.broadcast(target=self, broadcast_map=self.remove_msg)
@mud_action(('wear', 'equip', 'wield'), 'equip_slot', target_class="inven")
def wear(source, target, **_):
source.equip_article(target)
@mud_action(('remove', 'unequip', 'unwield'), 'current_slot')
def remove(source, target, **_):
source.remove_article(target)
|
import argparse
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from LinearKalmanFilter import *
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
def plotData(data_dict, vx, vy):
f, axarr = plt.subplots(2, 2)
l = len(data_dict["P"])
axarr[0, 0].semilogy(range(l), [p[0, 0] for p in data_dict["P"]], label='$x$')
axarr[0, 0].semilogy(range(l), [p[1, 1] for p in data_dict["P"]], label='$y$')
axarr[0, 0].semilogy(range(l), [p[2, 2] for p in data_dict["P"]], label='$\dot x$')
axarr[0, 0].semilogy(range(l), [p[3, 3] for p in data_dict["P"]], label='$\dot y$')
axarr[0, 0].set_xlabel('Filter Step')
axarr[0, 0].set_title('Uncertainty (Elements from Matrix $P$)')
axarr[0, 0].legend(loc='best')
l = len(data_dict["x"])
axarr[0, 1].plot(range(l), [x[2, 0] for x in data_dict["x"]], label='$\dot x$')
axarr[0, 1].plot(range(l), [x[3, 0] for x in data_dict["x"]], label='$\dot y$')
axarr[0, 1].axhline(vx, color='#999999', label='$\dot x_{real}$')
axarr[0, 1].axhline(vy, color='#999999', label='$\dot y_{real}$')
axarr[0, 1].set_xlabel('Filter Step')
axarr[0, 1].set_title('Estimate (Elements from State Vector $x$)')
axarr[0, 1].legend(loc='best')
axarr[0, 1].set_ylabel('Velocity')
l = len(data_dict["R"])
axarr[1, 0].semilogy(range(l), [r[0, 0] for r in data_dict["R"]], label='$\dot x$')
axarr[1, 0].semilogy(range(l), [r[1, 1] for r in data_dict["R"]], label='$\dot y$')
axarr[1, 0].set_xlabel('Filter Step')
axarr[1, 0].set_ylabel('')
axarr[1, 0].set_title('Measurement Uncertainty $R$ (Adaptive)')
axarr[1, 0].legend(loc='best')
l = len(data_dict["x"])
axarr[1, 1].scatter([x[0, 0] for x in data_dict["x"]], [x[1, 0] for x in data_dict["x"]], s=20, label='State', c='k')
axarr[1, 1].scatter(data_dict["x"][0][0, 0], data_dict["x"][0][1, 0], s=30, label='Start', c='b')
axarr[1, 1].scatter(data_dict["x"][-1][0, 0], data_dict["x"][-1][1, 0], s=30, label='Goal', c='r')
axarr[1, 1].set_xlabel('X')
axarr[1, 1].set_ylabel('Y')
axarr[1, 1].set_title('Position')
axarr[1, 1].legend(loc='best')
axarr[1, 1].set_aspect('equal')
for i in range(l):
if i % 10 == 0:
plot_cov_ellipse(data_dict["P"][i][0:2, 0:2], np.array([data_dict["x"][i][0, 0], data_dict["x"][i][1, 0]]), ax=axarr[1, 1],
nstd=30, alpha=0.5, color='green')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Adaptive Kalman Filter sample')
parser.add_argument('-t', type=float, default=0.5, help='time step')
parser.add_argument('-r', type=float, default=1.0, help='standard deviation of R')
parser.add_argument('-N', type=int, default=200, help='number of trials')
parser.add_argument('--vx', type=float, default=20, help='ground truth of velocity x')
parser.add_argument('--vy', type=float, default=40, help='ground truth of velocity y')
parser.add_argument('--noise', type=float, default=50, help='unexpected observation noise of velocity y')
group = parser.add_mutually_exclusive_group()
group.add_argument('--adaptive', dest='akf', action='store_true', help='use adaptive kalman filter')
group.add_argument('--non-adaptive', dest='akf', action='store_false', help='do not use adaptive kalman filter')
parser.set_defaults(akf=True)
# parameter
dt = parser.parse_args().t
ra = parser.parse_args().r**2
sv = 1.0
num = parser.parse_args().N
vx = parser.parse_args().vx # in X
vy = parser.parse_args().vy # in Y
# initialize
x0 = np.matrix([[0.0, 0.0, 0, 0]]).T
P0 = 1.0 * np.eye(4)
R = np.matrix([[ra, 0.0],
[0.0, ra]])
G = np.matrix([[0.5*dt**2],
[0.5*dt**2],
[dt],
[dt]])
Q = G * G.T * sv**2
F = np.matrix([[1.0, 0.0, dt, 0.0],
[0.0, 1.0, 0.0, dt],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
H = np.matrix([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
AKF = LinearKalmanFilter(x0, P0, Q, R, F, H)
# input measurement
mx = np.array(vx + np.random.randn(num))
my = np.array(vy + np.random.randn(num))
# some different error somewhere in the measurements
    my[(2 * num // 4):(3 * num // 4)] = np.array(vy + parser.parse_args().noise * np.random.randn(num // 4))
measurements = np.vstack((mx, my))
for i in range(len(measurements[0])):
if parser.parse_args().akf:
n = 10
if i > n:
R = np.matrix([[np.std(measurements[0, (i-n):i])**2, 0.0],
[0.0, np.std(measurements[1, (i-n):i])**2]])
AKF.proc(Q, measurements[:, i].reshape(2, 1), R)
plotData(AKF.getData(), vx, vy)
# print AKF.getData()["x"][0]
|
import json
import glob, os, sys
try:
import xlsxwriter
except:
print("This software requires XlsxWriter package. Install it with 'sudo pip install XlsxWriter', see http://xlsxwriter.readthedocs.io/")
sys.exit(1)
import datetime
import argparse
import re
def parseDateAndRun(filename):
m=re.match( r'.*profile.(?P<run>[0-9]*).(?P<date>20[0-9][0-9]-[01][0-9]-[0-3][0-9]).json', filename)
if m:
return (m.group('date'), m.group('run'))
else: # not found
return ('0','0')
def calc_minutes(timestr):
# returns the number of minutes from midnight. seconds are ignored
# based on http://stackoverflow.com/questions/10663720/converting-a-time-string-to-seconds-in-python
ftr = [60,1,0] # ignore seconds, count minutes, and use 60 minutes per hour
return sum([a*b for a,b in zip(ftr, map(int,timestr.split(':')))])
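# Example (illustrative): calc_minutes('07:30:15') == 450; the seconds field
# is dropped because its weight in `ftr` is 0.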
def expandProfile(l, valueField, offsetField):
r=[]
minutes=0
value=l[0][valueField]
for i in range(len(l)):
start1=l[i]['start']
minutes1=calc_minutes(start1)
offset1=l[i][offsetField]
if minutes1!=offset1:
print("Error in JSON offSetField %s contains %s does not match start time %s (%d minutes). Please report this as a bug" % (offsetField, offset1, start1, minutes1))
sys.exit(1)
while (minutes<minutes1):
r.append(value)
minutes=minutes+30
value=l[i][valueField]
# add the last value until midnight
while (minutes<24*60):
r.append(value)
minutes=minutes+30
# return the expanded profile
return r
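# Example (illustrative): a two-entry basal profile
#   [{'start': '00:00', 'minutes': 0, 'rate': 0.5},
#    {'start': '12:00', 'minutes': 720, 'rate': 0.8}]
# expands to 48 half-hour samples: 24 values of 0.5 followed by 24 of 0.8.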
def writeExcelHeader(ws, date_format, headerFormat):
ws.write_string(0,0, 'Filename', headerFormat)
ws.write_string(0,1, 'Date', headerFormat)
ws.write_string(0,2, 'Run', headerFormat)
col=3
for hours in range(24):
for minutes in [0,30]:
dt=datetime.datetime.strptime('%02d:%02d' % (hours,minutes) , '%H:%M')
ws.write_datetime(0, col, dt, date_format)
col=col+1
def write_profile(worksheet, row, profile, excel_number_format):
    # NOTE: relies on the global `filename` set in the main loop below
    worksheet.write_string(row, 0, filename)
    date, run = parseDateAndRun(filename)
    worksheet.write_string(row, 1, date)
    worksheet.write_string(row, 2, run)
    col=3
    for i in PROFILE_FIELDS:
        if i in profile:  # dict.has_key() no longer exists in Python 3
            worksheet.write_number(row, col, profile[i], excel_number_format)
        col=col+1
def write_timebased_profile(worksheet, row, expandedList, excel_number_format):
worksheet.write_string(row, 0, filename)
date, run = parseDateAndRun(filename)
worksheet.write_string(row, 1, date)
worksheet.write_string(row, 2, run)
col=3
for i in range(len(expandedList)):
worksheet.write_number(row, col, expandedList[i], excel_number_format)
col=col+1
def excel_init_workbook(workbook):
#see http://xlsxwriter.readthedocs.io/format.html#format for documentation on the Excel format's
excel_hour_format = workbook.add_format({'num_format': 'hh:mm', 'bold': True, 'font_color': 'black'})
excel_2decimals_format = workbook.add_format({'num_format': '0.00', 'font_size': '16'})
excel_integer_format = workbook.add_format({'num_format': '0', 'font_size': '16'})
headerFormat = workbook.add_format({'bold': True, 'font_color': 'black'})
worksheetInfo = workbook.add_worksheet('Read this first')
worksheetProfile = workbook.add_worksheet('Profile')
worksheetProfile.write_string(0,0, 'Filename', headerFormat)
worksheetProfile.write_string(0,1, 'Date', headerFormat)
worksheetProfile.write_string(0,2, 'Run', headerFormat)
col=3
for colName in PROFILE_FIELDS:
worksheetProfile.write_string(0,col, colName, headerFormat)
col=col+1
worksheetIsf = workbook.add_worksheet('isfProfile')
worksheetBasal = workbook.add_worksheet('basalProfile')
writeExcelHeader(worksheetBasal, excel_hour_format,headerFormat)
writeExcelHeader(worksheetIsf, excel_hour_format,headerFormat)
worksheetBasal.autofilter('A1:C999')
worksheetIsf.autofilter('A1:C999')
worksheetBasal.set_column(3, 50, 6) # set columns starting from 3 to same width
worksheetIsf.set_column(3, 50, 6) # set columns starting from 3 to same width
infoText=['Released under MIT license. See the accompanying LICENSE.txt file for', 'full terms and conditions', '']
infoText.append('THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR')
infoText.append('IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,')
infoText.append('FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE')
infoText.append('AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER')
infoText.append('LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,')
infoText.append('OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN')
infoText.append('THE SOFTWARE.')
row=1
for i in range(len(infoText)):
worksheetInfo.write_string(row, 1, infoText[i])
row=row+1
return (worksheetProfile, worksheetBasal, worksheetIsf, excel_2decimals_format, excel_integer_format)
def sortedFilenames():
filelist=glob.glob("settings/profile.json")
filelist=filelist+glob.glob("settings/pumpprofile.json")
profiles=glob.glob("autotune/profile*.json")
listdateandrun=[]
for i in profiles:
date, run = parseDateAndRun(i)
sortkey="%s-%3d" % (date,int(run))
listdateandrun.append((sortkey,i))
listdateandrun.sort()
for (daterun,filename) in listdateandrun:
filelist.append(filename)
return filelist
PROFILE_FIELDS=['max_iob', 'carb_ratio', 'csf', 'max_basal', 'sens']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Export oref0 autotune files to Microsoft Excel')
parser.add_argument('-d', '--dir', help='openaps directory', default='.')
parser.add_argument('-o', '--output', help='default autotune.xlsx', default='autotune.xlsx')
parser.add_argument('--version', action='version', version='%(prog)s 0.0.4-dev')
args = parser.parse_args()
# change to openaps directory
os.chdir(args.dir)
print("Writing headers to Microsoft Excel file %s" % args.output)
workbook = xlsxwriter.Workbook(args.output)
(worksheetProfile,worksheetBasal, worksheetIsf,excel_2decimals_format,excel_integer_format)=excel_init_workbook(workbook)
row=1 # start on second row, row=0 is for headers
filenamelist=sortedFilenames()
for filename in filenamelist:
f=open(filename, 'r')
print("Adding %s to Excel" % filename)
j=json.load(f)
try:
basalProfile=j['basalprofile']
isfProfile=j['isfProfile']['sensitivities']
expandedBasal=expandProfile(basalProfile, 'rate', 'minutes')
expandedIsf=expandProfile(isfProfile, 'sensitivity', 'offset')
write_timebased_profile(worksheetBasal, row, expandedBasal, excel_2decimals_format)
write_timebased_profile(worksheetIsf, row, expandedIsf, excel_integer_format)
write_profile(worksheetProfile, row, j, excel_integer_format)
row=row+1
except Exception as e:
            if 'error' in j:  # dict.has_key() no longer exists in Python 3
print("Skipping file. Error: %s " % j['error'])
else:
print("Skipping file. Exception: %s" % e)
workbook.close()
print("Written %d lines to Excel" % row)
|
from board import Board
from board.move import Move
from util import input_parser
import util.printer as printer
from util.enums import Player
if __name__ == '__main__':
# get desired player or None for two player
player = input_parser.player()
# load and print initial board
board = Board(True)
unicode = input_parser.use_unicode(board)
printer.print_board(board)
# game loop
while True:
# if checkmate then end the game
check, draw, checkmate = board.status()
if checkmate:
print('{} wins by checkmate'.format(Player.opponent(board.current_player)))
break
# if draw then end the game
if draw:
print('draw!')
break
# print check message
if check:
print('check!!!')
# if a human then ask for input
if player is None or board.current_player == player:
cmd = input_parser.cmd(board)
if isinstance(cmd, Move): # move
board.move(cmd)
printer.print_board(board)
elif cmd == 'q': # quit
break
elif cmd == 'r': # random move
board.random_move()
printer.print_board(board)
elif cmd == 'l': # list possible moves
printer.print_moves(board)
elif cmd == 's': # current score
printer.print_score(board)
else:
pass
else:
# ai turn
# move = board.random_move()
print('ai thinking...')
move = board.recommended_move(3)
board.move(move)
print('{} moved {}'.format(board.current_player.opponent(), board.last_move()))
printer.print_board(board)
|
import pynmea2
import sys
import re
def checksum(data):
    """XOR every character after the '$' (NMEA style) and return the checksum
    as two uppercase hex digits."""
    res = 0
    data = data.split('$')[1]
    for c in data:
        res ^= ord(c)
    return '%02X' % res
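# Example (illustrative): checksum('$AB') == '03', since ord('A') ^ ord('B') == 3
# and NMEA checksums are written as two uppercase hex digits.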
if __name__ == '__main__':
gpgga = '$GPGGA,102154.552,4308.0718,N,14114.9862,E,1,03,19.8,48.4,M,29.5,M,,0000'
altdata = 100
time = 101114
for i , alt in enumerate(range(0 , altdata, 5)):
#print(pynmea2.GGA('GP' + 'GGA' + str(time+i),'4308.0718' + 'N' + '14114.9862' + 'E' + '' + '01' + '03' + '19.8' + '48.4' + 'M' + str(29.5+alt) + 'M' + '' + '0000'))
data = '$GPGGA,' + str(time+i) + ',4308.0718' + ',N' + ',14114.9862' + ',E' + ',' + ',01' + ',03' + ',19.8' + ',48.4' + ',M,' + str(29.5+alt) + ',M' + ',' + ',0000'
        data += '*' + checksum(data)
print(data)
for i , alt in enumerate(range(altdata, 0, -5)):
data = '$GPGGA,' + str(20+time+i) + ',4308.0718' + ',N' + ',14114.9862' + ',E' + ',' + ',01' + ',03' + ',19.8' + ',48.4' + ',M,' + str(alt-29.5) + ',M' + ',' + ',0000'
        data += '*' + checksum(data)
print(data)
|
def xuniqueCombinations(items, n):
    """Yield all n-element combinations of items (order-insensitive)."""
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            for cc in xuniqueCombinations(items[i+1:], n-1):
                yield [items[i]] + cc
def xcombinations(items, n):
    """Yield all n-element arrangements of items (order-sensitive)."""
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            for cc in xcombinations(items[:i]+items[i+1:], n-1):
                yield [items[i]] + cc
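# Quick sanity check (illustrative, not part of the original module): unique
# combinations ignore order, while xcombinations enumerates arrangements.
if __name__ == '__main__':
    print(list(xuniqueCombinations([1, 2, 3], 2)))  # [[1, 2], [1, 3], [2, 3]]
    print(list(xcombinations([1, 2, 3], 2)))        # [[1, 2], [1, 3], [2, 1], [2, 3], [3, 1], [3, 2]]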
|
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from model.year import Year
from model.wave import Wave
from model.config import Config
from model.profile import Profile
from model.user import User
from model.article import Article
from model.achievement import Achievement
from model.thread import Thread, ThreadVisit
from model.post import Post
from model.prerequisite import Prerequisite, PrerequisiteType
from model.task import Task, SolutionComment
from model.module import Module, ModuleType
from model.module_custom import ModuleCustom
from model.token import Token
from model.user_achievement import UserAchievement
from model.mail_easteregg import MailEasterEgg
from model.feedback_recipients import FeedbackRecipient
from model.programming import CodeExecution
from model.evaluation import Evaluation
from model.submitted import SubmittedFile, SubmittedCode
from model.active_orgs import ActiveOrg
from model.feedback import Feedback
from model.user_notify import UserNotify
|
from hid import enumerate
from ba63.simple_hid import SimpleHID
from ba63.constant import SEQUENCE_MARQUEUR_DEBUT, SEQUENCES_CURSEUR, SEQUENCE_NETTOYAGE, SEQUENCE_SET_CHARSET, TAILLE_MESSAGE_MAX, NOMBRE_CARACTERES_PAR_LIGNE
from unidecode import unidecode
class FormatDonneesInvalideBA63(Exception):
pass
class TailleMessageTropGrande(Exception):
pass
class CharsetInvalideBA63(Exception):
pass
class NumeroLigneInvalideBA63(Exception):
pass
class FormatTexteInvalideBA63(Exception):
pass
class BA63(SimpleHID):
hid_tree = None
def __init__(self, chemin):
super().__init__(chemin)
def _paquet(self, donnees):
"""
Construction d'un paquet à destination du BA63
:param bytes donnees: Les données à transmettre sous forme de bytes
:return: Séquence prête à l'envoie
:rtype: bytes
"""
if not isinstance(donnees, bytes):
            raise FormatDonneesInvalideBA63('A packet cannot be built without a bytes array.')
if len(donnees) > TAILLE_MESSAGE_MAX:
            raise TailleMessageTropGrande('Packet construction failed because your message is %i '
                                          'bytes long but the maximum size is %i bytes' % (len(donnees),
                                                                                           TAILLE_MESSAGE_MAX))
return SEQUENCE_MARQUEUR_DEBUT + bytes(chr(len(donnees)), 'ascii') + donnees
def imprimer(self, numero_ligne, message):
"""
Imprime du texte à l'écran
:param int numero_ligne: Numéro de la ligne
:param str message: Texte à écrire
:return: None
"""
if not isinstance(message, str):
            raise FormatTexteInvalideBA63('Only text can be printed on the BA63.')
self.curseur(numero_ligne)
message_sanitized = unidecode(message)
if len(message_sanitized) > NOMBRE_CARACTERES_PAR_LIGNE:
message_sanitized = message_sanitized[:(NOMBRE_CARACTERES_PAR_LIGNE-2)] + '..'
super().ecrire(self._paquet(bytes(message_sanitized, 'ascii')))
def nettoyer(self):
"""
Efface l'écran du BA63.
:return: None
"""
super().ecrire(self._paquet(SEQUENCE_NETTOYAGE))
def curseur(self, numero_ligne):
"""
Déplace le curseur sur le début d'une ligne
:param int numero_ligne: Le numéro de la ligne
:return: None
"""
if numero_ligne not in SEQUENCES_CURSEUR.keys():
            raise NumeroLigneInvalideBA63('Line number %i is invalid. Available: %s.' % (numero_ligne, SEQUENCES_CURSEUR.keys()))
super().ecrire(self._paquet(SEQUENCES_CURSEUR[numero_ligne]))
def charset(self, target):
"""
Change le charset du BA63
:param target: Le type de charset (use. constant)
:return: None
"""
if len(target) != 3:
            raise CharsetInvalideBA63('A charset sequence must be encoded on 3 bytes; your sequence '
                                      'is %i byte(s).' % len(target))
super().ecrire(self._paquet(SEQUENCE_SET_CHARSET))
super().ecrire(self._paquet(target))
@staticmethod
def get(identifiant_vendeur=2727, identifiant_materiel=512, interface_cible=1):
"""
Recherche du BA63 et création d'une nouvelle instance BA63
:param identifiant_vendeur: Identifiant du vendeur matériel
:param identifiant_materiel: Identifiant du matériel chez le vendeur
:param interface_cible: Numéro de l'interface
:return: Nouvelle instance du BA63
:rtype: BA63
"""
BA63.hid_tree = enumerate() # type: list
for dev in BA63.hid_tree:
            # LINUX ~ WINDOWS case
if dev['vendor_id'] == identifiant_vendeur \
and dev['product_id'] == identifiant_materiel \
and dev['interface_number'] == interface_cible:
return BA63(dev['path'])
            # DARWIN case
elif dev['vendor_id'] == identifiant_vendeur \
and dev['product_id'] == identifiant_materiel \
and dev['interface_number'] == -1 \
and dev['usage'] == 9216:
return BA63(dev['path'])
return None
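# Minimal usage sketch (an assumption, not part of the original module): find
# the display with the default IDs, clear it, and print one line. The line
# number must exist in SEQUENCES_CURSEUR for curseur() to accept it.
if __name__ == '__main__':
    afficheur = BA63.get()
    if afficheur is not None:
        afficheur.nettoyer()
        afficheur.imprimer(1, 'Hello BA63')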
|
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def reverse_between(head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
1 ≤ m ≤ n ≤ length of list.
"""
if not head:
return None
# pp: pointer to prev
# p: pointer to current
# pn: pointer to next
# +- -+
# n1 -> | n2 -> n3 -> n4 | -> n5
# | +- | | -+
# pp p pn
#
# +- -+
# n1 -> | n3 -> n2 -> n4 | -> n5
# | +- | | -+
# pp pn p
#
# +- -+
# n1 -> | n3 -> n2 -> n4 | -> n5
# | +- | | -+
# pp p pn
#
# +- -+
# n1 -> | n4 -> n3 -> n2 | -> n5
# | +- | | -+
# pp pn p
dummy = ListNode(0)
dummy.next = head
pp = dummy
# range starts from 1
pos = 1
while pos < m:
pp = pp.next
pos += 1
p = pp.next
while pos < n:
pn = p.next
p.next = pn.next
pn.next = pp.next
pp.next = pn
pos += 1
return dummy.next
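# Small self-check (illustrative, not part of the original solution): build
# 1->2->3->4->5 and reverse positions 2..4, expecting 1->4->3->2->5.
if __name__ == '__main__':
    head = ListNode(1)
    node = head
    for v in range(2, 6):
        node.next = ListNode(v)
        node = node.next
    node = reverse_between(head, 2, 4)
    vals = []
    while node:
        vals.append(node.val)
        node = node.next
    print(vals)  # [1, 4, 3, 2, 5]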
|
"""snmp_interface: module called to generate SNMP monitoring data formatted for use with StatusBoard iPad App
"""
from pysnmp.entity.rfc3413.oneliner import cmdgen
import time
import json
import logging.config
from credentials import SNMP_COMMUNITY
__author__ = 'scott@flakshack.com (Scott Vintinner)'
MAX_DATAPOINTS = 30
SAMPLE_INTERVAL = 60
GRAPH_TITLE = "Core Bandwidth (Mbps)"
DEVICE_IP = "cisco-clt-core"
DEVICE_SNMP = SNMP_COMMUNITY
DEVICE_UPTIME_OID = "1.3.6.1.2.1.1.3.0"
AGGREGATE_INTERFACES = (
{"oid": ["1.3.6.1.2.1.31.1.1.1.6.855", "1.3.6.1.2.1.31.1.1.1.10.855"], "name": "Byod"}, # vlan32
{"oid": ["1.3.6.1.2.1.31.1.1.1.6.856", "1.3.6.1.2.1.31.1.1.1.10.856"], "name": "iPhone"}, # vlan36
{"oid": ["1.3.6.1.2.1.31.1.1.1.6.857", "1.3.6.1.2.1.31.1.1.1.10.857"], "name": "Trusted"}, # vlan40
{"oid": ["1.3.6.1.2.1.31.1.1.1.6.858", "1.3.6.1.2.1.31.1.1.1.10.858"], "name": "Guest"}, # vlan44
{"oid": ["1.3.6.1.2.1.31.1.1.1.6.877", "1.3.6.1.2.1.31.1.1.1.10.877"], "name": "WiredGuest"} # vlan199
)
class MonitorJSON:
"""This is a simple class passed to Monitor threads so we can access the current JSON data in that thread"""
def __init__(self):
self.json = output_message("Waiting " + str(SAMPLE_INTERVAL) + " seconds for first run", "")
class AggregateInterface:
all_aggr_interfaces = [] # Static array containing all interfaces
def __init__(self, name, oids):
self.name = name
self.interface_oids = oids # List of oids
self.snmp_data = [] # Hold raw data
self.datapoints = [] # Holds pretty data
self.__class__.all_aggr_interfaces.append(self) # Add self to static array
class SNMPDatapoint:
def __init__(self, value, timeticks):
self.value = value
self.timeticks = timeticks
def get_snmp(device, community, snmp_oid1, snmp_oid2, snmp_uptime_oid):
"""Returns the value of the specified snmp OID.
Also gets the uptime (TimeTicks) so we know exactly when the sample was taken."""
# Perform a synchronous SNMP GET
cmd_gen = cmdgen.CommandGenerator()
error_indication, error_status, error_index, var_binds = cmd_gen.getCmd(
cmdgen.CommunityData(community), cmdgen.UdpTransportTarget((device, 161)), snmp_oid1, snmp_oid2, snmp_uptime_oid
)
snmp_value1 = None
snmp_value2 = None
snmp_error = None
snmp_uptime_value = None
if error_indication: # Check for SNMP errors
snmp_error = str(error_indication)
else:
if error_status:
snmp_error = error_status.prettyPrint()
else:
# varBinds are returned as SNMP objects, so convert to integers
snmp_value1 = int(var_binds[0][1])
snmp_value2 = int(var_binds[1][1])
snmp_uptime_value = int(var_binds[2][1])
return snmp_value1, snmp_value2, snmp_uptime_value, snmp_error
def calculate_bps(current_sample_octets, current_sample_time, historical_sample_octets, historical_sample_time):
    """Calculate the bits-per-second based on the octets and timeticks (hundredths of a second)."""
    # When the SNMP counter reaches 18446744073709551615 (2**64 - 1), it will roll over and reset to ZERO.
    # If this happens, we want to make sure we don't output a negative bps
    if current_sample_octets < historical_sample_octets:
        # The counter wrapped past zero, so add the counter modulus (max value + 1)
        current_sample_octets += 18446744073709551616
delta = current_sample_octets - historical_sample_octets
    # SysUpTime is in TimeTicks (hundredths of a second), so convert to seconds
seconds_between_samples = (current_sample_time - historical_sample_time) / 100.0
# Multiply octets by 8 to get bits
bps = (delta * 8) / seconds_between_samples
    bps /= 1048576  # Convert to Mbps (dividing by 2**20)
bps = round(bps, 2)
return bps
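# Worked example (illustrative): two samples taken 60 s apart (6000 timeticks)
# with an octet delta of 15,000,000 give (15e6 * 8) / 60 = 2,000,000 bps,
# which is ~1.91 Mbps after dividing by 2**20.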
def output_message(message, detail):
"""This function will output an error message formatted in JSON to display on the StatusBoard app"""
output = {"graph": {"title": GRAPH_TITLE, "error": {"message": message, "detail": detail}}}
return json.dumps(output)
def generate_json(snmp_monitor):
"""This function will take the device config and raw data (if any) from the snmp_monitor and output JSON data
formatted for the StatusBar iPad App"""
logger = logging.getLogger("snmp_interface_6")
time_x_axis = time.strftime("%H:%M") # Use the same time value for all samples per iteration
datasequences = []
snmp_error = None
logger.debug("SNMP generate_json started: " + time_x_axis)
    # First time through, create a list of InterfaceDevices using the constants provided above
if len(AggregateInterface.all_aggr_interfaces) == 0:
for aggr_interface in AGGREGATE_INTERFACES:
AggregateInterface(aggr_interface["name"], aggr_interface["oid"])
# Loop through each aggregate interface, update the SNMP data for each item
for aggr_interface in AggregateInterface.all_aggr_interfaces:
logger.debug(aggr_interface.name + " " + str(aggr_interface.interface_oids))
# Get the SNMP data
try:
snmp_value1, snmp_value2, snmp_uptime_value, \
snmp_error = get_snmp(
DEVICE_IP,
DEVICE_SNMP,
aggr_interface.interface_oids[0],
aggr_interface.interface_oids[1],
DEVICE_UPTIME_OID
)
except Exception as error:
if not snmp_error:
snmp_error = str(error)
if snmp_error:
logger.warning(snmp_error)
break
else:
# Total the value of our 2 interfaces
snmp_value = snmp_value1 + snmp_value2
logger.debug("interface1: " + str(snmp_value1) + " interface2: " + str(snmp_value2))
# Add the raw SNMP data to a list
if len(aggr_interface.snmp_data) == 0: # first time through, initialize the list
aggr_interface.snmp_data = [SNMPDatapoint(snmp_value, snmp_uptime_value)]
else:
aggr_interface.snmp_data.append(SNMPDatapoint(snmp_value, snmp_uptime_value))
# If we already have the max number of datapoints in our list, delete the oldest item
if len(aggr_interface.snmp_data) >= MAX_DATAPOINTS:
del(aggr_interface.snmp_data[0])
# If we have at least 2 samples, calculate bps by comparing the last item with the second to last item
if len(aggr_interface.snmp_data) > 1:
bps = calculate_bps(
aggr_interface.snmp_data[-1].value,
aggr_interface.snmp_data[-1].timeticks,
aggr_interface.snmp_data[-2].value,
aggr_interface.snmp_data[-2].timeticks
)
bps = round(bps, 2)
if len(aggr_interface.datapoints) == 0:
aggr_interface.datapoints = [{"title": time_x_axis, "value": bps}]
else:
aggr_interface.datapoints.append({"title": time_x_axis, "value": bps})
# If we already have the max number of datapoints, delete the oldest item.
if len(aggr_interface.datapoints) >= MAX_DATAPOINTS:
del(aggr_interface.datapoints[0])
# Generate the data sequence
datasequences.append({"title": aggr_interface.name, "datapoints": aggr_interface.datapoints})
# If we ran into an SNMP error, go ahead and write out the JSON file with the error
if snmp_error:
snmp_monitor.json = output_message("Error retrieving SNMP data", snmp_error)
# If this is the first run through, show Initializing on iPad
elif len(AggregateInterface.all_aggr_interfaces[-1].snmp_data) <= 2:
snmp_monitor.json = output_message(
"Initializing bandwidth dataset: " +
str(SAMPLE_INTERVAL * (3 - len(AggregateInterface.all_aggr_interfaces[-1].snmp_data))) +
" seconds...", ""
)
else:
# Generate JSON output and assign to snmp_monitor object (for return back to caller module)
graph = {
"title": GRAPH_TITLE, "type": "line",
"refreshEveryNSeconds": SAMPLE_INTERVAL,
"datasequences": datasequences
}
snmp_monitor.json = json.dumps({"graph": graph})
logger.debug(snmp_monitor.json)
if __name__ == '__main__':
# When run by itself, we need to create the logger object (which is normally created in webserver.py)
try:
f = open("log_settings.json", 'rt')
log_config = json.load(f)
f.close()
logging.config.dictConfig(log_config)
except FileNotFoundError as e:
print("Log configuration file not found: " + str(e))
logging.basicConfig(level=logging.DEBUG) # fallback to basic settings
except json.decoder.JSONDecodeError as e:
print("Error parsing logger config file: " + str(e))
raise
monitor = MonitorJSON()
while True:
main_logger = logging.getLogger(__name__)
generate_json(monitor)
# Wait X seconds for the next iteration
main_logger.debug("Waiting for " + str(SAMPLE_INTERVAL) + " seconds")
time.sleep(SAMPLE_INTERVAL)
|
from app.models import NnTrainingResult
def fetch_from_patch(patch):
return NnTrainingResult.objects.filter(patch=patch).order_by('-end_time')
|
"""
Settings package is acting exactly like settings module in standard django projects.
However, settings combines two distinct things:
(1) General project configuration, which is property of the project
(like which application to use, URL configuration, authentication backends...)
(2) Machine-specific environment configuration (database to use, cache URL, ...)
Thus, we're changing module into package:
* base.py contains (1), so no adjustments there should be needed to make project
on your machine
* config.py contains (2) with sensible default values that should make project
runnable on most expected machines
* local.py contains (2) for your specific machine. File your defaults there.
"""
from base import *
from config import *
from logging import *
try:
from local import *
except ImportError:
pass
|
import sys
if sys.version_info[0] == 2:
from ConfigParser import RawConfigParser
if sys.version_info[0] >= 3:
from configparser import RawConfigParser
import json
import requests
config_file_name = "usermanagement.config"
config = RawConfigParser()
config.read(config_file_name)
host = config.get("server", "host")
endpoint = config.get("server", "endpoint")
org_id = config.get("enterprise", "org_id")
api_key = config.get("enterprise", "api_key")
access_token = config.get("enterprise", "access_token")
page = 0
url = "https://" + host + endpoint + "/users/" + org_id + "/" + str(page)
headers = {
"Content-type" : "application/json",
"Accept" : "application/json",
"x-api-key" : api_key,
"Authorization" : "Bearer " + access_token
}
res = requests.get(url, headers=headers)
print(res.status_code)
print(res.headers)
print(res.text.encode('utf-8'))
if res.status_code == 200:
res_json_data = json.loads(res.text)
result = res_json_data["result"]
if result == "success":
lastPage = res_json_data["lastPage"]
users = res_json_data["users"]
print("Total Users returned " + str(len(users)) + ", last page? " + str(lastPage))
exit(res.status_code)
|
import socket
import os
import sys
SPECIAL_COMMANDS = ["snapshot", "speak", "vibrate"]
def toString(array):
new = ""
for item in array:
new += item+" "
return new
def snapshot(s, cam_id, output_file): # Where "cam" is either 0 or 1.
os.system("termux-camera-photo -c " + str(cam_id) + " " + output_file)
with open("snapshot.jpg", "rb") as f:
filesize = bytes(str(os.path.getsize("snapshot.jpg")).encode("utf-8"))
s.send(filesize)
bytesToSend = f.read(1024)
s.send(bytesToSend)
while bytesToSend != "":
bytesToSend = f.read(1024)
s.send(bytesToSend)
def vibrate(duration):
os.system("termux-vibrate -d " + str(duration))
def say(text):
os.system("termux-tts-speak " + toString(text))
HOST = sys.argv[1]
PORT = 7000
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
with conn:
print("New device connected.")
while True:
data = conn.recv(1024)
decoded = str(data.decode("utf-8"))
splitted = decoded.split()
if splitted[0] in SPECIAL_COMMANDS:
if splitted[0] == "snapshot":
print("Sending snapshot...")
                    snapshot(conn, int(splitted[1]), "snapshot.jpg")  # send over the accepted connection, not the listening socket
elif splitted[0] == "vibrate":
print("Vibrating...")
vibrate(splitted[1])
elif splitted[0] == "speak":
print("Speaking: " + toString(splitted[1:]))
say(splitted[1:])
else:
                os.system(decoded)  # os.system expects str, not raw bytes
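# Hypothetical companion client (an assumption, not part of this script): it
# would connect to the port above and send one whitespace-separated command,
# e.g.:
#
#   import socket
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#       c.connect(("192.168.1.10", 7000))
#       c.send(b"vibrate 500")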
|
'''
@author: Michael Eddington
@version: $Id$
'''
import path, rand, sequencial
__all__ = ["path", "rand", "sequencial"]
|
"""
Last Edited By: Kevin Flathers
    Date Last Edited: 06/07/2017
Author: Kevin Flathers
Date Created: 05/27/2017
Purpose:
"""
from .Tgml import *
class Arc(Tgml):
DEFAULT_PROPERTIES = {}
SUPPORTED_CHILDREN = {}
def __init__(self, *args, input_type='blank', **kwargs):
super().__init__(*args, input_type=input_type, **kwargs)
self.__properties = {}
self.__exposed_properties = {}
@property
def properties(self):
return self.__properties
@properties.setter
def properties(self, value):
self.__properties = value
@property
    def exposed_properties(self):
        return self.__exposed_properties
    @exposed_properties.setter
    def exposed_properties(self, value):
        self.__exposed_properties = value
|
import logging
import os
from jobs.job import Job
from lib.jobstatus import JobStatus
from lib.filemanager import FileStatus
from lib.util import get_climo_output_files, print_line
from lib.slurm import Slurm
class Climo(Job):
def __init__(self, *args, **kwargs):
super(Climo, self).__init__(*args, **kwargs)
self._data_required = ['atm']
self._job_type = 'climo'
        self._dryrun = kwargs.get('dryrun') is True
self._slurm_args = {
'num_cores': '-n 16', # 16 cores
            'run_time': '-t 0-10:00',  # 10 hours run time
'num_machines': '-N 1', # run on one machine
}
# -----------------------------------------------
def setup_dependencies(self, *args, **kwargs):
"""
        Climo doesn't require any other jobs
"""
return True
# -----------------------------------------------
def postvalidate(self, config, *args, **kwargs):
"""
Postrun validation for Ncclimo
Ncclimo outputs 17 files, one for each month and then one for the 5 seasons
"""
if self._dryrun:
return True
regrid_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['post-processing']['climo']['destination_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
climo_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['simulations'][self.case]['native_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
self._output_path = climo_path
# check the output directories exist
if not os.path.exists(regrid_path):
return False
if not os.path.exists(climo_path):
return False
file_list = get_climo_output_files(
input_path=regrid_path,
start_year=self.start_year,
end_year=self.end_year)
if len(file_list) < 17: # number of months plus seasons and annual
msg = '{prefix}: Failed to produce all regridded climos'.format(
prefix=self.msg_prefix())
logging.error(msg)
return False
file_list = get_climo_output_files(
input_path=climo_path,
start_year=self.start_year,
end_year=self.end_year)
if len(file_list) < 17: # number of months plus seasons and annual
msg = '{prefix}: Failed to produce all native grid climos'.format(
prefix=self.msg_prefix())
logging.error(msg)
return False
# nothing's gone wrong, so we must be done
return True
# -----------------------------------------------
def execute(self, config, dryrun=False):
regrid_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['post-processing']['climo']['destination_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
if not os.path.exists(regrid_path):
os.makedirs(regrid_path)
climo_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['simulations'][self.case]['native_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
if not os.path.exists(climo_path):
os.makedirs(climo_path)
self._output_path = climo_path
if not dryrun:
self._dryrun = False
if not self.prevalidate():
return False
if self.postvalidate(config):
self.status = JobStatus.COMPLETED
return True
else:
self._dryrun = True
input_path, _ = os.path.split(self._input_file_paths[0])
cmd = [
'ncclimo',
'-c', self.case,
'-a', 'sdd',
'-s', str(self.start_year),
'-e', str(self.end_year),
'-i', input_path,
'-r', config['post-processing']['climo']['regrid_map_path'],
'-o', climo_path,
'-O', regrid_path,
'--no_amwg_links',
]
        return self._submit_cmd_to_slurm(config, cmd)
# -----------------------------------------------
def handle_completion(self, filemanager, event_list, config):
if self.status != JobStatus.COMPLETED:
msg = '{prefix}: Job failed, not running completion handler'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
return
else:
msg = '{prefix}: Job complete'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
regrid_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['post-processing']['climo']['destination_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
new_files = list()
for regrid_file in get_climo_output_files(regrid_path, self.start_year, self.end_year):
new_files.append({
'name': regrid_file,
'local_path': os.path.join(regrid_path, regrid_file),
'case': self.case,
'year': self.start_year,
'local_status': FileStatus.PRESENT.value
})
filemanager.add_files(
data_type='climo_regrid',
file_list=new_files)
if not config['data_types'].get('climo_regrid'):
config['data_types']['climo_regrid'] = {'monthly': True}
climo_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['simulations'][self.case]['native_grid_name'],
self._short_name, 'climo', '{length}yr'.format(length=self.end_year-self.start_year+1))
        new_files = list()  # reset so regrid files aren't re-added as native climos
        for climo_file in get_climo_output_files(climo_path, self.start_year, self.end_year):
            new_files.append({
                'name': climo_file,
                'local_path': os.path.join(climo_path, climo_file),
                'case': self.case,
                'year': self.start_year,
                'local_status': FileStatus.PRESENT.value
            })
filemanager.add_files(
data_type='climo_native',
file_list=new_files)
if not config['data_types'].get('climo_native'):
config['data_types']['climo_native'] = {'monthly': True}
msg = '{prefix}: Job completion handler done'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
|
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {boolean}
def containsNearbyDuplicate(self, nums, k):
window = set()
left = 0
for right in range(len(nums)):
if nums[right] in window:
return True
if k and len(window) == k:
window.remove(nums[left])
left += 1
if k:
window.add(nums[right])
return False
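# Quick illustrative check (not part of the original solution):
if __name__ == '__main__':
    s = Solution()
    print(s.containsNearbyDuplicate([1, 2, 3, 1], 3))  # True
    print(s.containsNearbyDuplicate([1, 2, 3, 1], 2))  # False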
|
import Broker
from time import time
destination = '/netflix/work'
kind = 'QUEUE'
broker = Broker.Client('localhost', 3322)
msg = Broker.Message(payload='this is the payload', destination=destination)
def produce(n):
for id in xrange(n):
broker.produce(msg, kind)
while True:
n = 1000
t = time()
produce(n)
d = time()-t
print "produced %f msg/s" % (n/d)
|
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from astropy import units
from galpy.orbit import Orbit
from FerrersPotential import FerrersPotential as FP
def allorbits(x,y):
xo = [x[2*i] for i in range(int(len(x)/2))]
xo1 = [x[2*i+1] for i in range(int(len(x)/2))]
yo = [y[2*i] for i in range(int(len(x)/2))]
yo1 = [y[2*i+1] for i in range(int(len(x)/2))]
return [xo,yo],[xo1,yo1]
x = []
y = []
def evolveorbit(icon, ti, tau, pot):
global x
global y
o = Orbit(vxvv=icon) # [R,vR,vT,z,vz,phi]
tf = ti+tau
ts = np.linspace(ti,tf,100)
o.integrate(ts, pot, method = 'leapfrog')
x.append(o.x(ts[0]))
y.append(o.y(ts[0]))
return [o.R(tf),o.vR(tf),o.vT(tf),o.z(tf),o.vz(tf),o.phi(tf)]
def dvector(o,d):
return np.array(d)-np.array(o)
def alpha(delta):
size = np.linalg.norm(delta)
return size, delta/size
def LEs(time, size, initsize):
return np.log(size/initsize)
def lyapunov(o,tau, potential, Tm):
global x
global y
x,y = [],[]
time,LE = [],[]
continuing = True
i = 1
w = [1.,0.,0.,0.,0.,0.]
initsize = 1e-5
while continuing:
icdo = list(np.array(o)+initsize*np.array(w))
newo = evolveorbit(o, tau*i, tau, potential)
newdo = evolveorbit(icdo, tau*i, tau, potential)
wj = dvector(o=newo,d=newdo)
size, veps0 = alpha(wj)
LE.append(LEs(tau*i, size, initsize))
time.append(tau*i)
if i*tau > Tm:
break
i += 1
o = newo
w = veps0
A = np.array([sum(LE[:i]) / time[i] for i in range(len(time))])
return A, time
input_filename = sys.argv[1]
input_file = open(input_filename, 'r')
dataline = input_file.read().split('\n')
for line in dataline:
if line != '':
initc_string = line.split('\t')
input_file.close()
icon = [float(initc_string[i]) for i in range(len(initc_string))]
print(icon)
pmw = FP(amp = 1, a = 8*units.kpc, b = 0.35, c = 0.2375, normalize = True, omegab = 10.*units.km/units.s/units.kpc)
tau = 0.01
Tm = 100 #680s
time = np.linspace(0,10,11)
les = np.linspace(0,20,11)
for i in range(len(les)):
print(str(time[i])+'\t'+str(les[i]))
|
def gray_to_binary(gray,bits=4):
"""converts a given gray code to its binary number"""
mask = 1 << (bits - 1)
binary = gray & mask
    for i in range(bits-1):
bmask = 1 << (bits - i-1)
gmask = bmask >> 1
if (binary & bmask) ^ ((gray & gmask) << 1):
binary = binary | gmask
return binary
def binary_to_gray(binary,bits=4):
"""converts a given binary number to is gray code"""
mask = 1 << (bits - 1)
gray = binary & mask
binary = (binary ^ binary << 1) >> 1
gray = gray | binary
return gray
def ROR(x, n, bits = 32):
n = n % bits
mask = (2**n) - 1
mask_bits = x & mask
return (x >> n) | (mask_bits << (bits - n))
def ROL(x, n, bits = 32):
n = n % bits
return ROR(x, bits - n, bits)
def bin_string(integer):
return str(integer) if integer<=1 else bin_string(integer>>1) + str(integer&1)
def get_signed_number(number, bitLength):
mask = (2 ** bitLength) - 1
if number & (1 << (bitLength - 1)):
return number | ~mask
else:
return number & mask
def get_unsigned_number(number, bitLength):
mask = pow(2,bitLength) - 1
return number & mask
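# Round-trip sanity checks (illustrative, not part of the original module):
if __name__ == '__main__':
    for n in range(16):
        assert gray_to_binary(binary_to_gray(n)) == n
    assert ROR(0x12345678, 8) == 0x78123456
    assert ROL(ROR(0xDEADBEEF, 13), 13) == 0xDEADBEEF
    print(bin_string(5))               # 101
    print(get_signed_number(0xFF, 8))  # -1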
|
ADDRESS = ""
PORT = 1337
import http.server as hs
from modules.Tunnel import tunnel
import json
class CustomRequestHandler(hs.BaseHTTPRequestHandler):
    data = {"user": ""}
    def do_GET(self):
        print("\n\n")
        self.send_response(200)
        self.send_header("Content-Type", "text/JSON")
        self.send_header("Content-Encoding", "ASCII")
        self.end_headers()
        if self.data["user"] != self.client_address:
            self.data["user"] = self.client_address
        data = self.rfile.read(int(self.headers["content-length"]))
        data = data.decode("ascii")  # decode bytes to str (replaces the old str(...).strip("b'") hack)
        data = json.loads(data)
        res = self.execute(data)
        #self.wfile.write(b"GET-ed Successfully!")
        if res is not None:  # wfile expects bytes, so encode non-bytes results
            self.wfile.write(res if isinstance(res, bytes) else str(res).encode("ascii"))
        return
    def execute(self, params):
        print("GOT ", params)
        # default values
        action = None
        value = None
        extras = None
        try:
            action = params["action"]
            value = params["value"]
            extras = params["extras"]
        except KeyError:
            pass
        ret = None  # To be returned at end
        if action == "tunnel":
            if "tunnel_obj" not in self.data.keys():
                self.data["tunnel_obj"] = tunnel.Tunnel()
            tunnel_obj = self.data["tunnel_obj"]  # retrieve tunnel object
            url = value
            if params["sub_action"] == "get":
                ret = tunnel_obj.get(url)
            elif params["sub_action"] == "post":
                ret = tunnel_obj.post(url)
            elif params["sub_action"] == "session":
                ret = tunnel_obj.session(url)
            elif params["sub_action"] == "login session":
                ret = tunnel_obj.login_session(url, extras)
            self.data["tunnel_obj"] = tunnel_obj  # store tunnel object
        elif action == "keylogger":
            if "logger_obj" not in self.data.keys():
                # NOTE: no `logger` module is imported in this file; a keylogger
                # module providing LoggerThread must be imported for this branch.
                self.data["logger_obj"] = logger.LoggerThread()
            logger_obj = self.data["logger_obj"]  # retrieve logger object
            sub_act = params["sub_action"]
            if sub_act == "start":
                ret = logger_obj.start()
            elif sub_act == "stop":
                ret = logger_obj.stop()
            elif sub_act == "get":
                ret = logger_obj.get_file()
            self.data["logger_obj"] = logger_obj
        return ret
if __name__ == "__main__":
    server_addr = (ADDRESS, PORT)
    request_handler = CustomRequestHandler
    http_daemon = hs.HTTPServer(server_addr, request_handler)
    try:
        print("Starting server on port " + str(PORT))
        http_daemon.serve_forever()
    except KeyboardInterrupt:
        print("\n\nKilling Server...")
        exit()
|
from datetime import datetime
from elasticsearch import Elasticsearch
import redis
tskey = "pytsbench"
es = Elasticsearch(["localhost:9200"])
elastic_5_0 = True
client = redis.Redis()
rsize = int(client.info("memory")['used_memory'])
num_entries = 3
def info():
'''
Data:
storage_used
pages_visited
usage_time
unique ids:
user_id
device_id
Metadata:
username
email
account
'''
def delete_all():
try:
es.indices.delete(tskey)
except:
pass
for key in client.execute_command('KEYS', tskey + "*"):
client.execute_command('DEL', key)
global rsize
rsize = int(client.info("memory")['used_memory'])
def get_timestamp(day, hour, minute):
return "2016:01:%.2d %.2d:%.2d:00" % (day, hour, minute)
def add_redis_entry(i, day, hour):
timestamp = get_timestamp(1, 0, 0)
user_id = "user_id_%d" % (i)
dev_id = "device_id_%d" % (i)
key = "%s_%s_%s" % (tskey, user_id, dev_id)
if day == 1 and hour == 0:
client.hmset(key, {
"user_id": user_id,
"device_id": dev_id,
"username": "username%d" % (i),
"email": "username%d@timeseries.com" % (i),
"account": "standard"
})
client.execute_command('TS.CREATE', key + "_storage_used", "hour", timestamp)
client.execute_command('TS.CREATE', key + "_pages_visited", "hour", timestamp)
client.execute_command('TS.CREATE', key + "_usage_time", "hour", timestamp)
for e in range(1, num_entries + 1):
timestamp = get_timestamp(day, hour, e)
client.execute_command('TS.INSERT', key + "_storage_used", str(i * 1.1 * e), timestamp)
client.execute_command('TS.INSERT', key + "_pages_visited", str(i * e), timestamp)
client.execute_command('TS.INSERT', key + "_usage_time", str(i * 0.2 * e), timestamp)
def add_es_entry(i, day, hour, use_5_0=None):
    # fall back to the module-level elastic_5_0 flag when no override is given
    es5 = elastic_5_0 if use_5_0 is None else use_5_0
    timestamp = get_timestamp(day, hour, 0)
    prefix = "params." if es5 else ""
user_id = "user_id_%d" % (i)
dev_id = "device_id_%d" % (i)
key = "%s_%s" % (user_id, dev_id)
script = "ctx._source.count += 1; "
script += "ctx._source.storage_used += %sstorage_used; " % (prefix)
script += "ctx._source.pages_visited += %spages_visited; " % (prefix)
script += "ctx._source.usage_time += %susage_time; " % (prefix)
# Can't really perform the aggregation in elastic. its too slow.
#for e in range(1, num_entries + 1):
for e in range(1, 2):
params = {
"storage_used": i * 1.1 * e,
"pages_visited": i * e,
"usage_time": i * 0.2 * e
}
upsert = {
"user_id": user_id,
"device_id": dev_id,
"username": "username%d" % (i),
"email": "username%d@timeseries.com" % (i),
"account": "standard",
"count": 1
}
upsert.update(params)
script_1_7 = {
"script": script,
"params": params,
"upsert": upsert
}
script_5_0 = {
"script": {
"inline": script,
"lang": "painless",
"params": params
},
"upsert": upsert
}
        body = script_5_0 if es5 else script_1_7
es.update(index=tskey, doc_type=tskey, id=key + "_" + timestamp, body=body)
def add_es_entry_5_0(i, day, hour):
add_es_entry(i, day, hour, True)
def sizeof_fmt(num, suffix='b'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def get_redis_memory_size():
sz = int(client.info("memory")['used_memory']) - rsize
return "size: %s (%d)" % (sizeof_fmt(sz), sz)
def get_redis_size(thekey = tskey):
redis_size = 0
for key in client.execute_command('KEYS', thekey + "*"):
redis_size += client.execute_command('DEBUG OBJECT', key)['serializedlength']
return "size: %s (%d)" % (sizeof_fmt(redis_size), redis_size)
def get_es_size():
ind = 0
try:
ret = es.indices.stats(tskey)
ind = ret['_all']['total']['store']['size_in_bytes']
except:
pass
return "size: %s (%d)" % (sizeof_fmt(ind), ind)
def run_for_all(size, cb, msg, size_cb):
start = datetime.now().replace(microsecond=0)
for i in range(1, size + 1):
for day in range(1, 31):
for hour in range(0, 24):
cb(i, day, hour)
end = datetime.now().replace(microsecond=0)
print msg, (end - start), size_cb()
def do_benchmark(size):
print "delete data"
delete_all()
print "----------------------------------------"
print "benchmark size: ", size, "number of calls: ", num_entries * size * 24 * 30
run_for_all(size, add_redis_entry, "redis ", get_redis_memory_size)
#run_for_all(size, add_redis_hset_entry, "hset ", get_redis_hset_size)
#run_for_all(size, add_redis_list_entry, "list ", get_redis_list_size)
run_for_all(size, add_es_entry, "elastic ", get_es_size)
#run_for_all(size, add_es_entry_5_0, "es5_0", get_es_size_5_0)
print "----------------------------------------"
do_benchmark(1)
do_benchmark(10)
|
class autoconf(Wig):
git_uri = 'git://git.sv.gnu.org/autoconf'
tarball_uri = 'http://ftp.gnu.org/gnu/autoconf/autoconf-{RELEASE_VERSION}.tar.gz'
last_release_version = '2.69'
|
"""
CombinatoricalMediaSimulations.py
Simulates all possible minimal media compositions consisting of unique carbon,
nitrogen, sulfate, and phosphate sources. If a single compound provides multiple
elemental sources, it serves as the sole source for all of them.
KO analyses of genes and reactions are optional.
TODO:
1. Yaml has to be replaced with JSON (since 2.6 JSON is part of the stdlib)
Created by Nikolaus Sonnenschein on 2010-11-03.
Copyright (c) 2010 Jacobs University of Bremen. All rights reserved.
"""
import sys
import time
import textwrap
import Queue
import yaml
if sys.argv[1] != 'client':
from ifba.storage.hdf5storage import SimulationDB, h5Container
from ifba.distributedFBA.networking import Server, Client
from ifba.distributedFBA.concurrency import GeneratorInputClient, h5OutputClient
from ifba.GlpkWrap.util import ImportCplex
from ifba.GlpkWrap.metabolism import Metabolism
from ifba.GlpkWrap.fluxdist import FBAsimulationResult
if sys.argv[1] != 'client':
from RandomMediaSimulations import generateStorageObject
from ifba.glpki.glpki import glp_delete_prob
def generateStorageObject(path, lp):
"""docstring for generateStorageObject"""
return SimulationDB(h5Container(path, lp))
def readSources(path):
f = open(path, 'r')
sources = list()
for line in f:
line = line.replace('\n', '')
sources.append(line.split('\t'))
return [tuple(sub) for sub in sources]
def generateCombinatoricalSets(*sources):
    """Yield every combination picking one element from each source list
    (a cartesian product; replaces the fragile exec-based generator)."""
    import itertools
    return itertools.product(*sources)
def generateCombinatoricalMedia(uptake, setGenerator):
for s in setGenerator:
yield dict([('R("'+elem+'_Transp")', (0, uptake)) for elem in s])
def readSourcesTable(path):
"""Reads a table of the form:
source carbon nitrogen phosphor sulfur
carb1 1 1 0 1"""
auxFunc = lambda x: (x[0], int(x[1]), int(x[2]), int(x[3]), int(x[4]))
return [auxFunc(line.rstrip().split('\t')) for line in open(path)]
def possibleSigs(sig):
return [(elem1, elem2, elem3, elem4) \
for elem1 in [0, 1][0:abs(sig[0] - 1)+1] \
for elem2 in [0, 1][0:abs(sig[1] - 1)+1] \
for elem3 in [0, 1][0:abs(sig[2] - 1)+1] \
for elem4 in [0, 1][0:abs(sig[3] - 1)+1]][1:]
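# Example (illustrative): possibleSigs((1, 0, 0, 0)) yields the 7 signatures
# (0, n, p, s) with at least one of the remaining elements set, i.e. every way
# of covering nitrogen/phosphor/sulfur once carbon is already provided.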
def _DirtyHack(sourceTable, seedSources):
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combiDict = dict([(k, list()) for k in [(elem1, elem2, elem3, elem4) for elem1 in [0, 1] for elem2 in [0, 1] for elem3 in [0, 1] for elem4 in [0, 1]]])
for row in sourceTable:
combiDict[tuple(row[1:])].append(row[0])
for i, carb in enumerate(seedSources):
sig = sources2sig[carb]
# print carb, sig
# print "possible other signatures:"
nextSigs = possibleSigs(sig)
if nextSigs == []:
yield [carb]
for sig2 in nextSigs:
potentialSources = combiDict[sig2]
# print potentialSources
tmp1 = [[carb, s] for s in potentialSources]
# print '\t', sig2
combSig1 = tuple([sig[i] + sig2[i] for i in range(4)])
# print "\tpossible other signatures2:"
nextSigs = possibleSigs(combSig1)
if nextSigs == []:
for elem in tmp1:
yield elem
for sig3 in nextSigs:
potentialSources = combiDict[sig3]
# print potentialSources
# if potentialSources == []:
# for elem in tmp1:
# yield elem
tmp2 = [elem + [s] for elem in tmp1 for s in potentialSources]
# print '\t\t', sig3
combSig2 = tuple([combSig1[i] + sig3[i] for i in range(4)])
# print "\t\tpossible other signatures2:"
nextSigs = possibleSigs(combSig2)
if nextSigs == []:
for elem in tmp2:
yield elem
for sig4 in nextSigs:
potentialSources = combiDict[sig4]
# print "4", potentialSources
# if potentialSources == []:
# for elem in tmp2:
# yield elem
# print 3*'\t', sig4
# print textwrap.fill(str(tmp2), initial_indent=3*'\t', subsequent_indent=3*'\t')
for s in potentialSources:
for elem in tmp2:
yield elem + [s]
def combinatoricalSources(sourceTable, seedSources):
mediaGenerator = _DirtyHack(sourceTable, seedSources)
stuff = set()
for med in mediaGenerator:
# print 4*'\t', med
stuff.add(tuple(sorted(med)))
# print len(stuff)
return stuff
def testCombinatoricalSources(sourceTable, carbonSources):
import numpy
stuff = combinatoricalSources(sourceTable, carbonSources)
stuff3 = set()
for elem in list(stuff):
tot = numpy.array([sources2sig[s] for s in elem]).sum()
if tot != 4:
print elem
print [sources2sig[s] for s in elem]
stuff3.add(tot)
print stuff3
def generateSolveMediumObject(path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
return SolveMedium(path2model=path2model, medium=medium, include=include, objective=objective, optimizationRoutine=optimizationRoutine, koQ=koQ, *args, **kwargs)
class SolveMedium(object):
def __init__(self, path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
self.koQ = koQ
self.optimizationRoutine = optimizationRoutine
self.objective = objective
self.lp = Metabolism(ImportCplex(path2model))
self.path2model = path2model
if objective:
self.lp.setReactionObjective(self.objective)
self.preMed = dict([(r, (-1000., 0)) for r in self.lp.getTransporters()])
self.preMed.update(include)
self.lp.modifyColumnBounds(self.preMed)
self.lp.modifyColumnBounds(dict([(r, (0., 1000.)) for r in self.lp.getReactions()]))
self.lp.modifyColumnBounds(medium)
self.lp.eraseHistory()
def run(self, *args, **kwargs):
"""docstring for run"""
f = getattr(self.lp, self.optimizationRoutine)()
knockoutEffects = dict()
wt = f[self.objective]
if self.koQ and wt > 0.:
knockoutEffects = self.lp.singleKoAnalysis(f.getActiveReactions())
for k in knockoutEffects:
knockoutEffects[k] = knockoutEffects[k] / wt
self.lp.undo()
return FBAsimulationResult(f, knockoutEffects, self.lp.getColumnBounds(),
self.lp.getObjectiveFunction(),
time.time(), self.path2model, "Test")
def __del__(self):
"""docstring for __del__"""
glp_delete_prob(self.lp.lp) # FIXME this is a dirty hack
del self
def solveMedium(path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
"""doc"""
lp = Metabolism(ImportCplex(path2model))
if objective:
lp.setReactionObjective(objective)
preMed = dict([(r, (-1000., 0)) for r in lp.getTransporters()])
preMed.update(include)
lp.modifyColumnBounds(preMed)
lp.modifyColumnBounds(medium)
lp.modifyColumnBounds(dict([(r, (0., 1000.)) for r in lp.getReactions()]))
lp.eraseHistory()
# print lp.cplex()
f = lp.pFBA()
# simulationStorage = generateStorageObject(outputfile, lp)
knockoutEffects = dict()
wt = f[objective]
print wt
if koQ and wt > 0.:
knockoutEffects = lp.singleKoAnalysis(f.getActiveReactions())
for k in knockoutEffects:
knockoutEffects[k] = knockoutEffects[k] / wt
lp.initialize()
# print knockoutEffects
return FBAsimulationResult(f, knockoutEffects, lp.getColumnBounds(),
lp.getObjectiveFunction(),
time.time(), path2model, "Test")
def basicFunctionality(outputfile, configPath):
config = yaml.load(open(configPath))
descr = yaml.dump(config)
print descr
config['descr'] = descr
sourceTable = readSourcesTable(config["sourcesPath"])
carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combSources = list(combinatoricalSources(sourceTable, carbonSources))
gen = generateCombinatoricalMedia(config["uptake"], combSources)
run = 0
for medium in gen:
print "Run:", run
run += 1
solveMedium(medium=medium,**config)
def client(serverip):
"""docstring for client"""
counter = 0
client = Client(task=generateSolveMediumObject, host=serverip)
while True:
counter = counter + 1
print counter
client.run()
def stub(gen, config):
for elem in gen:
config["medium"] = elem
yield config
def server(outputfile='test.h5', configPath='parameters.yaml'):
"""Server"""
config = yaml.load(open(configPath))
descr = yaml.dump(config)
print descr
config['descr'] = descr
sourceTable = readSourcesTable(config["sourcesPath"])
carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combSources = list(combinatoricalSources(sourceTable, carbonSources))
gen = generateCombinatoricalMedia(config["uptake"], combSources)
lp = Metabolism(ImportCplex(config["path2model"]))
simulationStorage = generateStorageObject(outputfile, lp)
inputQueue = Queue.Queue(20)
outputQueue = Queue.Queue(20)
# gen2 = (config["medium"] = elem for elem in gen)
gen2 = stub(gen, config)
t1 = GeneratorInputClient(inputQueue, gen2)
t1.start()
time.sleep(1)
t2 = h5OutputClient(outputQueue, simulationStorage)
t2.start()
time.sleep(1)
s = Server(inputQueue=inputQueue, outputQueue=outputQueue, host="localhost")
print s
s.run()
if __name__ == '__main__':
# print possibleSigs((0,0,1,1))
#
# sourcePath = '/Users/niko/arbeit/Data/SBMLmodels/iAF1260/biologValidatedSourcesWithElementalComposition.tsv'
# sourceTable = readSourcesTable(sourcePath)
# carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
# print carbonSources
# sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
# testCombinatoricalSources(sourceTable, carbonSources)
# combSources = list(combinatoricalSources(sourceTable, carbonSources))
# print list(generateCombinatoricalMedia(20., combSources))[0:10]
# include = dict([['R("R_ATPM")', [8.39, 8.39]],
# ['R("Mo2b_Transp")', [0, 18.5]],
# ['R("Mco2b_Transp")', [-1000, 1000]],
# ['R("Mh2ob_Transp")', [-1000, 1000]],
# ['R("Mhb_Transp")', [-1000, 1000]],
# ['R("Mna1b_Transp")', [-1000, 1000]],
# ['R("Mkb_Transp")', [-1000, 1000]],
# ['R("Mca2b_Transp")', [-1000, 1000]],
# ['R("Mcu2b_Transp")', [-1000, 1000]],
# ['R("Mmg2b_Transp")', [-1000, 1000]],
# ['R("Mzn2b_Transp")', [-1000, 1000]],
# ['R("Mmobdb_Transp")', [-1000, 1000]],
# ['R("Mfe2b_Transp")', [-1000, 1000]],
# ['R("Mfe3b_Transp")', [-1000, 1000]],
# ['R("Mcobalt2b_Transp")', [-1000, 1000]],
# ['R("Mmn2b_Transp")', [-1000, 1000]],
# ['R("Mclb_Transp")', [-1000, 1000]],
# ['R("R_CAT")', [0, 0]],
# ['R("R_SPODM")', [0, 0]],
# ['R("R_SPODMpp")', [0, 0]],
# ['R("R_FHL")', [0, 0]]])
# gen = generateCombinatoricalMedia(20., combSources)
# for i in range(10):
# medium = gen.next()
# print medium
# solveMedium('../models/iAF1260templateMinMax.lp', medium=medium, include=include, objective='R("R_Ec_biomass_iAF1260_core_59p81M")')
try:
sys.argv[1]
except IndexError:
sys.argv.append('server')
sys.argv.append('test.h5')
usage = """Usage:
python RandomMediaSimulations.py standalone storagefile configfile --> standalone mode
python RandomMediaSimulations.py server storagefile configfile --> server mode
python RandomMediaSimulations.py client serverip --> client mode"""
try:
if sys.argv[1] == 'standalone':
basicFunctionality(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'server':
server(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'client':
client(sys.argv[2])
else:
print usage
except IndexError:
print usage
|
import datetime
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('desecapi', '0009_token_allowed_subnets'),
]
operations = [
migrations.AddField(
model_name='token',
name='max_age',
field=models.DurationField(default=None, null=True, validators=[django.core.validators.MinValueValidator(datetime.timedelta(0))]),
),
migrations.AddField(
model_name='token',
name='max_unused_period',
field=models.DurationField(default=None, null=True, validators=[django.core.validators.MinValueValidator(datetime.timedelta(0))]),
),
]
|
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="tickprefix", parent_name="heatmap.colorbar", **kwargs
):
super(TickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
"""Define Rotest's core models.
The Django infrastructure expects a models.py file containing all the models
definitions for each application. This folder is a workaround used in order
to separate the different core application models into different files
"""
from .run_data import RunData
from .case_data import CaseData
from .suite_data import SuiteData
from .signature import SignatureData
from .general_data import GeneralData
|
def tnuml(n):
    """Convert every item of n to a float and return them as a list."""
    o = []
    for item in n:
        o.append(float(item))
    return o
def configr(vars, filename, section):
import configparser
c = configparser.ConfigParser()
c.read(filename)
r = []
for _ in vars:
a = c.get(section, _)
r.append(a)
return r
def writer (fname, data, sep):
    with open(fname, 'w') as z:
        for x in data:
            z.write (x)
            z.write (sep)
def po (number, pof):
b = number ** pof
return b
def factors (number):
current, ao, nums = 0, 0, []
while current < number:
ao = ao + 1
current = number % ao
if current == 0:
nums.append(ao)
return nums
def sqroot (number):
    fac = factors (number)
    for x in fac:
        if x * x == number:
            return (x)
    # no factor squares to the number, so there is no integer square root
    return "No Square Root Found"
def lseq(ls1, ls2, ls3, ls4):
if ls2 - ls1 == ls4 - ls3:
lsd1 = ls2 - ls1 # common difference
lsc = lsd1 - ls1 # constant e.g. Tn = xn + c
lsc = lsc * -1
# Fixes Bug
if lsc > 0:
lsc = "+" + str(lsc)
if lsc == 0: # added to prevent problem where 0 is neither '+' or '-'. So a sequence: 1;2;3;4 -> Tn = n0
return ("Tn = %sn" % (lsd1))
else:
return ("Tn = %sn" % (lsd1) + ("%s" % (lsc)))
elif ls2 - ls1 != ls4 - ls3:
return ("This is not a Linear Equation!")
def qseq (qs1, qs2, qs3, qs4):
d1 = qs3 - qs2
d2 = qs2 - qs1
d3 = qs4 - qs3
d4 = qs3 - qs2
d5 = d1 - d2
# checking if 2nd difference is constant
if d1 - d2 == d3 - d4:
a = d5 / 2 # a = 2nd difference/2
b = qs2 - qs1 - 3 * a # b = (T2 - T1)-3a
c = qs1 - a - b # c = T1-(a+b)
return "Tn = %sn² + %sn + %s" % ( a, b, c)
    # use your factor generator to add (x+y)(x+y) format
else: # this line no longer works: enter: 1; 2; 3; 4
return "This is not a quadratic sequence!"
def lineareq(numbers):
ai = numbers[3]
bi = numbers[1] * -1
ci = numbers[2] * -1
di = numbers[0]
# Calculate the Determinent of the inverse
de = ai * di - bi * ci
# Calculate the final answer, for easy eye viewing
xo = ai * numbers[4]
xoo = bi * numbers[5]
ans1 = xo + xoo
xo = ci * numbers[4]
xoo = di * numbers[5]
ans2 = xo + xoo
# Finish Equation
ans1 = ans1 / de
ans2 = ans2 / de
return ans1, ans2
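# Worked examples (illustrative, not part of the original module):
if __name__ == '__main__':
    print(lseq(2, 4, 6, 8))               # Tn = 2n
    print(qseq(1, 4, 9, 16))              # Tn = 1.0n² + 0.0n + 0.0
    # lineareq takes [a, b, c, d, e, f] for a*x + b*y = e and c*x + d*y = f
    print(lineareq([2, 1, 1, 3, 5, 10]))  # (1.0, 3.0)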
|
from typing import (
Callable,
Iterator,
TypeVar,
)
def exists(it: Iterator) -> bool:
try:
next(it)
except StopIteration:
return False
return True
T = TypeVar('T')
def and_(*args: Callable[[T], bool]) -> Callable[[T], bool]:
def _inner(x: T) -> bool:
for fn in args:
if not fn(x):
return False
return True
return _inner
def or_(*args: Callable[[T], bool]) -> Callable[[T], bool]:
def _inner(x: T) -> bool:
for fn in args:
if fn(x):
return True
return False
return _inner
def not_(fn: Callable[[T], bool]) -> Callable[[T], bool]:
def _inner(x: T) -> bool:
return not fn(x)
return _inner
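# Illustrative usage (not part of the original module): compose small
# predicates and test them, plus a generator for exists().
if __name__ == '__main__':
    is_positive_even = and_(lambda x: x > 0, lambda x: x % 2 == 0)
    print(is_positive_even(4))                     # True
    print(or_(not_(is_positive_even))(3))          # True
    print(exists(x for x in range(3) if x == 2))   # True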
|
import os, sys
module = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, module)
from netkiller.kubernetes import *
namespace = Namespace()
namespace.metadata.name('development')
namespace.metadata.namespace('development')
service = Service()
service.metadata().name('nginx')
service.metadata().namespace('development')
service.spec().selector({'app': 'nginx'})
service.spec().type('NodePort')
service.spec().ports([{
'name': 'http',
'protocol': 'TCP',
'port': 80,
'targetPort': 80
}])
deployment = Deployment()
deployment.apiVersion('apps/v1')
deployment.metadata().name('nginx').labels({'app': 'nginx'}).namespace('development')
deployment.spec().replicas(2)
deployment.spec().selector({'matchLabels': {'app': 'nginx'}})
deployment.spec().template().metadata().labels({'app': 'nginx'})
deployment.spec().template().spec().containers().name('nginx').image(
'nginx:latest').ports([{
'containerPort': 80
}])
ingress = Ingress()
ingress.apiVersion('networking.k8s.io/v1')
ingress.metadata().name('nginx')
ingress.metadata().namespace('development')
ingress.metadata().annotations({'ingress.kubernetes.io/ssl-redirect': "false"})
ingress.spec().rules([{
# 'host': 'www.netkiller.cn',
'http': {
'paths': [{
'path': '/',
'pathType': 'Prefix',
'backend': {
'service': {
'name': 'nginx',
'port': {
'number': 80
}
}
}
}]
}
}])
compose = Compose('development')
compose.add(namespace)
compose.add(service)
compose.add(deployment)
compose.add(ingress)
kubernetes = Kubernetes()
kubernetes.compose(compose)
kubernetes.main()
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(frame,13)
#result is dilated for marking the corners, not important
'''
Filtering the white
'''
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
#In testing!!!
sensitivity = 3
lower_white = np.array([0,0,255-sensitivity])
upper_white = np.array([255,sensitivity,255])
mask = cv2.inRange(hsv, lower_white, upper_white)
# Frame is target, res is output
res = cv2.bitwise_and(frame,frame, mask= mask)
#Grayscale
draw = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(draw,60,0)
#Perhaps gray, then edges?
# Finding contours from *edges*
contours,h = cv2.findContours(edges,1,2)
#print(contours)
#In the form of (contours, eplison, True[?])
num_squares = 0
for cnt in contours:
num_squares += 1
epsilon = 0.1*cv2.arcLength(cnt,True)
approx = cnt #cv2.approxPolyDP(cnt,epsilon,True)
# Approximating the rectangles basically is bad with the amount of light filtering going on.
#cv2.drawContours(draw, contours, -1, (0,255,0), 3)
rect = cv2.minAreaRect(approx)
tilt = int(rect[2])
print("Rect[2] is currently at: ", tilt)
        if (rect[2] > -100) and (rect[2] < -70): #Looking for squares that are only straight
#Low point High point
#print("Passed!")
box = cv2.cv.BoxPoints(rect)
box = np.int0(box)
cv2.drawContours(draw,[box],0,(0,0,255),2)
cv2.drawContours(frame,[box],0,(0,0,255),2)
print("The center of square " ,num_squares , "Is at: " , box[1])
# Threshold for an optimal value, it may vary depending on the image.
# Display the resulting frame
cv2.imshow('blur',blur)
cv2.imshow('Draw',draw)
cv2.imshow('Edges',edges)
cv2.imshow('frame', frame)
#cv2.imshow('grey',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results: it plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
             jw                 -jw               -jwM
    jw    B(e  )    b[0] + b[1]e    + .... + b[M]e
 H(e  ) = ------ = -------------------------------------
             jw                 -jw               -jwN
          A(e  )    a[0] + a[1]e    + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results: it plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
           d        jw
  D(w) = - -- arg H(e  )
           dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The analogous function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at the corresponding frequencies is undefined.
When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
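# Illustrative sketch, not part of the original module: a pole on the
# unit circle makes the group delay undefined there, so `group_delay`
# warns and returns 0 at the singular frequency, as the docstring notes.
def _demo_group_delay_singularity():
    # H(z) = 1 / (1 - z**-1) has a pole at z = 1, i.e. at w = 0.
    w, gd = group_delay(([1.0], [1.0, -1.0]), w=8)
    return w[0], gd[0]  # w[0] == 0.0 and gd[0] == 0.0, with a warning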
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
        B0(z)   B1(z)         B{n-1}(z)
 H(z) = ----- * ----- * ... * ---------
        A0(z)   A1(z)         A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... ['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
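# Illustrative sketch, not part of the original module: `tf2zpk` applied
# to the analog transfer function H(s) = s / (s^2 + 8s + 25) from the
# `findfreqs` example recovers one zero at the origin and a conjugate
# pole pair.
def _demo_tf2zpk():
    z, p, k = tf2zpk([1, 0], [1, 8, 25])
    # z == [0.], p == [-4.+3.j, -4.-3.j] (in some order), k == 1.0
    return z, p, k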
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
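# Illustrative sketch, not part of the original module: `zpk2tf` inverts
# the previous example; because the poles form a conjugate pair, the
# realification step above returns real coefficient arrays.
def _demo_zpk2tf():
    b, a = zpk2tf([0.0], [-4 + 3j, -4 - 3j], 1.0)
    # b == [1., 0.], a == [1., 8., 25.], both real-valued
    return b, a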
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
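# Illustrative sketch, not part of the original module: `sos2tf` simply
# multiplies the section polynomials with `np.polymul`, so two biquads
# combine into degree-4 numerator and denominator arrays.
def _demo_sos2tf():
    sos = np.array([[1., 0., 0., 1., -0.5, 0.25],
                    [1., 1., 0., 1., 0., 0.5]])
    b, a = sos2tf(sos)
    return b, a  # len(b) == len(a) == 5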
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections * 2, np.complex128)
p = np.empty(n_sections * 2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2 * section:2 * (section + 1)] = zpk[0]
p[2 * section:2 * (section + 1)] = zpk[1]
k *= zpk[2]
return z, p, k
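# Illustrative sketch, not part of the original module: each section
# contributes exactly two zeros and two poles, so a first-order section
# (trailing zero coefficients) shows up with an extra zero and pole at
# the origin.
def _demo_sos2zpk():
    sos = np.array([[1., 1., 0., 1., -0.5, 0.]])
    z, p, k = sos2zpk(sos)
    # z holds {-1, 0}, p holds {0.5, 0}, k == 1.0
    return z, p, k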
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case: choose a complex zero to pair with, so that a
# real zero remains available for a later first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which need
equal-length numerators when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
"""
try:
# asarray can throw a ValueError if one of the numerators
# is a scalar and another is array-like,
# e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
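# Illustrative sketch, not part of the original module: the ValueError
# fallback pads a scalar numerator to the width of the longest one,
# which is exactly the SIMO case mentioned in the docstring.
def _demo_align_nums():
    aligned = _align_nums([5, [1, 2, 3]])
    # aligned == [[0., 0., 5.],
    #             [1., 2., 3.]]
    return aligned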
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
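# Illustrative sketch, not part of the original module: a numerator with
# an effectively-zero leading coefficient triggers the BadCoefficients
# warning and is trimmed, and both polynomials are scaled so den[0] == 1.
def _demo_normalize():
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', BadCoefficients)
        num, den = normalize([0.0, 2.0], [2.0, 4.0])
    # num == [1.], den == [1., 2.]
    return num, den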
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
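# Illustrative sketch, not part of the original module: scaling the
# unity-cutoff RC prototype H(s) = 1 / (s + 1) to wo = 10 performs the
# substitution s -> s / wo, giving H(s) = 10 / (s + 10).
def _demo_lp2lp():
    b, a = lp2lp([1.0], [1.0, 1.0], wo=10.0)
    # b == [10.], a == [1., 10.]
    return b, a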
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
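# Illustrative sketch, not part of the original module: the bilinear
# transform maps the analog DC point s = 0 to z = 1, so an analog
# lowpass keeps its unity DC gain. Evaluating H(z) at z = 1 amounts to
# sum(b) / sum(a).
def _demo_bilinear():
    b, a = bilinear([1.0], [1.0, 1.0], fs=1.0)
    return np.sum(b) / np.sum(a)  # ~1.0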
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
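# Illustrative sketch, not part of the original module (it assumes
# `filter_dict` and `buttord`, defined further down in this file):
# `iirdesign` infers the band type from the wp/ws ordering (here
# wp < ws, so a lowpass is designed), finds the minimum order, and
# delegates to `iirfilter`.
def _demo_iirdesign():
    b, a = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40, ftype='butter')
    return b, a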
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2 * fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
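# Illustrative sketch, not part of the original module: a prototype with
# more poles than zeros has zeros at infinity, and `_zpkbilinear` maps
# each of them to z = -1 (Nyquist), per the `append(z_z, -ones(degree))`
# step above.
def _demo_zpkbilinear():
    z, p, k = _zpkbilinear(z=[], p=[-1.0], k=1.0, fs=1.0)
    # z == [-1.], p == [(2 - 1) / (2 + 1)] == [1/3], k == 1/3
    return z, p, k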
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo ** degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw / 2
p_lp = p * bw / 2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp ** 2 - wo ** 2),
z_lp - sqrt(z_lp ** 2 - wo ** 2)))
p_bp = concatenate((p_lp + sqrt(p_lp ** 2 - wo ** 2),
p_lp - sqrt(p_lp ** 2 - wo ** 2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw ** degree
return z_bp, p_bp, k_bp
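# Illustrative sketch, not part of the original module: the substitution
# s -> (s^2 + wo^2) / (s * bw) doubles the order, so one prototype pole
# becomes a conjugate pair and one zero is placed at the origin.
def _demo_zpklp2bp():
    z, p, k = _zpklp2bp(z=[], p=[-1.0], k=1.0, wo=1.0, bw=0.5)
    # len(p) == 2 (a conjugate pair), z == [0.], k == 0.5
    return z, p, k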
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw / 2) / z
p_hp = (bw / 2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp ** 2 - wo ** 2),
z_hp - sqrt(z_hp ** 2 - wo ** 2)))
p_bs = concatenate((p_hp + sqrt(p_hp ** 2 - wo ** 2),
p_hp - sqrt(p_hp ** 2 - wo ** 2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j * wo * ones(degree))
z_bs = append(z_bs, -1j * wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
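The even-order DC gain of -`rp` dB noted above can be checked at ``z = 1``
(a quick numerical sketch with an arbitrary digital cutoff):
>>> b, a = signal.cheby1(4, 5, 0.5)
>>> np.allclose(20 * np.log10(abs(np.sum(b) / np.sum(a))), -5)
True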
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
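The gain at `Wn` itself is -`rs` dB by definition, which can be checked
directly (a quick numerical sketch reusing the filter above):
>>> w, h = signal.freqs(b, a, [100])
>>> np.allclose(20 * np.log10(abs(h)), -40)
True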
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches infinity, it becomes a
Chebyshev type I filter (`cheby1`). As `rp` approaches 0 and `rs`
approaches infinity, it becomes a Butterworth filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
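For a digital elliptic filter with the same ripple specification, pass a
normalized cutoff instead (a brief sketch; the cutoff is arbitrary):
>>> z, p, k = signal.ellip(4, 5, 40, 0.125, output='zpk')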
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_' + norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
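# Standard closed-form order estimates for the equivalent lowpass
# prototype with selectivity `nat`:
#   Butterworth: n = log10((GSTOP-1)/(GPASS-1)) / (2*log10(nat))
#   Chebyshev:   n = arccosh(sqrt((GSTOP-1)/(GPASS-1))) / arccosh(nat)
#   Elliptic:    a ratio of complete elliptic integrals (degree equation)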
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while attenuating by at least 40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
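# Smallest Butterworth order n satisfying nat**(2*n) >= (GSTOP-1)/(GPASS-1),
# i.e. the stopband constraint of the lowpass prototype evaluated at `nat`: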
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
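# Smallest Chebyshev order n with T_n(nat) >= sqrt((GSTOP-1)/(GPASS-1)),
# where T_n(x) = cosh(n*arccosh(x)) for x >= 1: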
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which attenuates by at least 60 dB from
0.2*(fs/2) to 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The elliptic natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
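# Elliptic order from the degree equation
#   n = K(m0)*K'(m1) / (K'(m0)*K(m1)),
# with squared selectivity m0 = (1/nat)**2 and squared discrimination
# m1 = (GPASS-1)/(GSTOP-1); `special.ellipk` takes the parameter m = k**2.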
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
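Examples
--------
The poles lie on the unit circle in the left half-plane (a quick
numerical check):
>>> import numpy as np
>>> z, p, k = buttap(4)
>>> np.allclose(abs(p), 1) and all(p.real < 0)
True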
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N + 1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10 ** (-rp / 20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N + 1, N, 2)
theta = pi * m / (2 * N)
p = -sinh(mu + 1j * theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N + 1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N + 1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N + 1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10 ** (-rp / 20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since
``n!/m! == falling_factorial(n, n-m)`` for ``n >= m``, skipping the
factors that cancel out. The ordinary factorial is the special case
``n! == ff(n, n)``.
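Examples
--------
For example, ``7!/4! = 7*6*5``:
>>> _falling_factorial(7, 3)
210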
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498, and output can be confirmed to
match http://oeis.org/A001498/b001498.txt:
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2 * n - k, n)
den = 2 ** (n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1 + 0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n + 1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j * y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
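Examples
--------
A small sketch finding both roots of ``x**2 - 1`` simultaneously:
>>> import numpy as np
>>> roots = _aberth(lambda x: x**2 - 1, lambda x: 2*x,
...                 [0.5 + 0.5j, -0.5 - 0.5j])
>>> np.allclose(sorted(roots.real), [-1, 1])
True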
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1 / (x[k] - x[k + 1:]))
beta[k] += np.sum(1 / (x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise RuntimeError('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N + 0.5, 1 / x)
# First derivative of above
def fp(x):
return (special.kve(N - 0.5, 1 / x) / (2 * x ** 2) -
special.kve(N + 0.5, 1 / x) / (x ** 2) +
special.kve(N + 1.5, 1 / x) / (2 * x ** 2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j * w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1 / np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1 / _bessel_zeros(N)
a_last = _falling_factorial(2 * N, N) // 2 ** N
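# Constant term of the reverse Bessel polynomial: (2*N)! / (2**N * N!)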
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor ** -N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10 ** (-math.log10(a_last) / N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot a filter to remove the 60 Hz component from a
signal sampled at 200 Hz, using a quality factor Q = 30:
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot a filter to remove frequencies other than the 300 Hz
component from a signal sampled at 1000 Hz, using a quality factor Q = 30:
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Check that w0 lies strictly inside the open interval (0, 1)
if w0 >= 1.0 or w0 <= 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0 / Q
# Normalize inputs
bw = bw * np.pi
w0 = w0 * np.pi
# Compute -3 dB attenuation
gb = 1 / np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0 - gb ** 2.0) / gb) * np.tan(bw / 2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb / np.sqrt(1.0 - gb ** 2.0)) * np.tan(bw / 2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0 / (1.0 + beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain * np.array([1.0, -2.0 * np.cos(w0), 1.0])
else:
b = (1.0 - gain) * np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0 * gain * np.cos(w0), (2.0 * gain - 1.0)])
return b, a
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}