prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from modeltranslation.translator import translator, TranslationOptions
from nsms.text.models import *
from django.utils.translation import ugettext as _
from django.utils.translation import get_language as _get_language
from modeltranslation import utils
class TextTranslationOptions(TranslationOptions):
    """Declare which fields of the Text model get per-language columns."""
    # only the body text is translated
    fields = ('text',)
| translator.register(Text, TextTranslationOptions)
# need to translate something for django translations to kick in
_("Something to trigger localizations")
# monkey patch a version of get_language that isn't broken
def get_language():
    """Return the currently active language code.

    Thin wrapper around Django's get_language; installed below in place of
    modeltranslation's own helper.
    """
    return _get_language()


utils.get_language = get_language
|
import logging
from serial_grabber.reader import MessageVerifier
class XBeeMessageVerifier(MessageVerifier):
    """Verifies XBee transactions by comparing the reported payload length
    (second-to-last line) against the actual payload length.

    Assumed layout: header line, payload lines, length line, trailer line
    -- TODO confirm against the XBee framing used by the reader.
    """

    logger = logging.getLogger("MessageVerifier")

    def verify_message(self, transaction):
        """Return (True, "OK") when the length checksum matches, else (False, "NA")."""
        try:
            data = transaction.split("\n")
            payload = "\n".join(data[1:-2])
            if int(data[-2]) == len(payload):
                return True, "OK"
            self.logger.error("Reported length: %s, Actual length: %s" % (int(data[-2]), len(payload)))
            # deliberately funnel the mismatch into the same failure path
            raise ValueError()
        except ValueError:
            self.logger.error("Could not convert %s to an integer." % data[-2])
            return False, "NA"
|
nate_lines(self, x_page_offsets, page_y_top):
""" Step three go through the lines and put each in page(s) """
for box1 in self.boxes:
if not box1.line_to:
continue
line = box1.line_to
pages = [box1.page.y_page_num]
end = line.start + line.end
x_page = box1.page.x_page_num
start_y_page = end[0].page.y_page_num
end_y_page = end[0].page.y_page_num
for box in end:
y_page = box.page.y_page_num
if y_page not in pages:
if (x_page, y_page) not in self.__pages:
#Add the new page into the dictionary
self.__new_page(x_page, y_page,
x_page_offsets[x_page],
page_y_top[y_page])
self.__pages[x_page, y_page].add_line(box1.line_to)
pages.append(y_page)
if y_page < start_y_page:
start_y_page = y_page
if y_page > end_y_page:
end_y_page = y_page
#if len(end) = 2 & end[0].y_page = 0 & end[1].y_page = 4
#the line will not print on y_pages 1,2,3. Fix that here.
#x_page = start_x_page
for y_page in range(start_y_page, end_y_page+1):
if y_page not in pages:
if (x_page, y_page) not in self.__pages:
#Add the new page into the dictionary
self.__new_page(x_page, y_page,
x_page_offsets[x_page],
page_y_top[y_page])
self.__pages[x_page, y_page].add_line(box1.line_to)
def __paginate_title(self, x_page_offsets):
    """Step four: place the title box onto the top row of pages.

    x_page_offsets: per-x-page usable widths/offsets (index 1 holds the
    single-column width -- presumably cm; TODO confirm with caller).
    """
    if self.title.boxstr == "None":
        # title is configured not to display
        return
    #x_page_offsets[page] tells me the widths I can use
    if len(x_page_offsets) > 1:
        # report spans several pages horizontally
        if self.title.mark_text and not self.title.text:
            # TOC-only title: nothing visible to split, park it on one page
            self.title.width = self.doc.get_usable_width()
            self.__pages[list(self.__pages.keys())[0]].add_box(self.title)
            return
        # greedily pack title words into per-page fragments
        title_list = self.title.text.split(" ")
        title_font = self.__get_font(self.title)
        #space_width = PT2CM(self.doc.string_width(title_font," "))
        list_title = [title_list.pop(0)]
        while len(title_list):
            tmp = list_title[-1] + " " + title_list[0]
            if PT2CM(self.doc.string_width(title_font, tmp)) > \
                    x_page_offsets[1]:
                # next word would overflow this page's width: start a new fragment
                list_title.append("")
            if list_title[-1] != "":
                list_title[-1] += " "
            list_title[-1] += title_list.pop(0)
        # center the fragments horizontally by padding with empty entries
        start_page = int((len(x_page_offsets) - len(list_title)) / 2)
        for tmp in range(start_page):
            list_title.insert(0, "")
        list_title.append("")
        #one extra for security. doesn't hurt.
        list_title.append("")
        x_page = 0
        for title in list_title:
            if title == "":
                # padding entry: no title text on this x page
                x_page += 1
                continue
            if (x_page, 0) not in self.__pages:
                #Add the new page into the dictionary
                self.__new_page(x_page, 0, x_page_offsets[1], 0)
            title_part = TitleBox(self.doc, self.title.boxstr)
            title_part.text = list_title[x_page]
            title_part.width = x_page_offsets[1]
            #Add the box into the page
            self.__pages[x_page, 0].add_box(title_part)
            x_page = x_page + 1
    else:
        # single page wide: the whole title fits on page (0, 0)
        self.title.width = self.doc.get_usable_width()
        self.__pages[0, 0].add_box(self.title)
def __paginate(self, colsperpage):
    """Take the boxes on the canvas and put them into separate pages.

    The boxes need to be sorted by y_cm before this is called.
    """
    # (removed an unused local that cached report_opts.littleoffset)
    self.__pages = {}
    x_page_offsets = self.__paginate_x_offsets(colsperpage)
    page_y_top = self.__paginate_y_pages(colsperpage, x_page_offsets)
    if self.note is not None:
        self.__paginate_note(x_page_offsets, page_y_top)
    self.__paginate_lines(x_page_offsets, page_y_top)
    self.__paginate_title(x_page_offsets)
def paginate(self, colsperpage, one_page_report):
    """Lay the canvas boxes out onto pages.

    self.boxes must be sorted by box.y_cm for this to work.
    """
    if one_page_report:
        # everything stays on this single page: clone the title onto it
        title_part = TitleBox(self.doc, self.title.boxstr)
        title_part.text = self.title.text
        title_part.width = self.doc.get_usable_width()
        self.add_box(title_part)
        if self.note is not None:
            # the note prints first on the single page
            self.note.set_on_page(self)
            self.boxes.insert(0, self.note)
            self.note.doc = self.doc
            self.note.page = self
    else:
        self.__paginate(colsperpage)
#------------------------------------------------------------------------
#
# Class Box_Base
#
#------------------------------------------------------------------------
class BoxBase:
    """A drawable rectangle that always lives in/on a Page.

    The attributes needed for printing are: boxstr, text, x_cm, y_cm,
    width and height.
    """

    def __init__(self):
        self.page = None
        # 'None' will cause an error; sub-classes must initialize boxstr
        self.boxstr = "None"
        self.text = ""
        # level holds:
        #   (# - which column am I in (zero based)
        #   ,# - am I a (0) direct descendant/ancestor or (>0) other
        #   , ) - anything else the report needs to run
        self.level = (0, 0)
        self.__mark = None  # database person object for the index/TOC
        self.x_cm = 0.0
        self.y_cm = 0.0
        self.width = 0.0
        self.height = 0.0
        self.line_to = None
        # if the TOC text needs to differ from text, set mark_text
        self.mark_text = None

    def scale(self, scale_amount):
        """Multiply position and size by scale_amount."""
        for attr in ("x_cm", "y_cm", "width", "height"):
            setattr(self, attr, getattr(self, attr) * scale_amount)

    def add_mark(self, database, person):
        """Remember the index mark for person."""
        self.__mark = utils.get_person_mark(database, person)

    def display(self):
        """Draw this box, offset by its page's x/y origin.

        Any box whose boxstr is 'None' is silently skipped.
        """
        if self.boxstr == "None":
            return
        page = self.page
        doc = page.canvas.doc
        report_opts = page.canvas.report_opts
        xbegin = self.x_cm - page.page_x_offset
        ybegin = self.y_cm - page.page_y_offset
        doc.draw_box(self.boxstr,
                     '\n'.join(self.text),
                     xbegin, ybegin,
                     self.width, self.height, self.__mark)
        # I am responsible for my own lines; draw them here.
        if self.line_to:
            self.line_to.display(page)
        if page.x_page_num > 0 and self.level[1] == 0 and \
                xbegin < report_opts.littleoffset * 2:
            # I am a child on the first column: stub in a lead-in line
            yme = ybegin + self.height / 2
            doc.draw_line(report_opts.line_str, 0, yme, xbegin, yme)
class TitleNoDisplay(BoxBase):
    """A title that appears only in the table of contents and NOT on the
    report pages themselves."""

    def __init__(self, doc, boxstr):
        """Initialize the title box."""
        BoxBase.__init__(self)
        self.doc = doc
        self.boxstr = boxstr

    def set_box_height_width(self):
        """A non-displayed title occupies no space."""
        self.width = self.height = 0

    def display(self):
        """Emit only the TOC index mark; draw nothing visible."""
        toc_text = self.text if self.mark_text is None else self.mark_text
        mark = IndexMark(toc_text, INDEX_TYPE_TOC, 1)
        self.doc.center_text(self.boxstr, '', 0, -100, mark)
cla |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''

# Acupuncture point metadata for ST28 (shuidao / 水道).
SPELL=u'shuǐdào'
CN=u'水道'
NAME=u'shuidao34'
CHANNEL='stomach'
CHANNEL_FULLNAME='StomachChannelofFoot-Yangming'
SEQ='ST28'

if __name__ == '__main__':
    pass
|
#!/usr/bin/env python
#
# Beautiful Capi generates beautiful C API wrappers for your C++ classes
# Copyright (C) 2015 Petr Petrovich Petrov
#
# This file is part of Beautiful Capi.
#
# Beautiful Capi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Beautiful Capi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beautiful Capi. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy
from Parser import TClass, TNamespace, TBeautifulCapiRoot
class ExtensionSemanticProcessor(object):
    """Expands every class's lifecycle_extensions into standalone derived
    classes registered in the owning namespace."""

    def __init__(self, root_node: TBeautifulCapiRoot):
        self.root_node = root_node
        # namespace/class name path down to the node being processed
        self.class_stack = []

    def process_class(self, cur_class: TClass, cur_namespace: TNamespace):
        """Materialize one new class per lifecycle extension of cur_class."""
        self.class_stack.append(cur_class.name)
        for lifecycle_extension in cur_class.lifecycle_extensions:
            new_extension_class = deepcopy(cur_class)
            new_extension_class.name = lifecycle_extension.name
            new_extension_class.lifecycle = lifecycle_extension.lifecycle
            new_extension_class.lifecycle_filled = True
            new_extension_class.wrap_name = lifecycle_extension.wrap_name
            new_extension_class.wrap_name_filled = lifecycle_extension.wrap_name_filled
            new_extension_class.cast_tos = deepcopy(lifecycle_extension.cast_tos)
            # generated classes must not re-expand their own extensions
            new_extension_class.lifecycle_extensions = []
            new_extension_class.lifecycle_extension = lifecycle_extension
            new_extension_class.extension_base_class_name = '::'.join(self.class_stack)
            new_extension_class.down_cast = lifecycle_extension.down_cast
            new_extension_class.down_cast_filled = True
            cur_namespace.classes.append(new_extension_class)
        self.class_stack.pop()

    def process_namespace(self, namespace: TNamespace):
        """Recurse into nested namespaces, then expand each class."""
        self.class_stack.append(namespace.name)
        for nested_namespace in namespace.namespaces:
            self.process_namespace(nested_namespace)
        # NOTE(review): process_class appends to namespace.classes while we
        # iterate it; the appended classes have no extensions so the loop
        # terminates, but confirm this ordering is intentional.
        for cur_class in namespace.classes:
            self.process_class(cur_class, namespace)
        self.class_stack.pop()

    def process(self):
        """Process every top-level namespace of the parsed API root."""
        for cur_namespace in self.root_node.namespaces:
            self.process_namespace(cur_namespace)
def process(root_node: TBeautifulCapiRoot):
    """Module-level entry point: expand lifecycle extensions across root_node."""
    semantic_processor = ExtensionSemanticProcessor(root_node)
    semantic_processor.process()
|
from django.conf.urls import include, url

from . import views

app_name = 'todo'

# URL routes for the todo app; list_id/card_id/item_id are alphanumeric keys.
urlpatterns = [
    url(r'^lists/$', views.todolists, name='todo_lists'),
    url(r'^lists/(?P<list_id>[\w\d]+)/update/$', views.UpdateTodoList.as_view(), name='update_list'),
    url(r'^lists/card/(?P<card_id>[\w\d]+)/$', views.cards, name='cards'),
    url(r'^lists/new/$', views.CreateList.as_view(), name='new_list'),
    url(r'^lists/card/new/(?P<list_id>[\w\d]+)/$', views.CreateCard.as_view(), name='new_card'),
    url(r'^lists/card/(?P<card_id>[\w\d]+)/update/$', views.UpdateCard.as_view(), name='update_card'),
    url(r'^lists/card/(?P<card_id>[\w\d]+)/move/$', views.move_card, name='move_card'),
    url(r'^lists/card/(?P<card_id>[\w\d]+)/items/new/$', views.CreateTodoItem.as_view(), name='new_item'),
    url(r'^lists/card/(?P<item_id>[\w\d]+)/items/update/$', views.UpdateTodoItem.as_view(), name='update_item'),
]
on = False
self.vtkSelector.addEnabled = False
self.vtkSelector.removeEnabled = False
self.vtkSelector.noneEnabled = True
self.vtkSelector.setMRMLScene(slicer.mrmlScene)
self.vtkSelector.setToolTip("Select the Fiber Bundle to filter")
self.vtkSelectorFrame.layout().addWidget(self.vtkSelector)
self.outputSelectorFrame = qt.QFrame(self.parent)
self.outputSelectorFrame.setLayout(qt.QHBoxLayout())
self.parent.layout().addWidget(self.outputSelectorFrame)
self.outputSelectorLabel = qt.QLabel("Output Fiber Bundle: ", self.outputSelectorFrame)
self.outputSelectorFrame.layout().addWidget(self.outputSelectorLabel)
self.outputSelector = slicer.qMRMLNodeComboBox(self.outputSelectorFrame)
self.outputSelector.nodeTypes = ("vtkMRMLFiberBundleNode","vtkMRMLFiberBundleNode")
self.outputSelector.selectNodeUponCreation = False
self.outputSelector.addEnabled = True
self.outputSelector.removeEnabled = True
self.outputSelector.noneEnabled = True
self.outputSelector.setMRMLScene(slicer.mrmlScene)
self.outputSelector.setToolTip("Select the output Fiber Bundle")
self.outputSelectorFrame.layout().addWidget(self.outputSelector)
self.thresholdFrame = qt.QFrame(self.parent)
self.thresholdFrame.setLayout(qt.QHBoxLayout())
self.parent.layout().addWidget(self.thresholdFrame)
self.thresholdMinLabel = qt.QLabel("Min: ",self.thresholdFrame)
self.thresholdFrame.layout().addWidget(self.thresholdMinLabel)
self.thresholdMin = qt.QSpinBox(self.thresholdFrame)
self.thresholdMin.setSingleStep(1)
self.thresholdMin.setRange(0,1000)
self.thresholdMin.setValue(0)
self.thresholdMin.enabled = False
self.thresholdFrame.layout().addWidget(self.thresholdMin)
self.thresholdMaxLabel = qt.QLabel("Max: ",self.thresholdFrame)
self.thresholdFrame.layout().addWidget(self.thresholdMaxLabel)
self.thresholdMax = qt.QSpinBox(self.thresholdFrame)
self.thresholdMax.setSingleStep(1)
self.thresholdMax.setRange(0,1000)
self.thresholdMax.setValue(1)
self.thresholdMax.enabled = False
self.thresholdFrame.layout().addWidget(self.thresholdMax)
self.calculateLengthButton = qt.QPushButton("Calculate Length Stats")
self.calculateLengthButton.enabled = False
self.parent.layout().addWidget(self.calculateLengthButton)
self.applyThresholdButton = qt.QPushButton("Apply Min-Max Threshold")
self.applyThresholdButton.enabled = False
self.parent.layout().addWidget(self.applyThresholdButton)
self.parent.layout().addStretch(1)
self.calculateLengthButton.connect('clicked()', self.onCalculateLength)
self.applyThresholdButton.connect('clicked()',self.onApplyThreshold)
self.vtkSelector.connect('nodeActivated(vtkMRMLNode*)',self.onVtkSelect)
self.outputSelector.connect('nodeActivated(vtkMRMLNode*)',self.onOutputSelect)
def onVtkSelect(self, node):
    """Remember the selected input fiber bundle.

    The length-calculation button is only usable once an input is chosen.
    """
    self.vtkNode = node
    # `is not None` instead of `!= None`; collapses the original if/else
    self.calculateLengthButton.enabled = node is not None
def onOutputSelect(self, node):
    """Remember the selected output fiber bundle and prepare its polydata.

    The output selector has noneEnabled set, so node may be None; in that
    case only record it (the original crashed calling a method on None).
    """
    self.outputNode = node
    if node is not None:
        self.outputNode.CreateDefaultDisplayNodes()
        self.outputPolyData = vtk.vtkPolyData()
def onCalculateLength(self):
    """Compute per-fiber lengths of the selected input bundle and show a
    10-bin length histogram in a Slicer chart view.

    Side effects: fills self.distanceTable, enables the threshold spin
    boxes and the apply button, and switches the layout to the
    quantitative view.
    """
    self.inputPolyData = self.vtkNode.GetPolyData()
    points = self.inputPolyData.GetPoints()
    lines = self.inputPolyData.GetLines()
    lines.InitTraversal()
    self.distanceTable = list()
    # sum the Euclidean segment lengths of each cell (one cell per fiber)
    for i in range(self.inputPolyData.GetNumberOfCells()):
        fiberLength = 0
        ids = vtk.vtkIdList()
        lines.GetNextCell(ids)
        for j in range(ids.GetNumberOfIds() - 1):
            point1 = [0, 0, 0]
            point2 = [0, 0, 0]
            points.GetPoint(ids.GetId(j), point1)
            points.GetPoint(ids.GetId(j + 1), point2)
            x = point2[0] - point1[0]
            y = point2[1] - point1[1]
            z = point2[2] - point1[2]
            step = (x * x + y * y + z * z) ** .5
            fiberLength += step
        self.distanceTable.append(fiberLength)
    # seed the threshold widgets with the observed bounds
    min, max = self.getDistanceBound()
    self.thresholdMin.setValue(min)
    self.thresholdMin.enabled = True
    self.thresholdMax.setValue(max + 1)
    self.thresholdMax.enabled = True
    self.applyThresholdButton.enabled = True
    # switch to a layout that includes a chart view
    layoutNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLLayoutNode')
    layoutNodes.InitTraversal()
    layoutNode = layoutNodes.GetNextItemAsObject()
    layoutNode.SetViewArrangement(slicer.vtkMRMLLayoutNode.SlicerLayoutConventionalQuantitativeView)
    chartViewNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLChartViewNode')
    chartViewNodes.InitTraversal()
    chartViewNode = chartViewNodes.GetNextItemAsObject()
    # build the histogram array: components are (bin center, count, 0)
    arrayNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLDoubleArrayNode())
    array = arrayNode.GetArray()
    array.SetNumberOfTuples(10)
    step = (max - min) / 10
    interMin = min
    interMax = min + step
    for i in range(10):
        numberOfFibers = 0
        for length in self.distanceTable:
            if length <= interMax and length >= interMin and length <= self.thresholdMax.value and length >= self.thresholdMin.value:
                numberOfFibers += 1
        array.SetComponent(i, 0, (interMin + interMax) / 2)
        array.SetComponent(i, 1, numberOfFibers)
        array.SetComponent(i, 2, 0)
        interMin += step
        interMax += step
    chartNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLChartNode())
    chartNode.AddArray("Fiber Length", arrayNode.GetID())
    chartViewNode.SetChartNodeID(chartNode.GetID())
    chartNode.SetProperty('default', 'title', 'Length Distribution')
    chartNode.SetProperty('default', 'xAxisLabel', 'Length')
    chartNode.SetProperty('default', 'yAxisLabel', 'Distribution')
    chartNode.SetProperty('default', 'type', 'Bar')
def getDistanceBound(self):
    """Return (min, max) of the fiber lengths in self.distanceTable.

    The original scanned with sentinel bounds (min=100000, max=-1), which
    is wrong for any fiber longer than 100000; use the builtins instead.
    The legacy sentinel pair is preserved for an empty table so callers
    see the same value as before.
    """
    if not self.distanceTable:
        # preserve the historical sentinels for the empty case
        return 100000, -1
    return min(self.distanceTable), max(self.distanceTable)
def onApplyThreshold(self):
min,max = self.getDistanceBound()
newPoints = vtk.vtkPoints()
newLines = vtk.vtkCellArray()
newTensors = vtk.vtkFloatArray()
newTensors.SetNumberOfComponents(9)
newScalars = vtk.vtkFloatArray()
points = self.inputPolyData.GetPoints()
lines = self.inputPolyData.GetLines()
tensors = self.inputPolyData.GetPointData().GetTensors()
lines.InitTraversal()
newId = 0
for length in self.distanceTable:
if length<=self.thresholdMax.value and length>=self.thresholdMin.value:
ids = vtk.vtkIdList()
lines.GetNextCell(ids)
newLine = vtk.vtkPolyLine()
#print(ids.GetNumberOfIds())
newLine.GetPointIds().SetNumberOfIds(ids.GetNumberOfIds())
#print(((length-min)/(max-min))*100)
for i in range(ids.GetNumberOfIds()):
newPoints.InsertNextPoint(points.GetPoint(ids.GetId(i)))
newLine.GetPointIds().SetId(i,newId)
newScalars.InsertNextValue(((length-min)/(max-min)))
newId += 1
tensorValue = [0]*9
if(tensors != None):
for j in range(9):
tensorValue[j] = tensors.GetComponent(ids.GetId(i),j)
|
"""
Th | is package contains different `unittests <https://docs.python.org/3/library/unittest.html>`_ for the project.
Those tests help to validate difficult pieces | of the software.
"""
__author__ = 'Wuersch Marcel'
__license__ = "GPLv3" |
from __future__ import absolute_import
import platform
import datetime
import sunpy
__all__ = ['system_info']
def _version_of(module, attribute='__version__'):
    """Return the given attribute of module, or 'NOT INSTALLED' when the
    module cannot be imported."""
    try:
        return getattr(__import__(module, fromlist=[attribute]), attribute)
    except ImportError:
        return "NOT INSTALLED"


def system_info():
    """Prints system information.

    Prints information about the runtime environment that SunPy lives in.
    Information about the OS, architecture, Python, and all major dependencies
    are included.

    The goal of this function is to provide enough information for someone
    running SunPy code or replicating a bug to setup a comparible environment
    to that which was originally used.

    Author: `Keith Hughitt <keith.hughitt@nasa.gov>`
    """
    print("==========================================================")
    print(" SunPy Installation Information\n")
    print(" " + datetime.datetime.utcnow().strftime("%A, %d. %B %Y %I:%M%p UT"))
    print("==========================================================\n")

    system = platform.system()
    proc = platform.processor()

    print("###########")
    print(" General")
    print("###########")

    # OS and architecture information
    if system == "Linux":
        # NOTE(review): platform.linux_distribution was removed in Python 3.8
        distro = " ".join(platform.linux_distribution())
        print("OS: %s (Linux %s %s)" % (distro, platform.release(), proc))
    elif system == "Darwin":
        print("OS: Mac OS X %s (%s)" % (platform.mac_ver()[0], proc))
    elif system == "Windows":
        print("OS: Windows %s %s (%s)" % (platform.release(),
                                          platform.version(), proc))
    else:
        print("Unknown OS (%s)" % proc)

    # Python version
    arch = platform.architecture()[0]
    print("Python: %s (%s)\n" % (platform.python_version(), arch))

    # Dependencies (resolved lazily; missing ones report "NOT INSTALLED")
    print("####################")
    print(" Required libraries")
    print("####################")
    print("SunPy: %s" % sunpy.__version__)
    print("NumPy: %s" % _version_of('numpy'))
    print("SciPy: %s" % _version_of('scipy'))
    print("Matplotlib: %s" % _version_of('matplotlib'))
    print("PyFITS: %s" % _version_of('pyfits'))
    print("pandas: %s" % _version_of('pandas'))
    print("")

    print("#######################")
    print(" Recommended libraries")
    print("#######################")
    print("beautifulsoup4: %s" % _version_of('bs4'))
    print("PyQt: %s" % _version_of('PyQt4.QtCore', 'PYQT_VERSION_STR'))
    print("SUDS: %s" % _version_of('suds'))
    print("")
|
from hitchhttp import http_request
from ruamel.yaml import dump
from ruamel.yaml.dumper import RoundTripDumper
from ruamel.yaml.comments import CommentedMap
from hitchhttp.models import Database
from os import path
import tornado.web
import tornado
import requests
import random
import json
import time
import sys
class MockHTTPHandler(tornado.web.RequestHandler):
    """Mock REST server request handling.

    Two modes, chosen by settings['record']:
      * record: proxy the request to settings['redirection_url'], relay the
        response, and persist the exchange in on_finish().
      * playback: answer from the configured URIs in settings['config'].
    """

    # HTML body returned when no configured URI matches the request
    default_response = (
        """
<html><head><title>Nothing configured!</title></head>
<body>No matching URI found for {0}<br/><br/>
See <a href="http://hitchtest.readthedocs.org/">the docs</a>
for more information.</body>\n
"""
    )

    def log_json(self, name, request, response):
        """JSON to log to indicate what just happened."""
        pair = {}
        pair['match'] = name
        pair['request'] = request
        pair['response'] = response
        sys.stdout.write(u"{0}\n".format(json.dumps(pair)))
        sys.stdout.flush()

    def process(self):
        """Handle one request in either record (proxy) or playback mode."""
        self.actual_request = http_request.MockRequest(self.request)
        if self.settings['record']:
            # proxy the incoming request to the real server
            headers_to_request_with = self.actual_request.headers_without_host
            if self.settings['intercept'] is not None:
                headers_to_request_with.update(self.settings['intercept'])
            self.response = requests.request(
                self.request.method,
                "{}{}".format(self.settings['redirection_url'], self.request.uri),
                headers=headers_to_request_with,
                data=self.actual_request.request_data,
            )
            # relay the upstream response, minus hop-by-hop headers
            for header_var, header_val in self.response.headers.items():
                if header_var.lower() not in ["transfer-encoding", "content-encoding", ]:
                    self.set_header(header_var, header_val)
            self.set_status(self.response.status_code)
            if self.response.status_code != 304:
                self.write(self.response.content)
        else:
            # playback mode: answer from the configured URI, if any matches
            uri = self.settings['config'].get_matching_uri(self.actual_request)
            if uri is not None:
                time.sleep(uri.wait)
                self.set_status(uri.return_code)
                for header_var, header_val in uri.response_headers.items():
                    if header_var.lower() not in [
                        "transfer-encoding", "content-encoding", "set-cookie",
                    ]:
                        self.set_header(header_var, header_val)
                if uri.return_code != 304:
                    self.write(uri.response_content.encode('utf8'))
            else:
                self.set_status(404)
                self.set_header('Content-type', 'text/html')
                self.write(
                    self.default_response.format(self.request.path).encode('utf8')
                )
        self.response_content = {}

    def on_finish(self):
        """After the response is sent: persist the exchange (record mode)
        or log what was matched (playback mode)."""
        if self.settings['record']:
            yaml_snip = {}
            yaml_snip['request'] = {}
            yaml_snip['request']['path'] = self.request.uri
            yaml_snip['request']['method'] = self.request.method
            yaml_snip['request']['headers'] = self.actual_request.headers_without_host
            if self.actual_request.request_data is not None:
                yaml_snip['request']['data'] = self.actual_request.body.strip()
            yaml_snip['response'] = {}
            yaml_snip['response']['code'] = self.response.status_code
            yaml_snip['response']["headers"] = {
                item[0]: item[1] for item in dict(self.response.headers).items()
                if item[0].lower() not in ["transfer-encoding", "content-encoding", ]
            }
            # persist the request/response pair and their headers
            database = Database(self.settings['record_to_filename'])
            db_request = database.Request(
                order=1,
                request_path=self.request.uri,
                request_method=self.request.method,
                request_data=self.actual_request.body.strip(),
                response_code=self.response.status_code,
                response_content=self.response.text,
            )
            db_request.save()
            for header_var, header_val in yaml_snip['request']['headers'].items():
                db_request_header = database.RequestHeader(
                    request=db_request,
                    name=header_var,
                    value=header_val,
                )
                db_request_header.save()
            for header_var, header_val in self.response.headers.items():
                if header_var.lower() not in ["transfer-encoding", "content-encoding", ]:
                    db_response_header = database.ResponseHeader(
                        request=db_request,
                        name=header_var,
                        value=header_val,
                    )
                    db_response_header.save()
            self.log_json("record", yaml_snip['request'], yaml_snip['response'])
        else:
            uri = self.settings['config'].get_matching_uri(self.actual_request)
            if uri is not None:
                self.log_json(
                    uri.name, self.actual_request.to_dict(uri.name), uri.response_content
                )
            else:
                self.log_json(
                    None,
                    self.actual_request.to_dict(None),
                    self.default_response.format(self.request.path)
                )

    # all verbs funnel into process()
    def get(self):
        self.process()

    def post(self):
        self.process()

    def put(self):
        self.process()

    def delete(self):
        self.process()

    def options(self):
        self.process()
|
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
## \addtogroup modules
# __chipsec/modules/secureboot/keys.py__ - verify protections of Secure Boot key EFI variables
from chipsec.module_common import *
from chipsec.file import *
from chipsec.hal.uefi import *
# ############################################################
# SPECIFY PLATFORMS THIS MODULE IS APPLICABLE TO
# ############################################################
_MODULE_NAME = 'keys'
TAGS = [MTAG_SECUREBOOT]
class keys(BaseModule):
    """Verifies protections of the Secure Boot key/configuration EFI
    variables by attempting to modify them and checking the writes do
    not stick."""

    SECURE   = 0x1  # defined but not currently used as a return flag
    INSECURE = 0x2  # a variable accepted a modified write
    ERROR    = 0x4  # a variable doesn't exist

    def __init__(self):
        BaseModule.__init__(self)
        self._uefi = UEFI( self.cs.helper )

    def is_supported(self):
        """Module applies only when the OS exposes the UEFI Runtime API."""
        supported = self.cs.helper.EFI_supported()
        if not supported: self.logger.log_skipped_check( "OS does not support UEFI Runtime API" )
        return supported

    def check_EFI_variable_authentication( self, name, guid ):
        """Flip the first/last byte of the variable, write it back, and
        verify the stored contents did not change; restores on failure.

        Returns keys.ERROR if the variable doesn't exist, keys.INSECURE if
        the modified write stuck, otherwise a truthy 'protected' result.
        """
        self.logger.log( "[*] Checking EFI variable %s {%s}.." % (name, guid) )
        orig_var = self._uefi.get_EFI_variable( name, guid, None )
        if not orig_var:
            self.logger.log( "[*] EFI variable %s {%s} doesn't exist" % (name, guid) )
            return keys.ERROR
        fname = name + '_' + guid + '.bin'
        if self.logger.VERBOSE: write_file( fname, orig_var )
        origvar_len = len(orig_var)
        # flip the first byte, and also the last byte when there is more than one
        mod_var = chr( ord(orig_var[0]) ^ 0xFF ) + orig_var[1:]
        if origvar_len > 1: mod_var = mod_var[:origvar_len-1] + chr( ord(mod_var[origvar_len-1]) ^ 0xFF )
        if self.logger.VERBOSE: write_file( fname + '.mod', mod_var )
        status = self._uefi.set_EFI_variable( name, guid, mod_var )
        if not status: self.logger.log( '[*] Writing EFI variable %s did not succeed. Verifying contents..' % name )
        new_var = self._uefi.get_EFI_variable( name, guid, None )
        if self.logger.VERBOSE: write_file( fname + '.new', new_var )
        ok = (origvar_len == len(new_var))
        for i in range( origvar_len ):
            if not (new_var[i] == orig_var[i]):
                ok = keys.INSECURE
                break
        if ok == keys.INSECURE:
            self.logger.log_bad( "EFI variable %s is not protected! It has been modified. Restoring original contents.." % name )
            self._uefi.set_EFI_variable( name, guid, orig_var )
        else:
            self.logger.log_good( "Could not modify EFI variable %s {%s}" % (name, guid) )
        return ok

    # checks authentication of Secure Boot EFI variables
    def check_secureboot_key_variables(self):
        sts = 0
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_PK, EFI_VARIABLE_DICT[EFI_VAR_NAME_PK] )
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_KEK, EFI_VARIABLE_DICT[EFI_VAR_NAME_KEK] )
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_db, EFI_VARIABLE_DICT[EFI_VAR_NAME_db] )
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_dbx, EFI_VARIABLE_DICT[EFI_VAR_NAME_dbx] )
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_SecureBoot, EFI_VARIABLE_DICT[EFI_VAR_NAME_SecureBoot] )
        sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_SetupMode, EFI_VARIABLE_DICT[EFI_VAR_NAME_SetupMode] )
        #sts |= self.check_EFI_variable_authentication( EFI_VAR_NAME_CustomMode, EFI_VARIABLE_DICT[EFI_VAR_NAME_CustomMode] )
        if (sts & keys.ERROR) != 0: self.logger.log_important( "Some Secure Boot variables don't exist" )
        ok = ((sts & keys.INSECURE) == 0)
        self.logger.log('')
        if ok: self.logger.log_passed_check( 'All existing Secure Boot EFI variables seem to be protected' )
        else: self.logger.log_failed_check( 'One or more Secure Boot variables are not protected' )
        return ok

    def run( self, module_argv ):
        """Required module entry point: run all checks from this module."""
        self.logger.start_test( "Protection of Secure Boot Key and Configuration EFI Variables" )
        return self.check_secureboot_key_variables()
|
"""
This example outputs a custom waveform and records the waveform on Channel A.
The output of the AWG must be connected to Channel A.
"""
import os
import numpy as np
# if matplotlib is available then plot the results
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from msl.equipment import (
EquipmentRecord,
ConnectionRecord,
Backend,
)
# Describe the instrument and how to connect to it (SDK backend, local DLL).
record = EquipmentRecord(
    manufacturer='Pico Technology',
    model='5244B', # update for your PicoScope
    serial='DY135/055', # update for your PicoScope
    connection=ConnectionRecord(
        backend=Backend.MSL,
        address='SDK::ps5000a.dll', # update for your PicoScope
        properties={
            'resolution': '14bit', # only used for a ps5000a series PicoScope
            'auto_select_power': True, # for PicoScopes that can be powered by an AC adaptor or a USB cable
        },
    )
)
# optional: ensure that the PicoTech DLLs are available on PATH
os.environ['PATH'] += os.pathsep + r'C:\Program Files\Pico Technology\SDK\lib'
print('Example :: Acquire AWG custom waveform')
# connect to the PicoScope
scope = record.connect()
# configure the PicoScope
scope.set_channel('A', scale='2V') # enable Channel A and set the voltage range to be +/-2V
dt, num_samples = scope.set_timebase(10e-3, 5.0) # sample the voltage on Channel A every 10 ms for 5 s
scope.set_trigger('A', -0.2, timeout=5.0, direction='falling') # use Channel A as the trigger source
# simulate the Lennard-Jones Potential
x = np.linspace(0.88, 2, 500)
awg = (1/x)**12 - 2*(1/x)**6
# load the waveform into the AWG; 'quad' index mode mirrors it into a full cycle
scope.set_sig_gen_arbitrary(awg, repetition_rate=1e3, index_mode='quad', pk_to_pk=2.0)
scope.run_block(pre_trigger=2.5) # start acquisition
scope.wait_until_ready() # wait until all requested samples are collected
scope.set_data_buffer('A') # set the data buffer for Channel A
scope.get_values() # fill the data buffer of Channel A with the values saved in the PicoScope's internal memory
scope.stop() # stop the oscilloscope from sampling data
print('Channel A input')
# time axis relative to the trigger point
# NOTE(review): float arange may yield len(t) != num_samples at the boundary -- confirm
t = np.arange(-scope.pre_trigger, dt*num_samples-scope.pre_trigger, dt)
for i in range(num_samples):
    print('{0:f}, {1:f}'.format(t[i], scope.channel['A'].volts[i]))
if plt is not None:
    plt.plot(t, scope.channel['A'].volts, 'bo')
    plt.show()
|
"value": '**',
"first": {
"type": "name",
"value": 'a'
},
"second": {
"type": "binary_operator",
"value": '**',
"first": {
"type": "name",
"value": 'b'
},
"second": {
"type": "name",
"value": 'c',
},
"first_formatting": [],
"second_formatting": []
},
"first_formatting": [],
"second_formatting": []
}
])
parse_simple([
('NAME', 'a'),
('DOUBLE_STAR', '**'),
('NAME', 'b'),
('DOUBLE_STAR', '**'),
('NAME', 'c')
], [
{
"type": "binary_operator",
"value": '**',
"first": {"type": "name", "value": 'a'},
"second": {
"type": "binary_operator",
"value": '**',
"first": {
"type": "name",
"value": 'b'
},
"second": {
"type": "name",
"value": 'c',
},
"first_formatting": [],
"second_formatting": []
},
"first_formatting": [],
"second_formatting": []
}
])
def test_power_factor():
    "a ** +b"
    tokens = [
        ('NAME', 'a'),
        ('DOUBLE_STAR', '**', [('SPACE', ' ')], [('SPACE', ' ')]),
        ('PLUS', '+'),
        ('NAME', 'b')
    ]
    expected = [
        {
            "type": "binary_operator",
            "value": '**',
            "first": {
                "type": "name",
                "value": 'a',
            },
            "second": {
                "type": "unitary_operator",
                "value": '+',
                "target": {
                    "type": "name",
                    "value": 'b'
                },
                "formatting": [],
            },
            "first_formatting": [{"type": "space", "value": " "}],
            "second_formatting": [{"type": "space", "value": " "}]
        }
    ]
    # The original ran the identical input twice; keep both calls, feeding
    # each a fresh token list in case the parser consumes it.
    parse_simple(list(tokens), expected)
    parse_simple(list(tokens), expected)
def test_power_factor_minus():
    "a ** -b"
    tokens = [
        ('NAME', 'a'),
        ('DOUBLE_STAR', '**', [('SPACE', ' ')], [('SPACE', ' ')]),
        ('MINUS', '-'),
        ('NAME', 'b')
    ]
    expected = [
        {
            "type": "binary_operator",
            "value": '**',
            "first": {
                "type": "name",
                "value": 'a',
            },
            "second": {
                "type": "unitary_operator",
                "value": '-',
                "target": {
                    "type": "name",
                    "value": 'b'
                },
                "formatting": [],
            },
            "first_formatting": [{"type": "space", "value": " "}],
            "second_formatting": [{"type": "space", "value": " "}]
        }
    ]
    # Same expectation exercised twice, as in the original test.
    parse_simple(list(tokens), expected)
    parse_simple(list(tokens), expected)
def test_power_factor_tild():
    "a ** ~b"
    tokens = [
        ('NAME', 'a'),
        ('DOUBLE_STAR', '**', [('SPACE', ' ')], [('SPACE', ' ')]),
        ('TILDE', '~'),
        ('NAME', 'b')
    ]
    expected = [
        {
            "type": "binary_operator",
            "value": '**',
            "first": {
                "type": "name",
                "value": 'a',
            },
            "second": {
                "type": "unitary_operator",
                "value": '~',
                "target": {
                    "type": "name",
                    "value": 'b'
                },
                "formatting": [],
            },
            "first_formatting": [{"type": "space", "value": " "}],
            "second_formatting": [{"type": "space", "value": " "}]
        }
    ]
    # Same expectation exercised twice, as in the original test.
    parse_simple(list(tokens), expected)
    parse_simple(list(tokens), expected)
def test_power_operator_madness():
"a ** ~+-b"
parse_simple([
('NAME', 'a'),
('DOUBLE_STAR', '**', [('SPACE', ' ')], [('SPACE', ' ')]),
('TILDE', '~'),
('PLUS', '+'),
('MINUS', '-'),
('NAME', 'b')
], [
{
"type": "binary_operator",
"value": '**',
"first": {
"type": "name",
"value": 'a',
},
"second": {
"type": "unitary_operator",
"value": '~',
"target": {
"type": "unitary_operator",
"value": '+',
"target": {
"type": "unitary_operator",
"value": "-",
"target": {
"type": "name",
"value": 'b'
},
"formatting": [],
},
"formatting": [],
},
"formatting": []
},
"first_formatting": [{"type": "space", "value": " "}],
"second_formatting": [{"type": "space", "value": " "}]
}
])
parse_simple([
('NAME', 'a'),
('DOUBLE_STAR', '**', [('SPACE', ' ')], [('SPACE', ' ')]),
('TILDE', '~'),
('PLUS', '+'),
('MINUS', '-'),
('NAME', 'b')
], [
{
"type": "binary_operator",
"value": '**',
"first": {
"type": "name",
"value": 'a'
},
"second": {
"type": "unitary_operator",
"value": '~',
"target": {
"type": "unitary_operator",
"value": '+',
"target": {
"type": "unitary_operator",
"value": "-",
"target": {
"type": "name",
"value" |
from others import sms_request
print(sms_request('1 | 5683000435', '1234 | 56')) |
###############################################################################
# tetrapy.py
# Tetrapy
#
# Copyright (C) 2013 Kesara Rathnayake
#
# Tetrapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tetrapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tetrapy. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import pygame
import random
import sys
# Screen Size
X = 240
Y = 400
# Colours
BLACK = (000,000,000)
WHITE = (255,255,255)
RED = (255,000,000)
GREEN = (000,255,000)
BLUE = (000,000,255)
class Tetromino(object):
    """Abstract base class for a falling tetromino piece.

    Subclasses override draw/move/isLocked/getMatrix/collied
    ('collied' [sic] -- presumably 'collided'; kept as the call sites use it).
    """
    def __init__(self, colour):
        self.colour = colour
        self.locked = False
    def draw(self, screen):
        """Render this piece onto the pygame surface (subclass hook)."""
        pass
    def move(self, matrix, direction, rotaion):
        """Advance the piece one tick (subclass hook); 'rotaion' [sic]."""
        pass
    def isLocked(self, matrix):
        """Return True once the piece can no longer fall (subclass hook)."""
        pass
    def getMatrix(self):
        """Return the (x, y) cells the piece occupies (subclass hook)."""
        pass
    def collied(self, matrix):
        """Return True if the piece overlaps locked cells (subclass hook)."""
        pass
class TetrominoI(Tetromino):
    """The straight 'I' tetromino: four stacked 20x20 cells (20x80 px)."""
    def __init__(self, colour):
        super(TetrominoI, self).__init__(colour)
        self.W = 20  # piece width in pixels (one cell)
        self.H = 80  # piece height in pixels (four cells)
        self.x = 0
        self.y = 0
    def draw(self, screen):
        """Draw the piece as a single filled rectangle."""
        rect = pygame.Rect(self.x, self.y, self.W, self.H)
        pygame.draw.rect(screen, self.colour, rect)
    def move(self, matrix, direction, rotaion=None):
        """Advance one tick: shift horizontally by `direction` pixels when the
        move stays on screen, then fall one 20px cell.

        `matrix` holds the locked cells; `rotaion` [sic] is kept for interface
        compatibility but rotation is not implemented.
        """
        if not self.isLocked(matrix):
            if self.x + direction <= X-20 and self.x + direction >= 0:
                self.x += direction
            if self.y <= Y-80:
                self.y += 20
    def isLocked(self, matrix):
        """Return True once the piece rests on the floor or on locked cells."""
        if self.y + 80 == Y:
            self.locked = True
            return True
        elif self.collied(matrix):
            # Bug fix: the collision branch previously returned True without
            # latching self.locked, leaving the flag inconsistent.
            self.locked = True
            return True
        return False
    def getMatrix(self):
        """Return the four (x, y) cell coordinates occupied by the piece."""
        matrix = []
        for i in range(0, 4):
            matrix.append((self.x, self.y+20*i))
        return matrix
    def collied(self, matrix):
        """Return True if any of this piece's cells is already in `matrix`."""
        for i in range(0, 4):
            if (self.x, self.y+20*i) in matrix:
                return True
        return False
# Main game loop. NOTE(review): Python 2 syntax (`print` statement) and no
# explicit pygame.init() before set_mode -- confirm it runs on the target setup.
screen = pygame.display.set_mode((X, Y))
colours = [WHITE, RED, GREEN, BLUE]
active = None  # the piece currently falling
tetrominos = []  # locked pieces, kept so they can be redrawn
matrix = []  # occupied (x, y) cells of all locked pieces
while True:
    print matrix
    screen.fill(BLACK)
    if not active:
        active = TetrominoI(random.choice(colours))
    elif active.isLocked(matrix):
        # Freeze the finished piece: record its cells and spawn a new one.
        matrix.extend(active.getMatrix())
        tetrominos.append(active)
        active = TetrominoI(random.choice(colours))
    active.draw(screen)
    pygame.display.flip()
    k_left = k_right = 0
    direction = 0
    for event in pygame.event.get():
        if not hasattr(event, 'key'): continue
        if event.key == pygame.K_RIGHT: k_right += 10
        elif event.key == pygame.K_LEFT: k_left += -10
        elif event.key == pygame.K_ESCAPE: sys.exit(0)
        direction += (k_right + k_left)
    # Second render pass per frame: clear, draw locked pieces, then move and
    # draw the active piece at its new position.
    screen.fill(BLACK)
    for tetromino in tetrominos:
        tetromino.draw(screen)
    active.move(matrix, direction)
    active.draw(screen)
    pygame.display.flip()
    pygame.time.wait(100)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 14:09:59 2017
@author: SaintlyVi
"""
import pandas as pd
import numpy as np
from support import writeLog
def uncertaintyStats(submodel):
    """
    Creates a dict with statistics for observed hourly profiles for a given year.
    Use evaluation.evalhelpers.observedHourlyProfiles() to generate the input dataframe.

    Returns a DataFrame indexed by (customer_class, statistic) with the
    AnswerID_count and valid_obs_ratio columns only.
    """
    per_class = []
    for customer_class in submodel['class'].unique():
        described = submodel[submodel['class'] == customer_class].describe()
        described['customer_class'] = customer_class
        described = described.reset_index().set_index(['customer_class', 'index'])
        per_class.append(described)
    combined = pd.concat(per_class)
    return combined[['AnswerID_count', 'valid_obs_ratio']]
def dataIntegrity(submodels, min_answerid, min_obsratio):
    """
    This function returns the slice of submodels that meet the specified minimum uncertainty requirements. Submodels must form part of the same experiment (eg demand summary and hourly profiles).

    submodels : DataFrame or list of DataFrames, each with a .name attribute
        and AnswerID_count / valid_obs_ratio columns.
    Returns a DataFrame indexed by submodel_name with columns
    valid_data, uncertainty_index, valid_unit_count and unit.
    """
    if isinstance(submodels, list):
        models = submodels
    else:
        models = [submodels]
    rows = []
    for m in models:
        name = m.name
        valid_data = m[(m.AnswerID_count >= min_answerid) & (m.valid_obs_ratio >= min_obsratio)]
        uix = len(valid_data) / len(m)
        # Prefer hour-based units when the submodel provides them.
        try:
            valid_unit_count = valid_data['valid_hours'].sum()
            unit = 'total_valid_hours'
        except KeyError:  # was a bare except: -- narrowed to the missing column case
            valid_unit_count = valid_data['AnswerID_count'].sum()
            unit = 'valid_AnswerID_count'
        rows.append({'submodel_name': name,
                     'valid_data': valid_data,
                     'uncertainty_index': uix,
                     'valid_unit_count': valid_unit_count,
                     'unit': unit})
    # DataFrame.append() was removed in pandas 2.0; build the frame in one go.
    validmodels = pd.DataFrame(rows, columns=['submodel_name', 'valid_data',
                                              'uncertainty_index',
                                              'valid_unit_count', 'unit'])
    validmodels.set_index('submodel_name', drop=True, inplace=True)
    return validmodels
def modelSimilarity(ex_submodel, ex_ts, valid_new_submodel, new_ts, submod_type):
    """
    This function calculates the evaluation measure for the run.

    ex_submodel = (DataFrame) either existing/expert demand_summary or hourly_profiles submodel
    valid_new_submodel = (DataFrame) output from dataIntegrity function
                        -> only want to compare valid data
    submod_type = (str) one of [ds, hp]
                        -> ds=demand_summary, hp=hourly_profiles

    Returns (euclidean_distance, compared_value_count, merged_dataframe).
    """
    key_columns = {'ds': ['class', 'YearsElectrified'],
                   'hp': ['class', 'YearsElectrified', 'month', 'daytype', 'hour']}
    if submod_type not in key_columns:
        return print('Valid submod_type is one of [ds, hp] -> ds=demand_summary, hp=hourly_profiles.')
    merged_sub = ex_submodel.merge(valid_new_submodel, how='left', on=key_columns[submod_type])
    # Difference of the two time series; rows missing from the new model drop out.
    simvec = (merged_sub[new_ts] - merged_sub[ex_ts]).dropna()
    eucliddist = np.sqrt(sum(simvec**2))
    return eucliddist, len(simvec), merged_sub
def logCalibration(bm_model, year, exp_model, min_answerid = 2, min_obsratio = 0.85):
    """
    This function logs the evaluation results of the run.

    bm_model = [demand_summary, hourly_profiles, ds_val_col_name, hp_val_col_name]
        -- the benchmark/expert model and the column names to compare against.
    year, exp_model: select the experimental CSVs under
        data/experimental_model/<exp_model>/..._<year>.csv
    min_answerid, min_obsratio: thresholds forwarded to dataIntegrity().
    """
    #Generate data model
    # NOTE(review): assumes the CSVs exist with the expected column schema
    # (M_kw_mean / kva_mean etc.) -- confirm against the experiment outputs.
    ods = pd.read_csv('data/experimental_model/'+exp_model+'/demand_summary_'+year+'.csv')
    ohp = pd.read_csv('data/experimental_model/'+exp_model+'/hourly_profiles_'+year+'.csv')
    #Check data integrity
    ods.name = 'demand_summary'
    ohp.name = 'hourly_profiles'
    validmodels = dataIntegrity([ods, ohp], min_answerid, min_obsratio)
    valid_new_ds = validmodels.at['demand_summary','valid_data']
    valid_new_hp = validmodels.at['hourly_profiles','valid_data']
    new_dsts = 'M_kw_mean'
    new_hpts = 'kva_mean'
    #Fetch benchmark model
    bm_ds = bm_model[0]
    bm_hp = bm_model[1]
    bm_dsts = bm_model[2]
    bm_hpts = bm_model[3]
    #Calculate model similarity
    euclid_ds, count_ds, slice_ex_ds = modelSimilarity(bm_ds, bm_dsts, valid_new_ds, new_dsts, 'ds')
    euclid_hp, count_hp, sliced_ex_hp = modelSimilarity(bm_hp, bm_hpts, valid_new_hp, new_hpts, 'hp')
    #Prepare and write logs
    ds_uix = validmodels.at['demand_summary','uncertainty_index']
    ds_vuc = validmodels.at['demand_summary','valid_unit_count']
    ds_unit = validmodels.at['demand_summary','unit']
    hp_uix = validmodels.at['hourly_profiles','uncertainty_index']
    hp_vuc = validmodels.at['hourly_profiles','valid_unit_count']
    hp_unit = validmodels.at['hourly_profiles','unit']
    # One log row per submodel, written together so they share a run context.
    loglineds = [year, exp_model, ods.name, min_answerid, min_obsratio, ds_uix, ds_vuc,
                 ds_unit, euclid_ds, count_ds]
    loglinehp = [year, exp_model, ohp.name, min_answerid, min_obsratio, hp_uix, hp_vuc,
                 hp_unit, euclid_hp, count_hp]
    log_lines = pd.DataFrame([loglineds, loglinehp], columns = ['year','experiment',
                             'submodel','min_answerid_count','min_valid_obsratio',
                             'uncertainty_ix','valid_unit_count','unit','sim_eucliddist','sim_count'])
    writeLog(log_lines,'log_calibration')
|
import testtools
from keystoneclient.middleware import memcache_crypt
class MemcacheCryptPositiveTests(testtools.TestCase):
    """Positive-path tests for keystoneclient.middleware.memcache_crypt."""

    def _setup_keys(self, strategy):
        # Derive a key bundle for the given protection strategy.
        return memcache_crypt.derive_keys('token', 'secret', strategy)

    def test_constant_time_compare(self):
        # make sure it works as a compare, the "constant time" aspect
        # isn't appropriate to test in unittests
        ctc = memcache_crypt.constant_time_compare
        self.assertTrue(ctc('abcd', 'abcd'))
        self.assertTrue(ctc('', ''))
        self.assertFalse(ctc('abcd', 'efgh'))
        self.assertFalse(ctc('abc', 'abcd'))
        self.assertFalse(ctc('abc', 'abc\x00'))
        self.assertFalse(ctc('', 'abc'))

    def test_derive_keys(self):
        # All derived keys share a length but must differ from each other.
        keys = memcache_crypt.derive_keys('token', 'secret', 'strategy')
        self.assertEqual(len(keys['ENCRYPTION']),
                         len(keys['CACHE_KEY']))
        self.assertEqual(len(keys['CACHE_KEY']),
                         len(keys['MAC']))
        self.assertNotEqual(keys['ENCRYPTION'],
                            keys['MAC'])
        self.assertIn('strategy', keys.keys())

    def test_key_strategy_diff(self):
        # Different strategies must not produce identical key bundles.
        k1 = self._setup_keys('MAC')
        k2 = self._setup_keys('ENCRYPT')
        self.assertNotEqual(k1, k2)

    def test_sign_data(self):
        keys = self._setup_keys('MAC')
        sig = memcache_crypt.sign_data(keys['MAC'], 'data')
        self.assertEqual(len(sig), memcache_crypt.DIGEST_LENGTH_B64)

    def test_encryption(self):
        keys = self._setup_keys('ENCRYPT')
        # what you put in is what you get out
        for data in ['data', '1234567890123456', '\x00\xFF' * 13
                     ] + [chr(x % 256) * x for x in range(768)]:
            crypt = memcache_crypt.encrypt_data(keys['ENCRYPTION'], data)
            decrypt = memcache_crypt.decrypt_data(keys['ENCRYPTION'], crypt)
            self.assertEqual(data, decrypt)
            # Truncated ciphertext must be rejected, not silently decrypted.
            self.assertRaises(memcache_crypt.DecryptError,
                              memcache_crypt.decrypt_data,
                              keys['ENCRYPTION'], crypt[:-1])

    def test_protect_wrappers(self):
        data = 'My Pretty Little Data'
        for strategy in ['MAC', 'ENCRYPT']:
            keys = self._setup_keys(strategy)
            protected = memcache_crypt.protect_data(keys, data)
            self.assertNotEqual(protected, data)
            if strategy == 'ENCRYPT':
                self.assertNotIn(data, protected)
            unprotected = memcache_crypt.unprotect_data(keys, protected)
            self.assertEqual(data, unprotected)
            # A corrupted payload must fail MAC validation.
            self.assertRaises(memcache_crypt.InvalidMacError,
                              memcache_crypt.unprotect_data,
                              keys, protected[:-1])
            self.assertIsNone(memcache_crypt.unprotect_data(keys, None))

    def test_no_pycrypt(self):
        # Simulate PyCrypto being unavailable. Bug fix: restore the module
        # attribute in a finally block so a failing assertion no longer
        # leaves memcache_crypt.AES = None for subsequent tests.
        aes = memcache_crypt.AES
        memcache_crypt.AES = None
        try:
            self.assertRaises(memcache_crypt.CryptoUnavailableError,
                              memcache_crypt.encrypt_data, 'token', 'secret',
                              'data')
        finally:
            memcache_crypt.AES = aes
|
en = True,
))
s3db.configure(tablename,
crud_form = crud_form,
)
if r.representation == "geojson":
from s3 import S3Represent
s3db.vehicle_vehicle.vehicle_type_id.represent = S3Represent(lookup="vehicle_vehicle_type",
fields=("code",))
settings.customise_asset_asset_resource = customise_asset_asset_resource
# -------------------------------------------------------------------------
    def customise_cms_post_resource(r, tablename):
        """Deployment-specific tweaks to the cms_post resource.

        Adjusts field representations, injects Bootstrap JS on the newsfeed
        view, and configures fields for plain-format map popups.
        """
        s3db = current.s3db
        table = s3db.cms_post
        table.title.comment = None
        s3db.cms_post_organisation.organisation_id.represent = \
            s3db.org_OrganisationRepresent(acronym=False)
        if r.function == "newsfeed":
            # Inject Bootstrap JS for the attachments dropdown menu
            s3 = current.response.s3
            if s3.debug:
                s3.scripts.append("/%s/static/scripts/bootstrap.js" % r.application)
            elif s3.cdn:
                s3.scripts.append("http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.2/js/bootstrap.min.js")
            else:
                s3.scripts.append("/%s/static/scripts/bootstrap.min.js" % r.application)
        elif r.representation == "plain":
            # Map Popups
            table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
            from s3 import s3_auth_user_represent_name
            table.created_by.represent = s3_auth_user_represent_name
            # Used by default popups
            series = table.series_id.represent(r.record.series_id)
            current.response.s3.crud_strings["cms_post"].title_display = "%(series)s Details" % dict(series=series)
            s3db.configure("cms_post",
                           popup_url="",
                           )
            table.avatar.readable = False
            table.body.label = ""
            table.expired.readable = False
            table.replies.readable = False
            table.created_by.readable = True
            table.created_by.label = T("Author")
            # Used by cms_post_popup
            #table.created_on.represent = datetime_represent
    settings.customise_cms_post_resource = customise_cms_post_resource
# -------------------------------------------------------------------------
def cms_post_popup(r, output):
"""
Customised Map popup for cms_post resource
- include Photo if-present
@ToDo: Much better checking!
"""
doc_id = r.record.doc_id
table = current.s3db.doc_document
query = (table.deleted == False) & \
(table.doc_id == doc_id)
row = current.db(query).select(table.file,
limitby=(0, 1)
).first()
if row and row.file:
from gluon import IMG, URL
image = IMG(_src=URL(c="default", f="download", args=[row.file]))
output["image"] = image
# -------------------------------------------------------------------------
def customise_cms_post_controller(**attr):
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.representation == "plain":
# Map Popups
cms_post_popup(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_cms_post_controller = customise_cms_post_controller
# -------------------------------------------------------------------------
def customise_cr_shelter_controller(**attr):
# Default Filter
# Org and all Branches & SubBranches
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_and_children_default_filter,
tablename = "cr_shelter")
s3db = current.s3db
field = s3db.cr_shelter.shelter_type_id
field.readable = field.writable = False
list_fields = s3db.get_config("cr_shelter", "list_fields")
list_fields.remove("shelter_type_id")
return attr
settings.customise_cr_shelter_controller = customise_cr_shelter_controller
# -------------------------------------------------------------------------
    def customise_deploy_assignment_controller(**attr):
        """Configure list fields and report options for deploy_assignment."""
        s3db = current.s3db
        # Labels
        #table = s3db.deploy_assignment
        #table.job_title_id.label = T("RDRT Type")
        #table.start_date.label = T("Deployment Date")
        #table.end_date.label = T("EOM")
        # List fields
        list_fields = [(T("Mission"), "mission_id$name"),
                       (T("Appeal Code"), "mission_id$code"),
                       (T("Country"), "mission_id$location_id"),
                       (T("Incident Type"), "mission_id$event_type_id"),
                       # @todo: replace by date of first alert?
                       (T("Date"), "mission_id$created_on"),
                       "job_title_id",
                       #(T("Member"), "human_resource_id$person_id"),
                       (T("Member"), "human_resource_id$person_id"),
                       (T("Deploying Branch"), "human_resource_id$organisation_id"),
                       "start_date",
                       "end_date",
                       "appraisal.rating",
                       # @todo: Comments of the mission (=>XLS only)
                       ]
        # Report options
        report_fact = [(T("Number of Deployments"), "count(human_resource_id)"),
                       (T("Average Rating"), "avg(appraisal.rating)"),
                       ]
        # The same axes are offered for both rows and columns of the report.
        report_axis = [(T("Appeal Code"), "mission_id$code"),
                       (T("Country"), "mission_id$location_id"),
                       (T("Incident Type"), "mission_id$event_type_id"),
                       "job_title_id",
                       (T("Deploying Branch"), "human_resource_id$organisation_id"),
                       ]
        report_options = Storage(
            rows=report_axis,
            cols=report_axis,
            fact=report_fact,
            defaults=Storage(rows="mission_id$location_id",
                             cols="mission_id$event_type_id",
                             fact="count(human_resource_id)",
                             totals=True
                             )
            )
        s3db.configure("deploy_assignment",
                       list_fields = list_fields,
                       report_options = report_options,
                       )
        return attr
    settings.customise_deploy_assignment_controller = customise_deploy_assignment_controller
# -------------------------------------------------------------------------
def customise_deploy_mission_controller(**attr):
s3db = current.s3db
table = s3db.deploy_mission
table.code.label = T("Appeal Code")
table.event_type_id | .label = T("Incident Type")
table.organisation_id.readable = table.organisation_id.writable = False
# Report options
report_fact = [(T("Number of Missions"), "count(id)"),
| (T("Number of Countries"), "count(location_id)"),
(T("Number of Incident Types"), "count(event_type_id)"),
(T("Number of Responses"), "sum(response_count)"),
(T("Number of Deployments"), "sum(hrquantity)"),
]
report_axis = ["code",
"location_id",
"event_type_id",
"status",
]
report_options = Storage(rows = report_axis,
cols = report_axis,
|
from . imp | ort Checker
class ExactChecker(Checker):
    """Checker that accepts a submission only on a byte-exact output match."""
    async def check(self, sandbox, task):
        actual = await sandbox.read("/tmp/output.txt")
        expected = task.testcase.output
        task.accepted = actual == expected
        task.verdict = "AC" if task.accepted else "WA"
|
""" Cardiac Arrhythmia Database
The original dataset and further information can be found here:
https://archive.ics.uci.edu/ml/datasets/Arrhythmia
Brief description
-----------------
This data contains 452 observations on 279 variables (206 linear valued
+ 73 nominal) on ECG readings. The data was collected to determine the
type of arrhythmia based on the ECG.
7. Attribute Information:
-- Complete attribute documentation:
1 Age: Age in years , linear
2 Sex: Sex (0 = male; 1 = female) , nominal
3 Height: Height in centimeters , linear
4 Weight: Weight in kilograms , linear
5 QRS duration: Average of QRS duration in msec., linear
6 P-R interval: Average duration between onset of P and Q waves
in msec., linear
7 Q-T interval: Average duration between onset of Q and offset
of T waves in msec., linear
8 T interval: Average duration of T wave in msec., linear
9 P interval: Average duration of P wave in msec., linear
Vector angles in degrees on front plane of:, linear
10 QRS
11 T
12 P
13 QRST
14 J
15 Heart rate: Number of heart beats per minute ,linear
Of channel DI:
Average width, in msec., of: linear
16 Q wave
17 R wave
18 S wave
19 R' wave, small peak just after R
| 20 S' wave
21 Number of intrinsic deflections, linear
22 Existence of ragged R wave, nominal
23 Existence of diphasic derivation of R wave, nominal
24 Existence of ragged P wave, nominal
25 Existence of diphasic derivation of P wave, nominal
26 Existence of ragged T wave, nominal
27 Existence of diphasic derivation | of T wave, nominal
Of channel DII:
28 .. 39 (similar to 16 .. 27 of channel DI)
Of channels DIII:
40 .. 51
Of channel AVR:
52 .. 63
Of channel AVL:
64 .. 75
Of channel AVF:
76 .. 87
Of channel V1:
88 .. 99
Of channel V2:
100 .. 111
Of channel V3:
112 .. 123
Of channel V4:
124 .. 135
Of channel V5:
136 .. 147
Of channel V6:
148 .. 159
Of channel DI:
Amplitude , * 0.1 milivolt, of
160 JJ wave, linear
161 Q wave, linear
162 R wave, linear
163 S wave, linear
164 R' wave, linear
165 S' wave, linear
166 P wave, linear
167 T wave, linear
168 QRSA , Sum of areas of all segments divided by 10,
( Area= width * height / 2 ), linear
169 QRSTA = QRSA + 0.5 * width of T wave * 0.1 * height of T
wave. (If T is diphasic then the bigger segment is
considered), linear
Of channel DII:
170 .. 179
Of channel DIII:
180 .. 189
Of channel AVR:
190 .. 199
Of channel AVL:
200 .. 209
Of channel AVF:
210 .. 219
Of channel V1:
220 .. 229
Of channel V2:
230 .. 239
Of channel V3:
240 .. 249
Of channel V4:
250 .. 259
Of channel V5:
260 .. 269
Of channel V6:
270 .. 279
8. Missing Attribute Values: Several. Distinguished with '?'.
9. Class Distribution:
Database: Arrhythmia
Class code : Class : Number of instances:
01 Normal 245
02 Ischemic changes (Coronary Artery Disease) 44
03 Old Anterior Myocardial Infarction 15
04 Old Inferior Myocardial Infarction 15
05 Sinus tachycardy 13
06 Sinus bradycardy 25
07 Ventricular Premature Contraction (PVC) 3
08 Supraventricular Premature Contraction 2
09 Left bundle branch block 9
10 Right bundle branch block 50
11 1. degree AtrioVentricular block 0
12 2. degree AV block 0
13 3. degree AV block 0
14 Left ventricule hypertrophy 4
15 Atrial Fibrillation or Flutter 5
16 Others 22
Original Owner and Donor
------------------------
H. Altay Guvenir, PhD., and, Burak Acar, M.S., and Haldun Muderrisoglu, M.D., Ph.D.,
Bilkent University,
06533 Ankara, Turkey
Email: guvenir@cs.bilkent.edu.tr
Email: buraka@ee.bilkent.edu.tr
References
----------
H. Altay Guvenir, Burak Acar, Gulsen Demiroz, Ayhan Cekin
"A Supervised Machine Learning Algorithm for Arrhythmia Analysis"
Proceedings of the Computers in Cardiology Conference,
Lund, Sweden, 1997.
#TODO: explain that we use class=14
"""
# Authors: Joan Massich and Guillaume Lemaitre
# License: MIT
from os.path import join, exists
from os import makedirs
try:
    # Python 2
    from urllib2 import urlretrieve
except ImportError:
    # Python 3+: urlretrieve lives in urllib.request, not urllib.
    # (The previous `from urllib import urlretrieve` raised ImportError on Py3.)
    from urllib.request import urlretrieve
import numpy as np
DATA_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/"\
"arrhythmia/arrhythmia.data"
RAW_DATA_LABEL = 'arrhythmia'
def get_dataset_home(data_home=None, dir=RAW_DATA_LABEL):
    # Return the directory that caches this dataset.
    # NOTE(review): get_data_home is neither defined nor imported in this
    # module -- presumably intended from sklearn.datasets or a local helper;
    # as written this raises NameError. TODO confirm and add the import.
    return join(get_data_home(data_home=data_home), dir)
def fetch_arrhythmia(data_home=None, download_if_missing=True):
    """Fetcher for the UCI Cardiac Arrhythmia dataset.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        the original datasets for this `data_balance` study are stored at
        `../data/raw/` subfolders.

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.
    """
    data_home = get_dataset_home(data_home=data_home)
    data_file = join(data_home, 'data.csv')
    # Already cached: nothing to do.
    if exists(data_file):
        return
    # Bug fix: download_if_missing was previously accepted but ignored.
    if not download_if_missing:
        raise IOError('Arrhythmia data not found at %s' % data_file)
    if not exists(data_home):
        makedirs(data_home)
    print('downloading Arrhythmia data from %s to %s' % (DATA_URL, data_home))
    urlretrieve(DATA_URL, data_file)
def process_arrhythmia(target=14):
    """Process data of the arrhythmia dataset.

    Parameters
    ----------
    target: the target class [0..16]
        NOTE(review): currently unused by the implementation -- confirm intent.

    Returns
    -------
    (data, label)

    #TODO: check if files exist
    #TODO: a generic file managing using get_data_home
    """
    #TODO: assert target
    # Bug fix: the function object itself was passed to join()
    # (`join(get_data_home, 'data.csv')`), which raises TypeError.
    # Use the dataset directory helper and call it.
    f = join(get_dataset_home(), 'data.csv')
    # NOTE(review): the raw file marks missing values with '?', which
    # np.loadtxt cannot parse -- TODO confirm how missing values are handled.
    tmp_input = np.loadtxt(f, delimiter=',')
    return (tmp_input[:, :-1], tmp_input[:, -1])
def convert_arrhythmia_14():
    """Convert the raw arrhythmia data (target class 14) into a clean .npz file."""
    data, labels = process_arrhythmia(target=14)
    np.savez('../data/clean/uci-arrythmia_14.npz', data=data, label=labels)
# Script entry point: regenerate the cleaned class-14 dataset.
if __name__ == '__main__':
    convert_arrhythmia_14()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# li | cense information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# ------------------------------------------------------------------------- | -
from .error import Error
from .api_error import APIError, APIErrorException
from .face_rectangle import FaceRectangle
from .coordinate import Coordinate
from .face_landmarks import FaceLandmarks
from .facial_hair import FacialHair
from .head_pose import HeadPose
from .emotion import Emotion
from .hair_color import HairColor
from .hair import Hair
from .makeup import Makeup
from .occlusion import Occlusion
from .accessory import Accessory
from .blur import Blur
from .exposure import Exposure
from .noise import Noise
from .face_attributes import FaceAttributes
from .detected_face import DetectedFace
from .find_similar_request import FindSimilarRequest
from .similar_face import SimilarFace
from .group_request import GroupRequest
from .group_result import GroupResult
from .identify_request import IdentifyRequest
from .identify_candidate import IdentifyCandidate
from .identify_result import IdentifyResult
from .verify_face_to_person_request import VerifyFaceToPersonRequest
from .verify_face_to_face_request import VerifyFaceToFaceRequest
from .verify_result import VerifyResult
from .persisted_face import PersistedFace
from .face_list import FaceList
from .person_group import PersonGroup
from .person import Person
from .update_person_face_request import UpdatePersonFaceRequest
from .training_status import TrainingStatus
from .name_and_user_data_contract import NameAndUserDataContract
from .image_url import ImageUrl
from .face_api_enums import (
Gender,
GlassesType,
HairColorType,
AccessoryType,
BlurLevel,
ExposureLevel,
NoiseLevel,
FindSimilarMatchMode,
TrainingStatusType,
FaceAttributeType,
AzureRegions,
)
# Public API of this generated models package (models first, then enums).
__all__ = [
    'Error',
    'APIError', 'APIErrorException',
    'FaceRectangle',
    'Coordinate',
    'FaceLandmarks',
    'FacialHair',
    'HeadPose',
    'Emotion',
    'HairColor',
    'Hair',
    'Makeup',
    'Occlusion',
    'Accessory',
    'Blur',
    'Exposure',
    'Noise',
    'FaceAttributes',
    'DetectedFace',
    'FindSimilarRequest',
    'SimilarFace',
    'GroupRequest',
    'GroupResult',
    'IdentifyRequest',
    'IdentifyCandidate',
    'IdentifyResult',
    'VerifyFaceToPersonRequest',
    'VerifyFaceToFaceRequest',
    'VerifyResult',
    'PersistedFace',
    'FaceList',
    'PersonGroup',
    'Person',
    'UpdatePersonFaceRequest',
    'TrainingStatus',
    'NameAndUserDataContract',
    'ImageUrl',
    'Gender',
    'GlassesType',
    'HairColorType',
    'AccessoryType',
    'BlurLevel',
    'ExposureLevel',
    'NoiseLevel',
    'FindSimilarMatchMode',
    'TrainingStatusType',
    'FaceAttributeType',
    'AzureRegions',
]
|
ng_list_property('action_time')
followed_age = long_list_property('followed_age')
followed_gender = long_list_property('followed_gender')
followed_time = long_list_property('followed_time')
@classmethod
def default_statistics(cls):
stats = cls()
for prop in ('reached', 'rogered', 'action', 'followed'):
for statistic in ('age', 'gender', 'time'):
default_statistics = getattr(cls, 'default_%s_stats' % statistic)()
setattr(stats, '%s_%s' % (prop, statistic), default_statistics)
return stats
@staticmethod
def get_age_index(age):
i = int(age / 5) if age and age >= 0 else 0
if i > 20:
return 20
return i
@staticmethod
def get_gender_index(gender):
if gender == UserProfile.GENDER_MALE:
return 1
if gender == UserProfile.GENDER_FEMALE:
return 2
return 0
@staticmethod
def get_time_index(news_item_created_datetime, action_datetime):
# type: (datetime, datetime) -> int
diff = action_datetime - news_item_created_datetime
return int(diff.total_seconds() / 3600)
@staticmethod
def gender_translation_key(gender_index):
if gender_index == 1:
return u'gender-male'
elif gender_index == 2:
return u'gender-female'
else:
return u'unknown'
@classmethod
def get_age_label(cls, age_index):
azzert(age_index >= 0, 'Expected age_index to be positive, got %s' % age_index)
start_age = age_index * 5
end_age = start_age + 5
return u'%s - %s' % (start_age, end_age)
def _serialize_news_item_statistics(stream, stats):
    """
    Write *stats* to *stream* as wire version 1: a version tag followed by the
    twelve long-lists in fixed order (reached/rogered/action/followed, each as
    age, gender, time).

    Args:
        stream (StringIO)
        stats (NewsItemStatistics)
    """
    s_long(stream, 1)  # version
    for metric in ('reached', 'rogered', 'action', 'followed'):
        for dimension in ('age', 'gender', 'time'):
            s_long_list(stream, getattr(stats, '%s_%s' % (metric, dimension)))
def _deserialize_news_item_statistics(stream):
    """Read a NewsItemStatistics written by _serialize_news_item_statistics.

    The leading version tag is consumed but currently ignored."""
    ds_long(stream)  # version
    stats = NewsItemStatistics()
    for metric in ('reached', 'rogered', 'action', 'followed'):
        for dimension in ('age', 'gender', 'time'):
            setattr(stats, '%s_%s' % (metric, dimension), ds_long_list(stream))
    return stats
class NewsItemStatisticsProperty(db.UnindexedProperty):
    """Datastore property that stores statistics as a serialized Blob.

    Consistency fix: use ``with closing(StringIO())`` like the other custom
    properties in this module (NewsButtonsProperty, NewsFeedsProperty) so the
    stream is always released.
    """
    data_type = NewsStatisticPerApp

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        with closing(StringIO()) as stream:
            _serialize_news_statistic_per_app(stream, super(NewsItemStatisticsProperty, self)
                                              .get_value_for_datastore(model_instance))
            return db.Blob(stream.getvalue())

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        # NOTE(review): the writer above calls _serialize_news_statistic_per_app
        # while this reader calls _deserialize_news_item_statistic_per_app -
        # confirm both names exist and are a matched pair.
        if value is None:
            return None
        return _deserialize_news_item_statistic_per_app(StringIO(value))

    def validate(self, value):
        """Accept None; anything else must already be a data_type instance."""
        if value is not None and not isinstance(value, self.data_type):
            raise ValueError(
                'Property %s must be convertible to a %s instance (%s)' % (self.name, self.data_type.__name__, value))
        return value
class NewsButton(object):
    """A single action button attached to a news item."""
    # Property tags '1'..'5' identify the fields in the serialized form.
    id = unicode_property('1')
    caption = unicode_property('2')
    action = unicode_property('3')
    flow_params = unicode_property('4')
    index = long_property('5')
    def __init__(self, btn_id=None, caption=None, action=None, flow_params=None, index=0):
        # index determines the display order (see NewsButtons.__iter__)
        self.id = btn_id
        self.caption = caption
        self.action = action
        self.flow_params = flow_params
        self.index = index
def _serialize_news_button(stream, b):
    """Write one NewsButton to *stream* (current wire version is 3,
    see _serialize_news_buttons)."""
    s_unicode(stream, b.id)
    s_unicode(stream, b.caption)
    s_unicode(stream, b.action)
    s_unicode(stream, b.flow_params)
    s_long(stream, b.index)
def _deserialize_news_button(stream, version):
    """Read one NewsButton; fields introduced in later wire versions default."""
    b = NewsButton()
    b.id = ds_unicode(stream)
    b.caption = ds_unicode(stream)
    b.action = ds_unicode(stream)
    b.flow_params = ds_unicode(stream) if version >= 2 else None  # added in v2
    b.index = ds_long(stream) if version >= 3 else 0  # added in v3
    return b
def _serialize_news_buttons(stream, buttons):
    """Write a NewsButtons collection, prefixed with the wire version tag."""
    s_long(stream, 3) # version
    _serialize_news_button_list(stream, buttons)
def _deserialize_news_buttons(stream):
    """Read a NewsButtons collection written by _serialize_news_buttons."""
    version = ds_long(stream)
    buttons = NewsButtons()
    for b in _deserialize_news_button_list(stream, version):
        buttons.add(b)
    return buttons
# List (de)serializers for NewsButton records. The True flag presumably makes
# the deserializer forward the container version to each item - confirm in
# get_list_deserializer.
_serialize_news_button_list = get_list_serializer(_serialize_news_button)
_deserialize_news_button_list = get_list_deserializer(_deserialize_news_button, True)
class NewsButtons(SpecializedList):
    """Collection of NewsButton objects, unique by id, iterated in index order."""

    def add(self, button):
        """Add *button*; a second button with the same id raises
        DuplicateButtonIdException."""
        if button.id in self._table:
            raise DuplicateButtonIdException()
        self._table[button.id] = button

    def __iter__(self):
        """Iterate the buttons sorted by their display index."""
        return iter(sorted(self._table.itervalues(), key=lambda button: button.index))

    def values(self):
        """Return the buttons as a list, in display order."""
        return list(self)
class | NewsButtonsProperty(db.UnindexedProperty):
# Tell what the user type is.
data_type = NewsButtons |
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
with closing(StringIO()) as stream:
_serialize_news_buttons(stream, super(NewsButtonsProperty, self).get_value_for_datastore(model_instance))
return db.Blob(stream.getvalue())
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return _deserialize_news_buttons(StringIO(value))
def validate(self, value):
if value is not None and not isinstance(value, NewsButtons):
raise ValueError('Property %s must be convertible to a NewsButtons instance (%s)' % (self.name, value))
return super(NewsButtonsProperty, self).validate(value)
def empty(self, value):
return not value
class NewsFeed(object):
    """A named news feed belonging to a single app."""
    app_id = unicode_property('1')
    name = unicode_property('2')
    def __init__(self, app_id=None, name=None):
        self.app_id = app_id
        self.name = name
def _serialize_news_feed(stream, f):
    """Write one NewsFeed to *stream*."""
    s_unicode(stream, f.app_id)
    s_unicode(stream, f.name)
def _deserialize_news_feed(stream, version):
    """Read one NewsFeed; *version* is currently unused (single wire version)."""
    f = NewsFeed()
    f.app_id = ds_unicode(stream)
    f.name = ds_unicode(stream)
    return f
def _serialize_news_feeds(stream, feeds):
    """Write a NewsFeeds collection, prefixed with the wire version tag."""
    s_long(stream, 1) # version
    _serialize_news_feed_list(stream, feeds)
def _deserialize_news_feeds(stream):
    """Read a NewsFeeds collection written by _serialize_news_feeds."""
    version = ds_long(stream)
    feeds = NewsFeeds()
    # 'or []' guards against the list deserializer returning None for no items
    for f in _deserialize_news_feed_list(stream, version) or []:
        feeds.add(f)
    return feeds
# List (de)serializers for NewsFeed records; see the NewsButton counterparts
# above for the meaning of the True flag.
_serialize_news_feed_list = get_list_serializer(_serialize_news_feed)
_deserialize_news_feed_list = get_list_deserializer(_deserialize_news_feed, True)
class NewsFeeds(SpecializedList):
    """Collection of NewsFeed objects keyed by app_id."""
    def add(self, feed):
        # A second feed for the same app is a programming error.
        if feed.app_id in self._table:
            raise DuplicateAppIdException()
        self._table[feed.app_id] = feed
class NewsFeedsProperty(db.UnindexedProperty):
# Tell what the user type is.
data_type = NewsFeeds
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
with closing(StringIO()) as stream:
_serialize_news_feeds(stream, super(NewsFeedsProperty, self).get_value_for_datastore(model_instance))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, analog
import numpy as np
class qa_random_uniform_source(gr_unittest.TestCase):
    """QA for random_uniform_source_{b,s,i}: all samples must lie in
    [minimum, maximum).

    Bug fix: test_001 and test_002 had the assertGreaterEqual arguments
    reversed (they asserted minimum >= min(res)); they now match test_003.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001_byte(self):
        minimum = 0
        maximum = 5
        seed = 3
        n_items = 10000
        rnd_src = analog.random_uniform_source_b(minimum, maximum, seed)
        head = blocks.head(1, n_items)
        snk = blocks.vector_sink_b(1)
        self.tb.connect(rnd_src, head, snk)
        # set up fg
        self.tb.run()
        # check data
        res = snk.data()
        # every sample is within [minimum, maximum)
        self.assertGreaterEqual(np.min(res), minimum)
        self.assertLess(np.max(res), maximum)

    def test_002_short(self):
        minimum = 42
        maximum = 1025
        seed = 3
        n_items = 10000
        rnd_src = analog.random_uniform_source_s(minimum, maximum, seed)
        head = blocks.head(2, n_items)
        snk = blocks.vector_sink_s(1)
        self.tb.connect(rnd_src, head, snk)
        # set up fg
        self.tb.run()
        # check data
        res = snk.data()
        # every sample is within [minimum, maximum)
        self.assertGreaterEqual(np.min(res), minimum)
        self.assertLess(np.max(res), maximum)

    def test_003_int(self):
        minimum = 2 ** 12 - 2
        maximum = 2 ** 17 + 5
        seed = 3
        n_items = 10000
        rnd_src = analog.random_uniform_source_i(minimum, maximum, seed)
        head = blocks.head(4, n_items)
        snk = blocks.vector_sink_i(1)
        self.tb.connect(rnd_src, head, snk)
        # set up fg
        self.tb.run()
        # check data
        res = snk.data()
        self.assertGreaterEqual(np.min(res), minimum)
        self.assertLess(np.max(res), maximum)
# Standard GNU Radio QA entry point (writes an XML result file).
if __name__ == '__main__':
    gr_unittest.run(qa_random_uniform_source, "qa_random_uniform_source.xml")
|
he individual algorithms.
It would be possible to use alternative implementations
"""
import numpy as np
import theano
from theano import tensor as T
from theano.tensor import nlinalg
class algorithms_numpy():
"""
The algorithms implemented in numpy.
This should be the base class for any extended implementations
global notes:
gather_try is a concept of executing planned matrix operations. Not implmented
global todos:
TODO: remove/alter ops.
TODO: better commenting of code
TODO: test output of functions with matlab code - find some input!
TODO: min function in matlab returns index as well as value. need to fix all calls as the argmin
# does not work in the same manner.
"""
def zca_whiten(self, data):
"""
zca_whiten the data
TODO: test with oct2py
"""
m = np.mean(data, 0) # take mean
_data = data - m # demean data
cov = np.dot(_data.T, _data) / (_data.shape[0] - 1) #dot product of data.T, data devide by len-1
U, S, _ = np.linalg.svd(cov) # svd of covariance matrix
s = np.sqrt(S) #S.clip(self.regularization))
s_inv = np.diag(1. / s)
s = np.diag(s)
_whiten = np.dot(np.dot(U, s_inv), U.T)
return np.dot(_data, _whiten.T)
#Main Loop
def alignW(self, W, ops):
"""
:param: W: 2d array: nt0, nFilt
TODO: test using oct2py?
TODO: find out the use of this function...
"""
nt0, nFilt = W.shape
imax = np.argmin(W, axis=0)
dmax = -(imax - ops.nt0min)
for i in range(nFilt):
if dmax[i]>0:
W[(dmax[i] + 1):nt0, i] = W[1:nt0-dmax[i], i]
else:
W[0:nt0+dmax[i], i] = W[(1-dmax[i]):nt0, 1]
return W
    def alignWU(self, WU, ops):
        """
        :param: WU: 3d array: nt0, nChan,nFilt
        TODO: find out what this does
        TODO: test using oct2py
        """
        # NOTE(review): this MATLAB port looks broken in several places - see
        # the inline notes; compare against the original alignWU.m before use.
        nt0, n_chan, n_filt = WU.shape
        imin = np.argmin(WU.reshape(nt0*n_chan, n_filt), axis=0)
        # NOTE(review): np.ceil returns floats, so imin_chan[i] below is an
        # invalid index in modern numpy; MATLAB's 1-based ceil(imin/nt0) also
        # differs from the 0-based equivalent (imin // nt0).
        imin_chan = np.ceil(imin/nt0)
        # NOTE(review): dmax is (n_filt, nt0) but is assigned/compared as if it
        # were a length-n_filt vector; 'dmax[i]>0' on a row is ambiguous.
        dmax = np.zeros((n_filt,nt0))
        for i in range(n_filt):
            wu = WU[:, imin_chan[i], i]
            imin = np.argmin(wu)
            dmax[i] = -(imin - ops.nt0min)
            if dmax[i]>0:
                # NOTE(review): mismatched slice lengths and the trailing ',1]'
                # (should presumably be ',i]') look like 1-based carry-overs;
                # compare with alignW above.
                WU[(dmax[i] + 1):nt0, :, i] = WU[:nt0-dmax[i],:,1]
            else:
                WU[:nt0+dmax[i],:,i] = WU[(1-dmax[i]):nt0,:,i]
        return WU
    def decompose_dWU(self, ops, dWU, n_rank, kcoords):
        """
        :param: dWU: 3d array nt0, n_rank, n_filt
        TODO: find out what this does
        TODO: test using oct2py depends on get_svds and zero_out_kcoords
        """
        nt0, n_chan, n_filt = dWU.shape
        W = np.zeros((nt0, n_rank, n_filt)) #single precision in original code?
        U = np.zeros((n_chan, n_rank, n_filt)) #single precision in orignal code?
        mu = np.zeros((n_filt, 1)) #single precision in orignal code?
        # NOTE(review): np.nan0 does not exist (AttributeError) and '== nan' is
        # always False anyway; this should be dWU[np.isnan(dWU)] = 0.
        dWU[dWU == np.nan0] = 0 #replace nans
        # original code parallel processing option
        # TODO: add parallel processing
        for k in range(n_filt):
            # NOTE(review): self.get_svds is not defined here - the method below
            # is named get_svd; confirm which name is intended.
            a, b, c = self.get_svds(dWU[:, :, k], n_rank)
            W[:, :, k] = a
            U[:, :, k] = b
            mu[k] = c
        # NOTE(review): axes [1,3,2] are invalid for a 3-D array; MATLAB's
        # permute(X,[1 3 2]) translates to np.transpose(X, (0, 2, 1)).
        U = np.transpose(U, [1,3,2]) # TODO: improve this?
        W = np.transpose(W, [1,3,2])
        # NOTE(review): '== np.nan' is always False; use np.isnan(U).
        U[U == np.nan] = 0 #replace nans
        # NOTE(review): np.unique returns an array; len() of its first element
        # raises on a scalar - confirm the intended emptiness check.
        if len(np.unique(kcoords)[0]) > 0:
            U = self.zero_out_K_coords(U, kcoords, ops.criterionNoiseChannels)
        # NOTE(review): '*' is elementwise; MATLAB's U(:,:,1)' * U(:,:,1) is a
        # matrix product (np.dot), and the literal 1 may be a 1-based index.
        UtU = np.abs(U[:,:,1].T * U[:,:,1]) > 0.1
        # TODO: change. This seems like a strange function
        # NOTE(review): np.zeros(2, n_filt, n_rank) raises (shape must be a
        # tuple) and np.sum(..., axis=3) is out of range for a 3-D array.
        Wdiff = np.concatenate((W, np.zeros(2, n_filt,n_rank)), 0) - np.concatenate((np.zeros(2, n_filt, n_rank), W), axis=0)
        nu = np.sum( np.sum(Wdiff ** 2, axis=1), axis=3)
        return (W, U, mu, UtU, nu)
    def get_svd(self, dWU, n_rank):
        """
        :param dWU: array to apply svd to.
        TODO: find out what this function does
        TODO: test using oct2py
        """
        # NOTE(review): np.linalg.svd returns (U, s, Vh); MATLAB's svd returns
        # V, so 'Uall' here is the conjugate transpose of the MATLAB quantity.
        Wall, Sv, Uall = np.linalg.svd(dWU) #gather_try?
        # NOTE(review): column 1 here (and Sv[1:n_rank] below) look like
        # 1-based MATLAB carry-overs; probably column 0 / Sv[0:n_rank].
        imax = np.argmax(np.abs(Wall[:,1]))
        def sign(x):
            # NOTE(review): mutates its argument in place; it is called below
            # with the 0-d scalar Wall[imax, 0], which cannot be indexed like
            # this - np.sign would be the safe equivalent.
            x[x > 0] = 1
            x[x < 0] = -1
            return x
        Uall[:,0] = - Uall[:, 0] * sign(Wall[imax, 0])
        Wall[:,0] = - Wall[:, 0] * sign(Wall[imax, 0])
        # Sv is still the 1-D singular-value vector here, so this scales columns.
        Wall = Wall * Sv
        Sv = np.diag(Sv)
        mu = np.sum(Sv[1:n_rank] ** 2) ** 0.5
        Wall = Wall/mu
        W = Wall[:, 0:n_rank]
        U = Uall[:, 0:n_rank]
        return (W, U, mu)
def merge_spikes_in(self, uBase, nS, uS, crit):
"""
TODO: find out what this function does
check if spikes already in uBase?
nS is a histogram of some description?
crit is a criteria for exclusion (similarity?)
TODO: test using oct2py
"""
if uBase is None:
# if uBase is empty then return all the positions
return ([], np.arange(uS.shape[1]))
cdot = uBase[:,:,0].T * uS[:,:,0]
for j in range(1,uBase.shape[2]):
cdot = cdot + uBase[:,:,j].T * uS[:,:,j]
base_norms = np.sum(np.sum(uBase**2, axis=2), axis=0)
new_norms = np.sum(np.sum(uS**2, axis=2), axis=0)
c_norms = 1e-10 + np.tile(baseNorms.T, (1, len(new_norms))) \
+ tile(new_norms, (len(base_norms), 1))
cdot = 1 - 2*(cdot/c_norms)
imin = np.argmin(cdot, axis=0)
cdotmin = cdot[imin]
i_match = cdotmin < crit
nS_new = np.histogram(imin[i_match], np.arange(0, uBase.shape[1])) #not sure this will work
nS = nS = nS_new
i_non_match = np.where(cdotmin > crit)
return (nS, i_non_match)
    def mexMPregMUcpu(self, Params, data_raw, fW, data, UtU, mu, lam, dWU, nu, ops):
        """
        I believe this function does the heavy lifting. When using theano this is probably
        the one to reimplement
        get spike times and coefficients
        :params: Params: [NT, n_filt, Th, , , , , pm]
        TODO: figure out what this function does
        TODO: test with oct2py
        TODO: rename
        TODO: change call signature to make more pythonic
        TODO: use a data structure for the raw data
        """
        # NOTE(review): this port cannot currently run - see the inline notes.
        nt0 = ops.nt0
        # NOTE(review): Params[0:2] has two items but three names are unpacked
        # (ValueError); also 'n_Filt' here vs 'n_filt' used below (NameError).
        NT, n_Filt, Th = Params[0:2]
        pm = Params[8]
        fft_data = np.fft.fft(data,axis=0)
        proj = np.fft.ifft(fft_data * fW[:,:]).real #convolution
        proj = np.sum(proj.reshape(NT, n_filt,3), 2)
        Ci = proj + (mu * lam).T
        Ci = (Ci**2) / (1 + lam.T)
        Ci = Ci - (lam*mu**2).T
        imax = np.argmax(Ci, axis=1)
        mX = Ci[imax]
        # NOTE(review): my_min is not defined anywhere in this module.
        maX = -my_min(-mX,31,1) # Err... my_min? This function seems odd.
        #TODO: convert my_min. or remove?
        st = np.where((maX < mX + 1e-3) & (mX>Th**2))
        # NOTE(review): the filtered result below is discarded (no assignment).
        st[st>(NT-nt0)]
        imax = imax[st]
        x = []
        cost = []
        nsp = []
        if len(imax)>0:
            inds = st.T + np.arange(nt0).T
            # NOTE(review): 'dataRaw' is undefined - the parameter is data_raw.
            dspk = dataRaw[inds,:].reshape(nt0, len(st), ops.n_chan)
            dspk = np.transpose(dspk, [0, 2, 1])
            # NOTE(review): 'id' is the builtin function here, so len(id)
            # raises; likewise 'imin' below is never defined (only imax is).
            x = np.zeros(len(id))
            cost = np.zeros(len(id))
            nsp = np.zeros((n_filt, 1))
            for j in range(dspk.shape[2]):
                dWU[:, :, imax[j]] = pm * dWU[:, :, imin[j]] + (1 - pm) * dspk[:, :, j]
                x[j] = proj[st[j], imin[j]]
                cost[j] = maX[st[j]]
                nsp[imin[j]] = nsp[imin[j]] + 1
            imin = imin - 1
        return (dWU, st, id, x, cost, nsp)
def reduce_clusters(self, uS, crit):
"""
:param uS: 3d array
TODO: work out what this function does
TODO: test using matlab/oct2py
/mainLoop/reduce_clusters.m
"""
cdot = uS[:, :, 0].T * uS[:, :, 0]
for j in range(us.shape[2]):
cdot += uS[:, :, j].T * uS[:, :, j]
# compute norms of each spike
newNorms = np.sum(np.sum(uS**2, 3), |
def | bar():
| print('bar!')
|
#!/usr/ | bin/python
#coding:utf-8
print 'start to load...'
import sys
name = 42
def func(): pass
class kclass : pass
print 'done | loading.' |
import wx
from service.fit import Fit
import gui.mainFrame
from gui import globalEvents as GE
from .calc.fitRemoveBooster import FitRemoveBoosterCommand
class GuiRemoveBoosterCommand(wx.Command):
    """Undoable GUI command that removes the booster at *position* from a fit."""

    def __init__(self, fitID, position):
        wx.Command.__init__(self, True, "")
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.sFit = Fit.getInstance()
        self.internal_history = wx.CommandProcessor()
        self.fitID = fitID
        self.position = position

    def Do(self):
        """Submit the removal; on success recalculate the fit and notify the GUI."""
        command = FitRemoveBoosterCommand(self.fitID, self.position)
        if not self.internal_history.Submit(command):
            return False
        self.sFit.recalc(self.fitID)
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
        return True

    def Undo(self):
        """Undo everything recorded in our internal history, then refresh the fit."""
        for _ in self.internal_history.Commands:
            self.internal_history.Undo()
        self.sFit.recalc(self.fitID)
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
        return True
|
= PypmWrapper.printDeviceSummary
# Registries filled by _add_global below: constant value -> name / arity.
FUNCTIONS = {}
FUNCTION_ARITY = {}
def _add_global(name, arity, value):
    """Register a module-level MIDI constant and record its name and arity."""
    globals()[name] = value
    __all__.append(name)
    FUNCTIONS[value] = name
    FUNCTION_ARITY[value] = arity
# Register the per-channel status bytes for each MIDI voice message type
# (channels are named 1-16; the raw status byte encodes channel i in its low
# nibble).
for i in range(16):
    no = i + 1
    _add_global('NOTEOFF_CHAN%d' % no, 2, 0x80 + i)
    _add_global('NOTEON_CHAN%d' % no, 2, 0x90 + i)
    _add_global('POLYAFTERTOUCH_CHAN%d' % no, 2, 0xA0 + i)
    _add_global('CONTROLCHANGE_CHAN%d' % no, 2, 0xB0 + i)
    _add_global('PROGRAMCHANGE_CHAN%d' % no, 2, 0xC0 + i)
    _add_global('CHANAFTERTOUCH_CHAN%d' % no, 2, 0xD0 + i)
    _add_global('PITCHWHEEL_CHAN%d' % no, 2, 0xE0 + i)
# System common / realtime status bytes (no channel component).
_add_global('SYSTEMEXCL', 2, 0xf0)
_add_global('MTC_QFRAME', 2, 0xf1)
_add_global('SONGPOSPOINTER', 2, 0xf2)
_add_global('SONGSELECT', 2, 0xF3)
_add_global('RESERVED1', 2, 0xF4)
_add_global('RESERVED2', 2, 0xF5)
_add_global('TUNEREQ', 2, 0xF6)
_add_global('EOX', 2, 0xF7)
_add_global('TIMINGCLOCK', 2, 0xF8)
_add_global('RESERVED3', 2, 0xF9)
_add_global('START', 2, 0xFA)
_add_global('CONTINUE', 2, 0xFB)
_add_global('STOP', 2, 0xFC)
_add_global('ACTIVESENSING', 2, 0xFE)
_add_global('SYSTEMRESET', 2, 0xFF)
del _add_global
# pyflakes
# Re-bind the dynamically-created names so static analyzers can see them.
START = globals()['START']
TIMINGCLOCK = globals()['TIMINGCLOCK']
class MidiDispatcher(object):
    """
    Dispatcher for events received from a midi input channel.

    Example usage::

        init()
        input = getInput(3)
        def debug_event(event):
            print event
        disp = MidiDispatcher(input, [debug_event, NoteOnOffHandler(instr)])
        disp.start()
    """
    def __init__(self, midiInput, handlers, clock=None):
        # getClock presumably resolves None to a default clock - confirm.
        self.clock = getClock(clock)
        self.midiInput = midiInput
        self.handlers = handlers
    def start(self):
        """
        Start the MidiDispatcher - this will schedule an event to call
        all its handlers every tick with any buffered events.
        """
        nm = self.clock.meter.nm
        n = self.clock.meter.dtt
        # Begin on the next measure boundary and repeat every 1/96 division
        # (presumably matching MIDI clock resolution - confirm in the clock code).
        self._event = self.clock.schedule(self).startAfterTicks(
            nm(self.clock.ticks, 1) - self.clock.ticks,
            n(1, 96))
    def __call__(self):
        """
        Call all our handlers with buffered events (max of 32 per call
        are processed).
        """
        for message in self.midiInput.Read(32):
            for call in self.handlers:
                call(message)
class MidiHandler(object):
    def __call__(self, message):
        """
        Parse method and call method on self based on midi function. For
        example, if function is NOTEON_CHAN1, this will call our method
        noteon(), etc. If a message has a channel as part of it's function,
        this will be the first argument. After the first optional channel
        argument, remaining positional arguments are passed to the method in
        the same order as specified in MIDI. Not all MIDI functions need to be
        supplied or implemented in subclass.
        """
        packet, timestamp = message
        func, arg1, arg2, _pad = packet
        args = [arg1, arg2][:FUNCTION_ARITY.get(func, 0)]
        args.append(timestamp)
        funcname = FUNCTIONS[func]
        tokens = funcname.split('_')
        if len(tokens) == 2:
            event_type, channel = tokens
            channel = int(channel[4:])  # e.g. 'CHAN12' -> 12
            method = getattr(self, event_type.lower(), None)
            if method is None:
                debug('No handler defined for midi event of type: %s' % event_type)
                # bug fix: previously fell through and called None, raising
                # TypeError for every event type the subclass did not define
                return
            method(channel, *args)

    def noteon(self, channel, note, velocity, timestamp):
        """Default no-op handler; override in subclasses."""
        pass

    def noteoff(self, channel, note, velocity, timestamp):
        """Default no-op handler; override in subclasses."""
        pass
class _DummyInstrument:
@classmethod
def playnote(cls, note, velocity):
pass
@classmethod
def stopnote(cls, note):
pass
class MonitorHandler(MidiHandler):
    """
    A simple MidiHandler which takes a mapping of channels to instruments.
    """

    def __init__(self, instrs):
        self.instrs = instrs

    def noteon(self, channel, note, velocity, timestamp):
        """
        Immediately play the channel's instrument with the given note and
        velocity; the timestamp is ignored. A no-op for unmapped channels.
        """
        instrument = self.instrs.get(channel, _DummyInstrument)
        instrument.playnote(note, velocity)

    def noteoff(self, channel, note, velocity, timestamp):
        """
        Immediately stop the channel's instrument for the given note; velocity
        and timestamp are ignored. A no-op for unmapped channels.
        """
        instrument = self.instrs.get(channel, _DummyInstrument)
        instrument.stopnote(note)
# Alias kept for callers that use the older name.
NoteOnOffHandler = MonitorHandler
class ChordHandler(MidiHandler):
    """
    A chord handler is a simple MidiHandler which recognizes chords and sends
    to its callback.
    todo: Currently this implementation doesn't care about channels; but this
    behavior should likely change in the near future.
    """
    def __init__(self, callback, sustain=False):
        """
        callback: handler to receive chords
        sustain: if True, only call our callback with noteon events
        """
        self.callback = callback
        self.sustain = sustain
        self._chord = []  # notes currently held, in arrival order
    def noteon(self, channel, note, velocity, timestamp):
        """
        Add note to chord and call our callback with updated chord.
        Note that channel, velocity and timestamp arguments are ignored.
        """
        debug('noteon channel=%s note=%s velocity=%s t=%s' % (
            channel, note, velocity, timestamp))
        if note not in self._chord:
            self._chord.append(note)
            debug('calling %s' % self.callback)
            # pass a copy so the callback cannot mutate our internal state
            self.callback(list(self._chord))
    def noteoff(self, channel, note, velocity, timestamp):
        """
        Remove note from chord.
        If the attribute `sustain` is `True` then we do not
        call callback with the updated chord.
        Note that channel, velocity and timestamp arguments are ignored.
        """
        debug('noteoff channel=%s note=%s velocity=%s t=%s' % (
            channel, note, velocity, timestamp))
        if note in self._chord:
            self._chord.remove(note)
            if not self.sustain:
                debug('calling %s' % self.callback)
                self.callback(list(self._chord))
class NoteEventHandler(MidiHandler):
    """
    Forward noteon/noteoff events to two registered callbacks.

    The noteon callback receives (note, velocity); the noteoff callback
    receives just (note,).
    """

    def __init__(self, noteonCallback, noteoffCallback):
        self.noteonCallback = noteonCallback
        self.noteoffCallback = noteoffCallback

    def noteon(self, channel, note, velocity, timestamp):
        """Invoke the noteon callback; channel and timestamp are dropped."""
        # TODO - maybe do something smarter with the timestamp
        # ... like normalize to ticks
        self.noteonCallback(note, velocity)

    def noteoff(self, channel, note, velocity, timestamp):
        """Invoke the noteoff callback; channel, velocity and timestamp are dropped."""
        self.noteoffCallback(note)
class ClockSender(object):
"""
A simple midi beat clock sender which can be used to synchronize external
MIDI devices.
"""
    def __init__(self, midiOut, clock=None):
        # getClock presumably resolves None to a default clock - confirm.
        self.clock = getClock(clock)
        self.midiOut = midiOut
        # flipped elsewhere once clocking has begun (see __call__)
        self._started = False
    def start(self):
        """
        Start the ClockSender - on the next measure begin sending MIDI beat
        clock events. The first run will send a START and TIMINGCLOCK event.
        Subsequent calls (24 per quarter note), will send bare TIMINGCLOCK
        events.
        """
        nm = self.clock.meter.nm
        n = self.clock.meter.dtt
        # Same scheduling pattern as MidiDispatcher.start: begin at the next
        # measure boundary, repeat every 1/96 division.
        self._event = self.clock.schedule(self).startAfterTicks(
            nm(self.clock.ticks, 1) - self.clock.ticks,
            n(1, 96))
def __call__(self):
# START and TIMINGCLOCK are added to globals during mo |
class EnvironmentNotBootst | rapped(Exception):
def __init__(self, environment):
self.environment = environment
def __s | tr__(self):
return "environment %s is not bootstrapped" % self.environment
|
# Enthought library imports
from traits.api import HasTraits, Int, Bool
from kiva.trait_defs.api import KivaFont
from enable.colors import ColorTrait
class TextFieldStyle(HasTraits):
    """ This class holds style settings for rendering an EnableTextField.
    fixme: See docstring on EnableBoxStyle
    """
    # The color of the text
    text_color = ColorTrait((0,0,0,1.0))
    # The font for the text (must be monospaced!)
    font = KivaFont("Courier 12")
    # The color of highlighted text
    highlight_color = ColorTrait((.65,0,0,1.0))
    # The background color of highlighted items
    highlight_bgcolor = ColorTrait("lightgray")
    # The font for flagged/highlighted text (must be monospaced!)
    highlight_font = KivaFont("Courier 14 bold")
    # The number of pixels between each line
    line_spacing = Int(3)
    # Space to offset text from the widget's border
    text_offset = Int(5)
    # Cursor properties: color and width in pixels
    cursor_color = ColorTrait((0,0,0,1))
    cursor_width = Int(2)
    # Drawing properties: border visibility/color and widget background color
    border_visible = Bool(False)
    border_color = ColorTrait((0,0,0,1))
    bgcolor = ColorTrait((1,1,1,1))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
[tests/stdlib/test_cd.py]
Test the cd command.
"""
import unittest
import os
import tempfile
from ergonomica import ergo
class TestCd(unittest.TestCase):
    """Tests the cd command."""
    def test_cd(self):
        """
        Tests the cd command by changing into a fresh temp directory and back.
        """
        # remember the starting directory so the process cwd can be restored
        olddir = os.getcwd()
        newdir = tempfile.mkdtemp()
        ergo("cd {}".format(newdir))
        # NOTE(review): on macOS, os.getcwd() may return the resolved
        # /private/... path for a tempdir - confirm this equality holds on CI.
        self.assertEqual(os.getcwd(), newdir)
        # change back so later tests do not run from the temp dir
        ergo("cd {}".format(olddir))
        self.assertEqual(os.getcwd(), olddir)
|
"""Django admin interface for the shopping cart models. """
from django.contrib import admin
from shoppingcart.models import (
Coupon,
CourseRegistrationCodeInvoiceItem,
DonationConfiguration,
Invoice,
InvoiceTransaction,
PaidCourseRegistrationAnnotation
)
class SoftDeleteCouponAdmin(admin.ModelAdmin):
    """
    Admin for the Coupon table.
    soft-delete on the coupons: deleting only clears is_active, and the
    changelist shows active coupons only.
    """
    fields = ('code', 'description', 'course_id', 'percentage_discount', 'created_by', 'created_at', 'is_active')
    raw_id_fields = ("created_by",)
    readonly_fields = ('created_at',)
    actions = ['really_delete_selected']

    def get_queryset(self, request):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site - used by changelist_view. Soft-deleted coupons are hidden.
        """
        qs = super(SoftDeleteCouponAdmin, self).get_queryset(request)
        return qs.filter(is_active=True)

    def get_actions(self, request):
        # Remove Django's hard-delete bulk action; only the soft delete remains.
        actions = super(SoftDeleteCouponAdmin, self).get_actions(request)
        del actions['delete_selected']
        return actions

    def really_delete_selected(self, request, queryset):
        """override the default behavior of selected delete method"""
        for obj in queryset:
            obj.is_active = False
            obj.save()
        if queryset.count() == 1:
            message_bit = "1 coupon entry was"
        else:
            message_bit = "%s coupon entries were" % queryset.count()
        self.message_user(request, "%s successfully deleted." % message_bit)

    def delete_model(self, request, obj):
        """override the default behavior of single instance of model delete method"""
        obj.is_active = False
        obj.save()

    # bug fix: label read "Delete s selected entries"
    really_delete_selected.short_description = "Delete selected entries"
class CourseRegistrationCodeInvoiceItemInline(admin.StackedInline):
    """Admin for course registration code invoice items.
    Displayed inline within the invoice admin UI.
    """
    model = CourseRegistrationCodeInvoiceItem
    extra = 0
    can_delete = False
    # All fields are read-only in the admin.
    readonly_fields = (
        'qty',
        'unit_price',
        'currency',
        'course_id',
    )
    def has_add_permission(self, request):
        # Inline rows cannot be added from the admin UI.
        return False
class InvoiceTransactionInline(admin.StackedInline):
    """Admin for invoice transactions.
    Displayed inline within the invoice admin UI.
    """
    model = InvoiceTransaction
    extra = 0
    # Audit fields are populated automatically (see InvoiceAdmin.save_formset).
    readonly_fields = (
        'created',
        'modified',
        'created_by',
        'last_modified_by'
    )
class InvoiceAdmin(admin.ModelAdmin):
    """Admin for invoices.
    This is intended for the internal finance team
    to be able to view and update invoice information,
    including payments and refunds.
    """
    date_hierarchy = 'created'
    # NOTE(review): can_delete is an InlineModelAdmin option, not a ModelAdmin
    # one; deletion is actually blocked by has_delete_permission below.
    can_delete = False
    # bug fix: readonly_fields was assigned twice; the short tuple that used to
    # sit here was dead code, silently shadowed by the full tuple below.
    search_fields = (
        'internal_reference',
        'customer_reference_number',
        'company_name',
    )
    fieldsets = (
        (
            None, {
                'fields': (
                    'internal_reference',
                    'customer_reference_number',
                    'created',
                    'modified',
                )
            }
        ),
        (
            'Billing Information', {
                'fields': (
                    'company_name',
                    'company_contact_name',
                    'company_contact_email',
                    'recipient_name',
                    'recipient_email',
                    'address_line_1',
                    'address_line_2',
                    'address_line_3',
                    'city',
                    'state',
                    'zip',
                    'country'
                )
            }
        )
    )
    readonly_fields = (
        'internal_reference',
        'customer_reference_number',
        'created',
        'modified',
        'company_name',
        'company_contact_name',
        'company_contact_email',
        'recipient_name',
        'recipient_email',
        'address_line_1',
        'address_line_2',
        'address_line_3',
        'city',
        'state',
        'zip',
        'country'
    )
    inlines = [
        CourseRegistrationCodeInvoiceItemInline,
        InvoiceTransactionInline
    ]

    def save_formset(self, request, form, formset, change):
        """Save the user who created and modified invoice transactions. """
        instances = formset.save(commit=False)
        for instance in instances:
            if isinstance(instance, InvoiceTransaction):
                if not hasattr(instance, 'created_by'):
                    instance.created_by = request.user
                instance.last_modified_by = request.user
            instance.save()

    def has_add_permission(self, request):
        # Invoices are created by the purchase flow, never by hand in admin.
        return False

    def has_delete_permission(self, request, obj=None):
        return False
# Wire the admin classes up to their models.
admin.site.register(PaidCourseRegistrationAnnotation)
admin.site.register(Coupon, SoftDeleteCouponAdmin)
admin.site.register(DonationConfiguration)
admin.site.register(Invoice, InvoiceAdmin)
|
'''
This module searches the shodan data for IPs using a user-specified https certificate
'''
from common import helpers
class Analytics:
def __init__(self, cli_options):
self.cli_name = "CertSearch"
self.description = "Searches for user-provided HTTPS certificate"
self.https_cert = ''
self.found_ips = []
def analyze(self, all_ip_objects):
if self.https_cert == '':
print "Please provide the HT | TPS certificate you want to search for."
self.https_cert = raw_input(' \n\n[>] HTTPS Cert (including start and end tags): ').strip()
for path, single_ip in all_ip_objects.iteritems():
if single_ip[0].shodan_info is not '' and\
'No available information within Shodan about' not in\
single_ip[0].shodan_info:
for item in single_ip[0].shodan_info['data']:
| if 'opts' in item:
if 'pem' in item['opts']:
if self.https_cert.strip() in item['opts']['pem'].encode('utf-8').replace('\n', '').replace('\r', ''):
self.found_ips.append(single_ip[0].ip_address)
if len(self.found_ips) > 0:
print helpers.color("\nCertificate Found!")
print "===================================="
for ip in self.found_ips:
print helpers.color(ip)
print
else:
print helpers.color("\nCertificate is not found within the currently loaded data!\n", warning=True)
self.https_cert = ''
self.found_ips = []
return
|
import sys
import numpy as np
import cv2
def main():
    """Convert a raw little-endian 16-bit image dump to PNG, swapping each
    adjacent pair of columns, and display it.

    usage: script [WxH] [input.raw] [output.png]
    """
    w, h = map(int, (sys.argv[1] if len(sys.argv) > 1 else '2048x1944').split('x'))
    imgfile = sys.argv[2] if len(sys.argv) > 2 else r'D:\Downloads\example-navcam-imgs\navcamTests0619\rubbleML-def-14062019-25.raw'
    imgout = sys.argv[3] if len(sys.argv) > 3 else r'D:\Downloads\example-navcam-imgs\navcamTests0619\rubbleML-def-14062019-25.png'
    with open(imgfile, 'rb') as fh:
        raw_img = np.fromfile(fh, dtype=np.uint16, count=w * h)
    raw_img = raw_img.reshape((h, w))
    # swap adjacent column pairs: output column order is 1,0,3,2,...
    final_img = raw_img[:, np.array([(i*2+1, i*2) for i in range(0, w//2)]).flatten()]
    cv2.imshow('img', final_img)
    cv2.waitKey()
    cv2.imwrite(imgout, final_img)
# Script entry point.
if __name__ == '__main__':
    main()
from .classification import Clas | sification
from .ranking import | Ranking
|
from srqi.core import inquiry
import matplotlib.pyplot as plt
import numpy as np
def get_accumulation_fig(proc):
    """Return a 3-panel matplotlib figure for one procedure: cumulative dose,
    cumulative frame count, and iiDiameter over time, with the event type
    overlaid as colored markers on the bottom panel."""
    fig = plt.figure()
    plt.title("Accumulation During Procedure for Patient " + str(proc.PatientID) + " on " + str(proc.StudyDate))
    event_starts = [e.DateTime_Started for e in proc.get_events()]
    # plot doses
    dose_ax = plt.subplot(311)
    dose_ax.plot(event_starts,
                 np.cumsum([e.Dose_RP for e in proc.get_events()])
                 )
    plt.ylabel('Dose (Gy)')
    # plot frames (x axis shared so the panels stay aligned in time)
    frames_ax = plt.subplot(312, sharex = dose_ax)
    frames_ax.plot(event_starts,
                   np.cumsum([e.Number_of_Pulses for e in proc.get_events()])
                   )
    plt.ylabel('# of Frames')
    # plot mag
    mag_ax = plt.subplot(313, sharex = dose_ax)
    mag_ax.plot(event_starts,
                [e.iiDiameter for e in proc.get_events()])
    # fixed y-range keeps marker positions comparable between procedures
    plt.ylim((200,500))
    plt.ylabel('iiDiameter')
    # plot the event type on top of the mag plot:
    # acquisition (red), spot (yellow), fluoro (blue +), other (cyan)
    a_events = [e for e in proc.get_events() if e.Irradiation_Event_Type =='Stationary Acquisition']
    s_events= [e for e in proc.get_events() if e.Acquisition_Protocol=='Spot']
    f_events = [e for e in proc.get_fluoro_events()]
    o_events = [e for e in proc.get_events() if not (e.Irradiation_Event_Type=="Stationary Acquisition" or e.Acquisition_Protocol=="Spot" or e.Irradiation_Event_Type=="Fluoroscopy")]
    if len(f_events)>0:
        plt.scatter([e.DateTime_Started for e in f_events],
                    [e.iiDiameter for e in f_events],
                    marker='+', c='blue')
    if len(a_events)>0:
        collection = plt.scatter([e.DateTime_Started for e in a_events],
                                 [e.iiDiameter for e in a_events],
                                 marker='o', c='red')
        collection.set_edgecolor('red')
    if len(s_events)>0:
        collection = plt.scatter([e.DateTime_Started for e in s_events],
                                 [e.iiDiameter for e in s_events],
                                 marker='o', c='yellow')
        collection.set_edgecolor('yellow')
    if len(o_events)>0:
        collection = plt.scatter([e.DateTime_Started for e in o_events],
                                 [e.iiDiameter for e in o_events],
                                 marker='o', c='cyan')
        collection.set_edgecolor('cyan')
    # format xlabels
    fig.autofmt_xdate()
    return fig
class High_Cases(inquiry.Inquiry):
    """Inquiry that flags procedures whose total Dose_RP exceeds LIMIT and
    reports per-modality dose/frame breakdowns as text, figures and tables."""
    NAME = "High Cases"
    description = """Finds and analyzes cases where the dose exceeds a specified limit
Data required:
DICOM-SR xml
"""
    # User-tunable parameters picked up by the inquiry framework.
    LIMIT = inquiry.Inquiry_Parameter(5.0,"Dose Limit", "The doseage above-which cases should be analyzed")
    DATE_RANGE_START = inquiry.get_standard_parameter("DATE_RANGE_START")
    DATE_RANGE_END = inquiry.get_standard_parameter("DATE_RANGE_END")
    def run(self, procs, context, extra_procs):
        """Select procedures with summed Dose_RP above LIMIT and precompute
        per-modality dose and frame-count totals into self.high_cases."""
        high_cases = {}
        for proc in procs:
            total_dose = sum([e.Dose_RP for e in proc.get_events()])
            if total_dose > self.LIMIT.value:
                high_cases[proc] = {'total dose' : total_dose}
        for proc in high_cases.keys():
            high_cases[proc]['acquisition dose'] = sum([e.Dose_RP for e in proc.get_events() if e.Irradiation_Event_Type =='Stationary Acquisition'])
            high_cases[proc]['spot dose'] = sum([e.Dose_RP for e in proc.get_events() if e.Acquisition_Protocol=='Spot'])
            high_cases[proc]['fluoro dose'] = sum([e.Dose_RP for e in proc.get_fluoro_events()])
            high_cases[proc]['acquisition frames'] = sum([e.Number_of_Pulses for e in proc.get_events() if e.Irradiation_Event_Type =='Stationary Acquisition'])
            high_cases[proc]['spot frames'] = sum([e.Number_of_Pulses for e in proc.get_events() if e.Acquisition_Protocol=='Spot'])
            high_cases[proc]['fluoro frames'] = sum([e.Number_of_Pulses for e in proc.get_fluoro_events()])
            high_cases[proc]['total frames'] = sum([e.Number_of_Pulses for e in proc.get_events()])
        self.high_cases = high_cases
    def get_text(self):
        """Text output is only used to report the empty case; everything else
        is delivered through figures and tables."""
        if len(self.high_cases) == 0:
            return "No cases exceeding the dose limit found in the specified date range."
        else:
            return ''
    def get_figures(self):
        """Per high case: a dose pie, a frame-count pie, and the accumulation plot."""
        hc = self.high_cases
        figs = []
        pies = []  # NOTE(review): never used; left untouched in this doc-only pass
        for proc in hc.keys():
            # Pie chart of dosages by modality
            fig = plt.figure()
            plt.title("Dose (Gy) By Modality Patient " + str(proc.PatientID) + " on " + str(proc.StudyDate))
            def my_autopct(pct):
                # label wedges with both percentage and absolute dose
                total=hc[proc]['total dose']
                val=pct*total/100.0
                return '{p:.2f}% ({v:.3f} Gy)'.format(p=pct,v=val)
            # 'other' = remainder after the three known modalities; can go negative
            # if an event matches more than one filter, so clamp at zero
            other_dose = hc[proc]['total dose'] - hc[proc]['spot dose'] - hc[proc]['acquisition dose'] - hc[proc]['fluoro dose']
            if other_dose <0:
                other_dose = 0
            # NOTE(review): 'fluoro ' label carries a trailing space (display-only)
            plt.pie((hc[proc]['acquisition dose'],
                     hc[proc]['spot dose'],
                     hc[proc]['fluoro dose'],
                     other_dose),
                    labels = ('acquisition','spot','fluoro ', 'other'),
                    autopct = my_autopct)
            figs.append(fig)
            # Pie chart of frame counts by modality
            fig = plt.figure()
            plt.title("Frame Count by Modality for Patient " + str(proc.PatientID) + " on " + str(proc.StudyDate))
            def my_autopct(pct):
                # same idea, absolute frame counts this time
                total=hc[proc]['total frames']
                val=pct*total/100.0
                return '{p:.2f}% ({v:.0f})'.format(p=pct,v=val)
            other_frames = hc[proc]['total frames'] - (hc[proc]['spot frames'] + hc[proc]['acquisition frames'] + hc[proc]['fluoro frames'])
            if other_frames < 0:
                other_frames = 0
            plt.pie((hc[proc]['acquisition frames'],
                     hc[proc]['spot frames'],
                     hc[proc]['fluoro frames'],
                     other_frames),
                    labels = ('acquisition','spot','fluoro', 'other'),
                    autopct = my_autopct)
            figs.append(fig)
            # dose/frame accumulation plot
            figs.append(get_accumulation_fig(proc))
        return figs
    def get_tables(self):
        """Return one [heading, doses, frames] table per high case."""
        out = []
        hc = self.high_cases
        for proc in self.high_cases.keys():
            # NOTE(review): 'acqusition' is a typo in the user-visible heading
            heading = ["Patient " + str(proc.PatientID) + " on " + str(proc.StudyDate),
                       'fluoro','acqusition','spot', 'other','total']
            doses = ['Dose (Gy)', hc[proc]['fluoro dose'],
                     hc[proc]['acquisition dose'],
                     hc[proc]['spot dose'],
                     hc[proc]['total dose'] - hc[proc]['acquisition dose'] - hc[proc]['spot dose'] - hc[proc]['fluoro dose'],
                     hc[proc]['total dose']]
            frames = ['Frame Count', hc[proc]['fluoro frames'],
                      hc[proc]['acquisition frames'],
                      hc[proc]['spot frames'],
                      hc[proc]['total frames'] - hc[proc]['spot frames'] - hc[proc]['acquisition frames'] - hc[proc]['fluoro frames'],
                      hc[proc]['total frames']
                      ]
            out.append([heading, doses, frames])
        return out
|
def _lsbStrToInt(str):
return ord(str[0]) + (o | rd(str[1]) << 8) + (ord(str[2]) << 16) + (ord(str[3]) << 24)
| |
rver.environment']
== 'production')
tmpl = self.tmpl_lookup.get_template(tmpl_fname)
# Render the template
# If an exception occurs, render an error page showing the traceback
try:
return tmpl.render(**kwargs)
except:
traceback_string = "<h1>IPOL template rendering error</h1>"
traceback_string += "<h2>Template: %s</h2>" % tmpl_fname
traceback_string += "<h2>kwargs: %s</h2>" % kwargs
traceback = RichTraceback()
for (filename, lineno, function, line) in traceback.traceback:
traceback_string += \
"File <b>%s</b>, line <b>%d</b>, in <b>%s</b><br>" % \
(filename, lineno, function)
traceback_string += line + "<br><br>"
traceback_string += "%s: %s" % \
(str(traceback.error.__class__.__name__), \
traceback.error) + "<br>"
return traceback_string
#
# INDEX
#
def index(self):
    """
    Demo presentation / input selection menu.

    Reads the input index, generates thumbnails plus their URLs for each
    input set, and renders the input.html template.
    """
    inputd = config.file_dict(self.input_dir)
    tn_size = int(cherrypy.config.get('input.thumbnail.size', '192'))
    for (input_id, input_info) in inputd.items():
        # the 'files' field is a blank-separated list of file names
        names = input_info['files'].split()
        # thumbnails are generated on the fly (cached by thumbnail())
        thumbs = [thumbnail(self.input_dir + name, (tn_size, tn_size))
                  for name in names]
        inputd[input_id]['url'] = [self.input_url + os.path.basename(name)
                                   for name in names]
        inputd[input_id]['tn_url'] = [self.input_url + os.path.basename(thumb)
                                      for thumb in thumbs]
    return self.tmpl_out("input.html",
                         inputd=inputd)
#
# INPUT HANDLING TOOLS
#
def save_image(self, im, fullpath):
    '''
    Save image object given full path

    Thin wrapper around im.save() so that saving can be used as a
    threading.Thread target (see process_input).
    '''
    im.save(fullpath)
def convert_and_resize(self, im):
    '''
    Convert *im* in place to the application's input dtype and shrink it
    when it exceeds the configured pixel budget.
    '''
    im.convert(self.input_dtype)
    # a falsy input_max_pixels disables the size check entirely
    needs_resize = self.input_max_pixels and prod(im.size) > self.input_max_pixels
    if needs_resize:
        self.log("input resize")
        im.resize(self.input_max_pixels)
def process_input(self):
    """
    pre-process the input data

    For each of the input_nb uploaded files: open it as an image,
    convert/resize a working copy (in a background thread), keep an
    original-quality PNG copy, save converted + web-viewable copies,
    then delete the raw upload.  Returns a user-visible message when an
    image was resized, else None.  Raises cherrypy.HTTPError(400) on an
    unreadable input file.
    """
    msg = None
    for i in range(self.input_nb):
        # open the file as an image
        try:
            im = image(self.work_dir + 'input_%i' % i)
        except IOError:
            raise cherrypy.HTTPError(400, # Bad Request
                                     "Bad input file")
        threads = []
        # convert to the expected input format
        im_converted = im.clone()
        threads.append(threading.Thread(target=self.convert_and_resize, args = (im_converted, )))
        # Save the original file as PNG
        #
        # Do a check before security attempting copy.
        # If the check fails, do a save instead
        if im.im.format != "PNG" or \
           im.size[0] > 20000 or \
           im.size[1] > 20000 or \
           len(im.im.getbands()) > 4:
            # Save as PNG (slow)
            threads.append(threading.Thread(target=self.save_image,
                                            args = (im, self.work_dir + 'input_%i.orig.png' % i)))
        else:
            # Copy file (fast)
            shutil.copy(self.work_dir + 'input_%i' % i,
                        self.work_dir + 'input_%i.orig.png' % i)
        # Execute threads and wait for them
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        threads = []
        # save a working copy (only needed when the input format isn't PNG):
        if self.input_ext != ".png":
            threads.append(threading.Thread(target=self.save_image,
                                            args = (im_converted, self.work_dir + 'input_%i' % i + self.input_ext)))
        # save a web viewable copy
        threads.append(threading.Thread(target=self.save_image,
                                        args = (im_converted, self.work_dir + 'input_%i.png' % i)))
        # Execute threads and wait for them
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # delete the original
        os.unlink(self.work_dir + 'input_%i' % i)
        if im.size != im_converted.size:
            msg = "The image has been resized for a reduced computation time."
    return msg
def clone_input(self):
    """
    Clone the current execution's input files into a fresh key/work dir
    so the algorithm can be re-run on the same input.
    """
    self.log("cloning input from %s" % self.key)
    # remember where the files live before switching to a new key
    previous_work_dir = self.work_dir
    previous_meta = self.cfg['meta']
    self.new_key()
    self.init_cfg()
    # copy the converted, web-viewable and original copies of every input
    names = ['input_%i' % i + self.input_ext for i in range(self.input_nb)]
    names += ['input_%i.png' % i for i in range(self.input_nb)]
    names += ['input_%i.orig.png' % i for i in range(self.input_nb)]
    for name in names:
        shutil.copy(previous_work_dir + name,
                    self.work_dir + name)
    # carry the old metadata over into the new config
    self.cfg['meta'].update(previous_meta)
    self.cfg.save()
    return
#
# INPUT STEP
#
def input_select_callback(self, fnames):
    '''
    Callback for the users to give the opportunity
    to process non-standard input

    Default implementation is a no-op; subclasses override it to copy
    extra files into the work directory after input selection.
    '''
    pass # May be redefined by the subclass
def input_select(self, **kwargs):
    """
    use the selected available input images

    kwargs carries the image-map click as 'input_id.x' / 'input_id.y'
    keys; both must name the same input set.  When self.key is empty a
    shallow clone of this app object is created and registered in the
    AppPool so concurrent executions don't share state.
    """
    # When we arrive here, self.key should be empty.
    # If not, it means that another execution is concurrent.
    # In that case, we need to clone the app object
    key_is_empty = (self.key == "")
    if key_is_empty:
        self2 = base_app(self.base_dir)
        self2.__class__ = self.__class__
        self2.__dict__.update(self.__dict__)
    else:
        self2 = self
    self2.new_key()
    self2.init_cfg()
    # Add to app object pool
    if key_is_empty:
        pool = AppPool.get_instance() # Singleton pattern
        pool.add_app(self2.key, self2)
    # kwargs contains input_id.x and input_id.y
    # NOTE(review): indexing dict.keys() only works on Python 2
    input_id = kwargs.keys()[0].split('.')[0]
    assert input_id == kwargs.keys()[1].split('.')[0]
    # get the images
    input_dict = config.file_dict(self2.input_dir)
    fnames = input_dict[input_id]['files'].split()
    for i in range(len(fnames)):
        shutil.copy(self2.input_dir + fnames[i],
                    self2.work_dir + 'input_%i' % i)
    msg = self2.process_input()
    self2.log("input selected : %s" % input_id)
    self2.cfg['meta']['original'] = False
    self2.cfg.save()
    # Let users copy non-standard input into the work dir
    self2.input_select_callback(fnames)
    # jump to the params page
    return self2.params(msg=msg, key=self2.key)
def input_upload(self, **kwargs):
"""
use the uploaded input images
"""
self.new_key()
self.init_cfg()
for i in range(self.input_nb):
file_up = kwargs['file_%i' % i]
file_save = file(self.work_dir + 'input_%i' % i, 'wb')
if '' == file_up.filename:
# missing file
raise cherrypy.HTTPError(400, # Bad Request
"Missing input file")
size = 0
while True:
|
import random
import numpy as np
import tensorflow as tf
from language_model import LM
from hparams import HParams
def get_test_hparams():
    """Return a small HParams configuration suitable for fast unit tests."""
    config = dict(
        batch_size=21,
        num_steps=12,
        num_shards=2,
        num_layers=1,
        learning_rate=0.2,
        max_grad_norm=1.0,
        vocab_size=1000,
        emb_size=14,
        state_size=17,
        projected_size=15,
        num_sampled=500,
        num_gpus=1,
        average_params=True,
        run_profiler=False,
    )
    return HParams(**config)
def simple_data_generator(batch_size, num_steps):
    """Generate a trivial next-number prediction batch.

    Each row of x is a run of consecutive ints starting at a random value
    in [0, 20); y is x shifted by one; w is an all-ones weight mask.
    Returns (x, y, w) with dtypes (int32, int32, uint8).
    """
    starts = [random.randrange(0, 20) for _ in range(batch_size)]
    offsets = np.arange(num_steps, dtype=np.int32)
    x = np.asarray(starts, dtype=np.int32)[:, None] + offsets
    y = x + 1
    w = np.ones([batch_size, num_steps], np.uint8)
    return x, y, w
class TestLM(tf.test.TestCase):
    # FIX: the original subclassed tf.test.test_util.TensorFlowTestCase,
    # but `test_util` is not a public attribute of tf.test (AttributeError
    # at class-creation time); the public alias is tf.test.TestCase.
    """Smoke test: the LM should fit a trivial next-number dataset."""
    def test_lm(self):
        """Train for 50 steps on synthetic data; final loss must drop below 1.0."""
        hps = get_test_hparams()
        with tf.variable_scope("model"):
            model = LM(hps)
        with self.test_session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()
            loss = 1e5
            for i in range(50):
                x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
                loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
                print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
                if np.isnan(loss):
                    print("NaN detected")
                    break
            self.assertLess(loss, 1.0)
|
from flask import render_template
from . import auth
@auth.app_errorhandler(403)
def forbidden(e):
    """Render the custom 403 (Forbidden) page.

    FIX: the original named this function ``page_not_found``, the same
    name as the 404 handler defined right below it, so the 403 handler's
    module-level name was silently shadowed (flake8 F811) and misleading.
    The decorator registration is unaffected by the rename.
    """
    return render_template('403.html'), 403
@auth.app_errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
@auth.app_errorhandler( | 500)
def internal_server_error(e):
return render_template('500.html'), 500
|
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
# Here's a UDP version of the simplest possible protocol
cl | ass EchoUDP(DatagramProtocol):
def datagramReceived(self, datagram, address):
| self.transport.write(datagram, address)
def main():
    """Serve the UDP echo protocol on port 8000 until the reactor stops."""
    echo_port = 8000
    reactor.listenUDP(echo_port, EchoUDP())
    reactor.run()
# Start the echo server only when executed as a script.
if __name__ == '__main__':
    main()
|
umerator, expected_denominator)
self.assertEqual(expected_repr, repr(r))
def test_float(self):
    """float(r) must equal numerator / denominator under true division."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            rational = Rational(num, den)
            want = exp_num / (exp_den * 1.0)
            self.assertEqual(want, float(rational))
def test_int(self):
    """int(r) must equal the floor division of the reduced fraction."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            rational = Rational(num, den)
            want = exp_num // exp_den
            self.assertEqual(want, int(rational))
def test_neg(self):
    """Unary minus flips the numerator sign; the denominator is untouched."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            negated = -Rational(num, den)
            self.assertEqual(-exp_num, negated.numerator)
            self.assertEqual(exp_den, negated.denominator)
def test_pos(self):
    """Unary plus leaves both numerator and denominator unchanged."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            result = +Rational(num, den)
            self.assertEqual(exp_num, result.numerator)
            self.assertEqual(exp_den, result.denominator)
def test_abs(self):
    """abs(r) takes the numerator's absolute value; the denominator stays."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            result = abs(Rational(num, den))
            self.assertEqual(abs(exp_num), result.numerator)
            self.assertEqual(exp_den, result.denominator)
def test_invert_zero_division_error(self):
    """Inverting a zero-valued Rational must raise ZeroDivisionError."""
    zero = Rational(0)
    with self.assertRaises(ZeroDivisionError):
        ~zero
def test_invert(self):
    """~r swaps numerator and denominator, keeping the sign on the numerator."""
    for num, den, exp_num, exp_den in self.known_values:
        with self.subTest(numerator=num, denominator=den):
            inverted = ~Rational(num, den)
            if exp_num < 0:
                # negative fractions keep the minus sign on the numerator
                want_num, want_den = -exp_den, -exp_num
            else:
                want_num, want_den = exp_den, exp_num
            self.assertEqual(want_num, inverted.numerator)
            self.assertEqual(want_den, inverted.denominator)
def test_lt(self):
    """r1 < r2 must hold exactly for the listed strict-ordering pairs."""
    expectations = [
        (True, [(Rational(-1, 2), Rational()),
                (Rational(), Rational(1, 2)),
                (Rational(-1, 2), Rational(1, 2)),
                (Rational(1, 4), Rational(1, 2)),
                (Rational(-1, 2), Rational(-1, 4))]),
        (False, [(Rational(), Rational()),
                 (Rational(1, 2), Rational()),
                 (Rational(), Rational(-1, 2)),
                 (Rational(-1, 2), Rational(1, -2)),
                 (Rational(1, 2), Rational(2, 4)),
                 (Rational(1, 2), Rational(-1, 2)),
                 (Rational(1, 2), Rational(1, 4)),
                 (Rational(-1, 4), Rational(-1, 2))]),
    ]
    for expected, pairs in expectations:
        check = self.assertTrue if expected else self.assertFalse
        for lhs, rhs in pairs:
            with self.subTest(r1=lhs, r2=rhs, result=expected):
                check(lhs < rhs)
def test_le(self):
    """r1 <= r2 must hold exactly for the listed pairs (equal values included)."""
    expectations = [
        (True, [(Rational(), Rational()),
                (Rational(-1, 2), Rational()),
                (Rational(), Rational(1, 2)),
                (Rational(-1, 2), Rational(1, -2)),
                (Rational(1, 2), Rational(2, 4)),
                (Rational(-1, 2), Rational(1, 2)),
                (Rational(1, 4), Rational(1, 2)),
                (Rational(-1, 2), Rational(-1, 4))]),
        (False, [(Rational(1, 2), Rational()),
                 (Rational(), Rational(-1, 2)),
                 (Rational(1, 2), Rational(-1, 2)),
                 (Rational(1, 2), Rational(1, 4)),
                 (Rational(-1, 4), Rational(-1, 2))]),
    ]
    for expected, pairs in expectations:
        check = self.assertTrue if expected else self.assertFalse
        for lhs, rhs in pairs:
            with self.subTest(r1=lhs, r2=rhs, result=expected):
                check(lhs <= rhs)
def test_eq(self):
    """Equality respects reduction and sign normalisation; all else differs."""
    expectations = [
        (True, [(Rational(), Rational()),
                (Rational(-1, 2), Rational(1, -2)),
                (Rational(1, 2), Rational(2, 4))]),
        (False, [(Rational(-1, 2), Rational()),
                 (Rational(), Rational(1, 2)),
                 (Rational(1, 2), Rational()),
                 (Rational(), Rational(-1, 2)),
                 (Rational(-1, 2), Rational(1, 2)),
                 (Rational(1, 4), Rational(1, 2)),
                 (Rational(-1, 2), Rational(-1, 4)),
                 (Rational(1, 2), Rational(-1, 2)),
                 (Rational(1, 2), Rational(1, 4)),
                 (Rational(-1, 4), Rational(-1, 2))]),
    ]
    for expected, pairs in expectations:
        check = self.assertTrue if expected else self.assertFalse
        for lhs, rhs in pairs:
            with self.subTest(r1=lhs, r2=rhs, result=expected):
                check(lhs == rhs)
def test_ne(self):
    """!= is the exact complement of == over the same pairs."""
    expectations = [
        (True, [(Rational(-1, 2), Rational()),
                (Rational(), Rational(1, 2)),
                (Rational(1, 2), Rational()),
                (Rational(), Rational(-1, 2)),
                (Rational(-1, 2), Rational(1, 2)),
                (Rational(1, 4), Rational(1, 2)),
                (Rational(-1, 2), Rational(-1, 4)),
                (Rational(1, 2), Rational(-1, 2)),
                (Rational(1, 2), Rational(1, 4)),
                (Rational(-1, 4), Rational(-1, 2))]),
        (False, [(Rational(), Rational()),
                 (Rational(-1, 2), Rational(1, -2)),
                 (Rational(1, 2), Rational(2, 4))]),
    ]
    for expected, pairs in expectations:
        check = self.assertTrue if expected else self.assertFalse
        for lhs, rhs in pairs:
            with self.subTest(r1=lhs, r2=rhs, result=expected):
                check(lhs != rhs)
def test_ge(self):
true_test_cases = [(Rational(), Rational()),
(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
false_test_cases = [(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4))]
for r1, r2 in true_test_cases: |
# Copyright (c) 2020 Bartosz Szczesny <bszcz@bszcz.org>
# This program is free software under the MIT license.
# Demonstrations of assorted Python language features; each section prints
# a header line followed by the feature's output.
print('\n# avoid new line at the beginning')
s = """\
test
"""
print(s)
print('\n# string are immutable')
s = 'string'
try:
    s[1] = 'p'
except TypeError as e:
    print(e)
print('\n# enumerate() function')
for n, c in enumerate(['a', 'b', 'c']):
    print(n, c)
print('\n# list() is an iterator')
print(list(range(10)))
print('\n# else clause in loops')
for i in range(10):
    # FIX: test the loop variable `i`, not the stale `n` left over from the
    # enumerate() demo above (which made this loop break immediately at i == 0)
    if i == 2:
        break
else:
    print('loop did not break')
print('\n# docstrings')
def documented():
    "This function is documented."
    pass
# now can run: help(documented)
print(documented.__doc__)
print('\n# unpacking arguments')
def unpack(n, c):
    print('unpacked:', n, c)
arg_list = [1, 'a']
arg_dict = {'n': 1, 'c': 'a'}
unpack(*arg_list)
unpack(**arg_dict)
print('\n# function annotations')
def annotated(i: int, s: str) -> str:
    # FIX: return the argument, not the string literal 's'
    return s
print(annotated.__annotations__)
print('\n# not feeling myself')
class NotSelf():
    def __init__(o, n):
        o.n = n
    def get_n(o):
        return o.n
ns = NotSelf(10)
print(ns.get_n())
print('\n# lists operations')
print("""\
a = list()
a.copy() => a[:] # return shallow copy
a.clear() => del a[:]
a.append(item) => a[len(a):] = [item]
a.extend(iterable) => a[len(a):] = iterable
""")
print('\n# set comprehension')
a = 'abracadabra'
s = {x for x in a}
print(a, '->', s)
print('\n# keys can be any immutable type')
d = dict()
d[('a', 1)] = 100
d[('b', 2)] = 200
print(d)
print('\n# dictionary comprehension')
d = {x: 'got ' + str(x) for x in range(3)}
print(d)
print('\n# simple strings as keys')
d = dict(a=1, b=2, c=3)
print(d)
print('\n# reversed() function')
a = reversed(range(10)) # iterator
print(list(a))
print('\n# reload import')
# reload a module without
# restarting the interpreter
# or an already running script
import math
import importlib
importlib.reload(math)
print('\n# dir() function')
import builtins
print(dir()) # currently defined
print()
print(dir(math)) # defined by the module
print()
print(dir(builtins)) # build-in objects
print('\n# string formatting')
c = 299_792_458
print(f'Speed of light is {c} m/s.')
print('Speed of light is {c:.0f} km/s.'.format(c=c/1000))
pi = 3.14159
print(f'Pi is {pi:.2f}.')
d = {'a': 1, 'b': 2}
print('A: {a}, B: {b}.'.format(**d))
print('\n# exceptions')
class E1(Exception):
    pass
class E2(E1):
    pass
for e in [E1, E2, Exception]:
    try:
        raise e # no need for ()
    except E1: # will catch E2 as well
        print('E1.')
    except E2:
        print('E2.')
    except: # will catch anything
        print('Exception.')
    finally:
        print('Finally.')
print()
try:
    pass
except:
    pass
else: # if not exception raised
    print('No exception.')
finally:
    print('Finally.')
print()
try:
    try:
        raise E1
    except E2:
        print('E2.')
    except: # will catch anything
        raise # re-raise
    finally:
        print('Finally (E2).')
except E1:
    print('E1.')
finally:
    print('Finally (E1).')
print('\n# global and nonlocal scope')
def scope_test():
    def do_local():
        s = 'local'
    def do_nonlocal():
        nonlocal s
        s = 'nonlocal'
    def do_global():
        global s
        s = 'global'
    s = 's'
    do_local()
    print(s)
    do_nonlocal()
    print(s)
    do_global()
    print(s)
scope_test()
print(s)
print('\n# instance and subclass')
print(isinstance(1, int))
print(isinstance(1.0, int))
print(issubclass(bool, int))
print('\n# struct')
class Struct:
    pass
s = Struct()
s.x = 1
s.y = 2
print(s.x, s.y)
print('\n# generator')
def rev(s):
    for i in range(len(s) - 1, -1, -1):
        yield s[i]
for c in rev('abc'):
    print(c)
print('\n# generator expression')
# like list comprehension
# but with parentheses
s = sum(i * i for i in range(10))
print(s)
print('\n# regex')
import re
# can use \1 in regex string
r = re.sub(r'([0-9]) \1', r'\1', '1 2 2 3 3 3')
print(r)
print('\n# array')
# store numbers of the same type efficiently
import sys
from array import array
l = list([1, 2, 3, 4, 5])
a = array('B', [1, 2, 3, 4, 5]) # B - unsigned byte
print(sys.getsizeof(l))
print(sys.getsizeof(a))
print('\n# float as ratio')
pi = 3.14159
print(pi.as_integer_ratio())
print('\n# float as hex')
pi = 3.14159
print(pi.hex())
print(float.fromhex('0x1.921f9f01b866ep+1'))
print('\n# precise sum')
a = [0.3, 0.3, 0.3, 0.1]
print(sum(a) == 1)
print(math.fsum(a) == 1)
|
# coding: utf-8
from setuptools import setup, find_packages
# Package metadata for the tc_librato Thumbor extension
# (reports metrics to Librato via librato-metrics).
setup(
    name='tc_librato',
    version="0.0.1",
    description='Thumbor Librato extensions',
    author='Peter Schröder, Sebastian Eichner',
    author_email='peter.schroeder@jimdo.com, sebastian.eichner@jimdo.com',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(),
    install_requires=[
        'thumbor',
        'librato-metrics',
    ]
)
|
# coding=utf-8
fr | om django import http
from django.conf import settings
from django.contrib.auth import authenticate
from django.uti | ls.cache import patch_vary_headers
from registry.exceptions import RegistryException
__author__ = 'pivstone'
class CustomHeaderMiddleware(object):
    """
    Add the configured custom headers to every response
    (old-style Django middleware).
    """
    def process_response(self, request, response):
        """Copy each header from settings.CUSTOM_HEADERS onto the response."""
        for header_name, header_value in settings.CUSTOM_HEADERS.items():
            response[header_name] = header_value
        return response
class ExceptionsHandleMiddleware(object):
    """
    Translate RegistryException into a uniform JSON error response
    (old-style Django middleware).
    """
    def process_exception(self, request, exception):
        """Return a JsonResponse for RegistryException; defer everything else."""
        if not isinstance(exception, RegistryException):
            return None
        return http.JsonResponse(status=exception.status, data=exception.errors())
|
import os
import sys
import signal
import time
import subprocess
WHO = None
def handler(signum, frame):
    """SIGINT handler: report the signal, then reset SIGINT to the default
    disposition so a subsequent SIGINT terminates the process.

    FIX: removed the redundant ``global WHO`` declaration -- WHO is only
    read here, never assigned, so the declaration had no effect.
    """
    print('Signal handler', signum, WHO, frame)
    print('Disable handler', signum, WHO, frame)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
def main(argv):
    """
    Signal-propagation demo.  As 'parent': install the SIGINT handler and
    spawn this script again as 'child'; after the sleep loop, SIGINT the
    child and wait for it.  As 'child': sleep forever until signalled.

    NOTE(review): the child command hard-codes the script file name
    'signal_propagation.py' -- verify it matches this file's actual name.
    """
    global WHO
    WHO = argv[1]
    if WHO == 'parent':
        signal.signal(signal.SIGINT, handler)
        # shell=True: the whole command line is passed to the shell
        p = subprocess.Popen('python3 signal_propagation.py child',
                             shell=True)
    for index in range(0, 10):
        time.sleep(1)
        print('Sleep', index, WHO)
    if WHO == 'parent':
        p.send_signal(signal.SIGINT)
        p.communicate()
    else:
        while True:
            time.sleep(1)
            print('Sleep 1 infinity')
# Entry point: the role ('parent'/'child') is taken from argv[1].
if __name__ == '__main__':
    main(sys.argv)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-14 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Reuniao.hora_fim optional
    # (blank/null) and sets its verbose name.  Do not hand-edit beyond
    # comments; Django relies on the generated structure.
    dependencies = [
        ('comissoes', '0018_auto_20180924_1724'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reuniao',
            name='hora_fim',
            field=models.TimeField(blank=True, null=True, verbose_name='Horário de Término (hh:mm)'),
        ),
    ]
|
# Exceptions used throughout feedparser
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRE | CT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN AN | Y WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Public exception names exported by this module.
__all__ = [
    'ThingsNobodyCaresAboutButMe',
    'CharacterEncodingOverride',
    'CharacterEncodingUnknown',
    'NonXMLContentType',
    'UndeclaredNamespace',
]
class ThingsNobodyCaresAboutButMe(Exception):
    """Base class for the parser's informational exceptions (see subclasses)."""
    pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
    """Raised/reported when the declared character encoding was overridden."""
    pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
    """Raised/reported when the character encoding could not be determined."""
    pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe):
    """Raised/reported when the served Content-Type is not an XML type."""
    pass
class UndeclaredNamespace(Exception):
    """Raised when the document uses an XML namespace it never declared."""
    pass
|
# -*- coding: utf-8 -*-
ifac | e = | None
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: creates/drops the cmsplugin_flash
    table for the Flash plugin model.  Do not hand-edit beyond comments."""
    def forwards(self, orm):
        # Adding model 'Flash'
        db.create_table('cmsplugin_flash', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
            ('width', self.gf('django.db.models.fields.CharField')(max_length=6)),
            ('height', self.gf('django.db.models.fields.CharField')(max_length=6)),
        ))
        db.send_create_signal('flash', ['Flash'])
    def backwards(self, orm):
        # Deleting model 'Flash'
        db.delete_table('cmsplugin_flash')
    # Frozen ORM snapshot used by South to reconstruct the models
    # as they existed at the time this migration was generated.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'flash.flash': {
            'Meta': {'object_name': 'Flash', 'db_table': "'cmsplugin_flash'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'height': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
            'width': ('django.db.models.fields.CharField', [], {'max_length': '6'})
        }
    }
    complete_apps = ['flash']
#!/usr/bin/python
#
# linearize.py: Construct a linear, no-fork, best version of the blockchain.
#
#
# Copyright (c) 2013 The Carboncoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import htt | plib
import sys
ERR_SLEEP = 15          # seconds to wait after an RPC error (not used in the visible code)
MAX_NONCE = 1000000L    # Python 2 long literal (not used in the visible code)
settings = {}           # filled from the key=value config file in __main__
class CarboncoinRPC:
    """Minimal JSON-RPC 1.1 client for a carboncoind node (Python 2 code:
    print statements, httplib).

    NOTE(review): ``self.OBJID += 1`` rebinds OBJID as an instance
    attribute on first use, so request ids are per-instance, not shared
    through the class attribute.
    """
    OBJID = 1
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header reused on every request
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its 'result', the 'error' object
        on RPC failure, or None on transport/decoding problems."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblock(self, hash, verbose=True):
        # verbose=False returns the hex-serialized block instead of JSON
        return self.rpc('getblock', [hash, verbose])
    def getblockhash(self, index):
        return self.rpc('getblockhash', [index])
def getblock(rpc, settings, n):
    """Return the raw serialized block at height n (Python 2 hex-decode)."""
    hash = rpc.getblockhash(n)
    hexdata = rpc.getblock(hash, False)   # verbose=False -> hex string
    data = hexdata.decode('hex')
    return data
def get_blocks(settings):
    """Fetch blocks min_height..max_height over RPC and append each to the
    output file as (netmagic, little-endian length, raw block) records."""
    rpc = CarboncoinRPC(settings['host'], settings['port'],
                        settings['rpcuser'], settings['rpcpassword'])
    outf = open(settings['output'], 'ab')   # append mode: resumable
    for height in xrange(settings['min_height'], settings['max_height']+1):
        data = getblock(rpc, settings, height)
        outhdr = settings['netmagic']           # 4-byte network magic
        outhdr += struct.pack("<i", len(data))  # little-endian block size
        outf.write(outhdr)
        outf.write(data)
        if (height % 1000) == 0:
            sys.stdout.write("Wrote block " + str(height) + "\n")
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: linearize.py CONFIG-FILE"
        sys.exit(1)
    # parse the simple key=value config file, ignoring '#' comment lines
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # fill in defaults for anything the config file omitted
    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'output' not in settings:
        settings['output'] = 'bootstrap.dat'
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 279000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # normalize string settings to their binary/int forms (Python 2 hex-decode)
    settings['netmagic'] = settings['netmagic'].decode('hex')
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])
    get_blocks(settings)
|
if h:
for i in range(16): x[i] += y[i]
for i in range(4):
x[5*i] -= ld32(c[4*i:])
x[6+i] -= ld32(in_[4*i:])
for i in range(4):
out[4*i:] = st32(out[4*i:], x[5*i])
out[16 + 4*i:] = st32(out[16 + 4*i:], x[6 + i])
else:
for i in range(16):
out[4*i:] = st32(out[4*i:], x[i] + y[i])
def crypto_core_salsa20_tweet(out, in_, k, c):
    '''int crypto_core_salsa20_tweet(u8*out, const u8*in, const u8*k, const u8*c)'''
    # Full Salsa20 core: final positional flag False selects the 64-byte output form.
    core(out, in_, k, c, False)
    return 0
def crypto_core_hsalsa20_tweet(out, in_, k, c):
    '''int crypto_core_hsalsa20_tweet(u8*out, const u8*in, const u8*k, const u8*c)'''
    # HSalsa20 variant: final positional flag True selects the 32-byte output form.
    core(out, in_, k, c, True)
    return 0
# The Salsa20 constant "expand 32-byte k", passed as the c argument of the cores.
sigma = IntArray(u8, size=16, init=b'expand 32-byte k')
def crypto_stream_salsa20_tweet_xor(c, m, b, n, k):
    '''int crypto_stream_salsa20_tweet_xor(u8*c, const u8*m, u64 b, const u8*n, const u8*k)'''
    # XOR message m (b bytes) with the Salsa20 keystream for nonce n and key k
    # into c.  When m is falsy, raw keystream bytes are written instead.
    z = IntArray(u8, size=16)
    x = IntArray(u8, size=64)
    if not b: return 0
    for i in range(8): z[i] = n[i]  # z = 8 nonce bytes + 8-byte block counter (zeroed)
    c_off = 0 ; m_off = 0
    while b >= 64:
        crypto_core_salsa20_tweet(x, z, k, sigma)
        for i in range(64): c[i + c_off] = (m[i + m_off] if m else 0) ^ x[i]
        # 64-bit little-endian increment of the block counter in z[8:16]
        u = u32(1)
        for i in range(8, 16):
            u += u32(z[i])
            z[i] = u
            u >>= 8
        b -= 64
        c_off += 64
        if m: m_off += 64
    if b:
        # final partial block
        crypto_core_salsa20_tweet(x, z, k, sigma)
        for i in range(b): c[i + c_off] = (m[i + m_off] if m else 0) ^ x[i]
    return 0
def crypto_stream_salsa20_tweet(c, d, n, k):
    '''int crypto_stream_salsa20_tweet(u8*c, u64 d, const u8*n, const u8*k)'''
    # Keystream only: XOR against an empty message.
    return crypto_stream_salsa20_tweet_xor(c, IntArray(u8), d, n, k)
def crypto_stream_xsalsa20_tweet(c, d, n, k):
    '''int crypto_stream_xsalsa20_tweet(u8*c, u64 d, const u8*n, const u8*k)'''
    # XSalsa20: derive a subkey from the first 16 nonce bytes via HSalsa20,
    # then run plain Salsa20 with the remaining 8 nonce bytes.
    s = IntArray(u8, size=32)
    crypto_core_hsalsa20_tweet(s, n, k, sigma)
    return crypto_stream_salsa20_tweet(c, d, n[16:], s)
def crypto_stream_xsalsa20_tweet_xor(c, m, d, n, k):
    '''int crypto_stream_xsalsa20_tweet_xor(u8*c, const u8*m, u64 d, const u8*n, const u8*k)'''
    # XSalsa20 encryption: HSalsa20 subkey derivation, then Salsa20 XOR.
    s = IntArray(u8, size=32)
    crypto_core_hsalsa20_tweet(s, n, k, sigma)
    return crypto_stream_salsa20_tweet_xor(c, m, d, n[16:], s)
def add1305(h, c):
    '''void add1305(u32*h, const u32*c)'''
    # h += c over 17 radix-2^8 limbs with carry propagation.
    u = u32()
    for j in range(17):
        u += u32(h[j] + c[j])
        h[j] = u & 255
        u >>= 8
# Constant used for the final Poly1305 reduction (adding it is subtracting p in this radix).
minusp = IntArray(u32, size=17, init=(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252))
def crypto_onetimeauth_poly1305_tweet(out, m, n, k):
    '''int crypto_onetimeauth_poly1305_tweet(u8*out, const u8*m, u64 n, const u8*k)'''
    # Poly1305 MAC over m (n bytes) with 32-byte key k, computed across
    # 17 radix-2^8 limbs -- a faithful port of the TweetNaCl C code.
    s = u32()
    u = u32()
    x = IntArray(u32, size=17)
    r = IntArray(u32, size=17)
    h = IntArray(u32, size=17)
    c = IntArray(u32, size=17)
    g = IntArray(u32, size=17)
    # Load and clamp r (first key half) as required by Poly1305.
    for j in range(16): r[j] = k[j]
    r[3] &= 15
    r[4] &= 252
    r[7] &= 15
    r[8] &= 252
    r[11] &= 15
    r[12] &= 252
    r[15] &= 15
    while n > 0:
        # Load the next chunk (up to 16 bytes) and append the 0x01 pad byte.
        c[:] = 17*[u32()]
        for j in range(16):
            if j >= n: j -= 1 ; break
            c[j] = m[j]
        j += 1
        c[j] = 1
        m = m[j:]; n -= j
        add1305(h, c)
        # h = (h + c) * r; the 320 factor folds wrapped limbs back (mod 2^130-5).
        for i in range(17):
            x[i] = 0
            for j in range(17): x[i] += h[j]*(r[i - j] if j <= i else 320*r[i + 17 - j])
        for i in range(17): h[i] = x[i]
        # Two carry passes keep limbs within range.
        u = 0
        for j in range(16):
            u += h[j]
            h[j] = u & 255
            u >>= 8
        u += h[16]; h[16] = u & 3
        u = 5*(u >> 2)
        for j in range(16):
            u += h[j]
            h[j] = u & 255
            u >>= 8
        u += h[16]; h[16] = u
    # Final reduction: constant-time select between h and h - p.
    for j in range(17): g[j] = h[j]
    add1305(h, minusp)
    s = -(h[16] >> 7)
    for j in range(17): h[j] ^= s & (g[j] ^ h[j])
    # Add the second key half and emit the 16-byte tag.
    for j in range(16): c[j] = k[j + 16]
    c[16] = 0
    add1305(h, c)
    for j in range(16): out[j] = h[j]
    return 0
def crypto_onetimeauth_poly1305_tweet_verify(h, m, n, k):
    '''int crypto_onetimeauth_poly1305_tweet_verify(const u8*h, const u8*m, u64 n, const u8*k)'''
    # Recompute the tag over m and compare against h via the verifier primitive.
    x = IntArray(u8, size=16)
    crypto_onetimeauth_poly1305_tweet(x, m, n, k)
    return crypto_verify_16_tweet(h, x)
def crypto_secretbox_xsalsa20poly1305_tweet(c, m, d, n, k):
    '''int crypto_secretbox_xsalsa20poly1305_tweet(u8*c, const u8*m, u64 d, const u8*n, const u8*k)'''
    # NaCl convention: m carries 32 zero bytes of padding up front (d includes them).
    if d < 32: return -1
    crypto_stream_xsalsa20_tweet_xor(c, m, d, n, k)
    # The first 32 keystream bytes (now in c[0:32]) serve as the Poly1305 key;
    # the tag over the ciphertext is written into c[16:32].
    c_out = c[16:]
    crypto_onetimeauth_poly1305_tweet(c_out, c[32:], d - 32, c)
    c[16:] = c_out
    c[:16] = 16*[u8()]
    return 0
def crypto_secretbox_xsalsa20poly1305_tweet_open(m, c, d, n, k):
    '''int crypto_secretbox_xsalsa20poly1305_tweet_open(u8*m, const u8*c, u64 d, const u8*n, const u8*k)'''
    x = IntArray(u8, size=32)
    if d < 32: return -1
    # Regenerate the Poly1305 key from the first keystream block.
    crypto_stream_xsalsa20_tweet(x, 32, n, k)
    # Reject forged ciphertexts before decrypting.
    if crypto_onetimeauth_poly1305_tweet_verify(c[16:], c[32:], d - 32, x) != 0: return -1
    crypto_stream_xsalsa20_tweet_xor(m, c, d, n, k)
    m[:] = 32*[u8()]
    return 0
def set25519(r, a):
    '''void set25519(gf r, const gf a)'''
    # Copy the 16 limbs of a into r, one element at a time.
    limb = 0
    while limb < 16:
        r[limb] = a[limb]
        limb += 1
def car25519(o):
    '''void car25519(gf o)'''
    # Carry propagation in radix 2^16.  The last limb's carry (i == 15)
    # wraps into limb 0 with factor 38 = 2*19, since p = 2^255 - 19.
    c = i64()
    for i in range(16):
        o[i] += (i64(1) << 16)
        c = o[i] >> 16
        o[(i + 1)*(i < 15)] += c - 1 + 37*(c - 1)*(i == 15)
        o[i] -= c << 16
def sel25519(p, q, b):
    '''void sel25519(gf p, gf q, int b)'''
    # Constant-time conditional swap: exchanges p and q iff b == 1.
    t = i64()
    c = i64(~(b - 1))  # all-ones mask when b == 1, zero mask when b == 0
    for i in range(16):
        t = c & (p[i] ^ q[i])
        p[i] ^= t
        q[i] ^= t
    return p, q
def pack25519(o, n):
    '''void pack25519(u8*o, const gf n)'''
    # Freeze a field element to its canonical representative mod 2^255-19
    # and serialize it as 32 little-endian bytes.
    b = int()
    m = gf()
    t = gf()
    for i in range(16): t[i] = n[i]
    car25519(t)
    car25519(t)
    car25519(t)
    # Two passes of constant-time conditional subtraction of p.
    for j in range(2):
        m[0] = t[0] - 0xffed
        for i in range(1,15):
            m[i] = t[i] - 0xffff - ((m[i - 1] >> 16) & 1)
            m[i-1] &= 0xffff
        m[15] = t[15] - 0x7fff - ((m[14] >> 16) & 1)
        b = (m[15] >> 16) & 1
        m[14] &= 0xffff
        sel25519(t, m, 1 - b)
    for i in range(16):
        o[2*i] = t[i] & 0xff
        o[2*i + 1] = t[i] >> 8
def neq25519(a, b):
    '''int neq25519(const gf a, const gf b)'''
    # Compare two field elements by packing both to canonical byte form.
    c = IntArray(u8, size=32)
    d = IntArray(u8, size=32)
    pack25519(c, a)
    pack25519(d, b)
    return crypto_verify_32_tweet(c, d)
def par25519(a):
    '''u8 par25519(const gf a)'''
    # Parity (least significant bit) of the canonical 32-byte encoding.
    d = IntArray(u8, size=32)
    pack25519(d, a)
    return d[0] & 1
def unpack25519(o, n):
    '''void unpack25519(gf o, const u8*n)'''
    # Load 32 little-endian bytes into 16 limbs of 16 bits each;
    # the top bit is discarded per the curve25519 convention.
    for i in range(16): o[i] = n[2*i] + (i64(n[2*i + 1]) << 8)
    o[15] &= 0x7fff
def A(o, a, b):
    '''void A(gf o, const gf a, const gf b)'''
    # Field addition: limb-wise sum; carries are normalized later by car25519.
    for idx in range(16):
        o[idx] = a[idx] + b[idx]
def Z(o, a, b):
    '''void Z(gf o, const gf a, const gf b)'''
    # Field subtraction: limb-wise difference; reduction happens elsewhere.
    idx = 0
    while idx < 16:
        o[idx] = a[idx] - b[idx]
        idx += 1
def M(o, a, b):
    '''void M(gf o, const gf a, const gf b)'''
    # Schoolbook multiplication into 31 intermediate limbs, then fold
    # limbs >= 16 back with factor 38 (= 2*19) and carry twice.
    t = IntArray(i64, size=31)
    for i in range(16):
        for j in range(16): t[i + j] += a[i]*b[j]
    for i in range(15): t[i] += 38*t[i + 16]
    for i in range(16): o[i] = t[i]
    car25519(o)
    car25519(o)
    return o
def S(o, a):
    '''void S(gf o, const gf a)'''
    # Squaring expressed as multiplication by itself.
    M(o, a, a)
def inv25519(o, i):
    '''void inv25519(gf o, const gf i)'''
    # Inversion via Fermat's little theorem: i^(p-2) with p = 2^255-19.
    # The exponent's binary expansion has zero bits only at positions 2 and 4.
    c = gf()
    for a in range(16): c[a] = i[a]
    for a in range(253, -1, -1):
        S(c, c)
        if a != 2 and a != 4: M(c, c, i)
    for a in range(16): o[a] = c[a]
    return o
def pow2523(o, i):
    '''void pow2523(gf o, const gf i)'''
    # Computes i^((p-5)/8); used when taking square roots during point
    # decompression.  The exponent has a zero bit only at position 1.
    c = gf()
    for a in range(16): c[a] = i[a]
    for a in range(250, -1, -1):
        S(c, c)
        if a != 1: M(c, c, i)
    for a in range(16): o[a] = c[a]
def crypto_scalarmult_curve25519_tweet(q, n, p):
'''int crypto_scalarmult_curve25519_tweet(u8*q, const u8*n, const u8*p)'''
z = IntArray(u8, size=32)
x = IntArray(i64, size=80)
r = i64()
a = gf()
b = gf()
c = gf()
d = gf()
e = gf()
f = gf()
for i in range(31): z[i] = n[i]
z[31] = (n[31] & 127) | 64
z[0] &= 248
unpack25519(x, p)
for |
ion.remove(component)
else:
selection.append(component)
def setVerifiedValue(self, row, field, value):
"""Sets the *value* for *field* in the parameter
indexed by *row*, only if the value is within set limits
:param row: the ith parameter number
:type row: int
:param field: detail of the parameter to set
:type field: str
:param value: pre-scaled value to assign to field
"""
if self._parameters[row]['parameter'] == 'filename':
return # cannot be set this way?
if field == 'parameter':
self.setParamValue(row, parameter=value)
elif field in ['start', 'stop', 'step']:
if self.checkLimits(row, value):
kwd = {field : value}
self.setParamValue(row, **kwd)
def setParamValue(self, row, **kwargs):
"""Sets the arguments as field=val for parameter
indexed by *row*
:param row: the ith parameter number
:type row: int
"""
param = self._parameters[row]
for key, val in kwargs.items():
param[key] = val
def paramValue(self, row, field):
"""Gets the value for *field* for parameter indexed by
*row*
:param row: the ith parameter number
:type row: int
:param field: detail of the parameter to set
:type field: str
:returns: value -- type appropriate to parameter
"""
if field == 'nsteps':
return self.numSteps(row)
if field in ['start', 'stop', 'step'] and self._parameters[row]['parameter'] == 'filename':
return '-'
else:
param = self._parameters[row]
return param[field]
def overwriteParam(self, row, param):
"""Assigns *param* to index *row*, overwritting the
parameter at that location
:param row: the ith parameter number
:type row: int
:param param: parameter to set
:type param: dict
"""
if row == -1:
row = self.nrows() - 1
self._parameters[row] = param
def numSteps(self, row):
"""Gets the number of steps for the parameter at
index *row* will yeild
"""
param = self._parameters[row]
return self.nStepsForParam(param)
    def nStepsForParam(self, param):
        """Gets the number of steps *param* will yield when expanded.

        Filename parameters expand to one step per file name; numeric
        parameters derive the count from start/stop/step.

        :param param: parameter to get the expansion count for
        :type param: dict
        """
        if param['parameter'] == 'filename':
            return len(param['names'])
        else:
            if param['step'] > 0:
                # NOTE(review): a range smaller than one step yields 0 steps
                # (not 1) -- confirm this is the intended behavior.
                if abs(param['start'] - param['stop']) < param['step']:
                    return 0
                # print 'range', param['start'] - param['stop']
                # Round to 4 decimals before dividing to dodge float noise.
                nsteps = np.around(abs(param['start'] - param['stop']), 4) / float(param['step'])
                nsteps = int(np.ceil(nsteps)+1)
            elif param['start'] == param['stop']:
                nsteps = 1
            else:
                nsteps = 0
            return nsteps
    def getDetail(self, row, detail_field):
        """Gets the value of the detail *detail_field* of parameter
        at index *row* from its selected components `auto_details`.
        All of the selected components value for *detail_field* must
        match

        :param row: the ith parameter number
        :type row: int
        :param detail_field: auto_details member key
        :type detail_field: str
        :returns: value type appropriate for parameter
        """
        param = self._parameters[row]
        param_type = param['parameter']
        components = param['selection']
        # No selection, or no parameter type chosen yet: nothing to report.
        if len(components) == 0 or param_type == '':
            return None
        # all components must match
        matching_details = []
        # for comp in components:
        for comp in components:
            alldetails = comp.auto_details()
            if not param_type in alldetails:
                # self.hintRequested.emit('INCOMPATABLE COMPONENTS FOR PARAMETER TYPE {}'.format(param_type))
                return None
            details = alldetails[param_type]
            matching_details.append(details[detail_field])
        # Collapse to a set: more than one distinct value means a mismatch.
        matching_details = set(matching_details)
        if len(matching_details) > 1:
            print 'Components with mis-matched units!'
            return None
        return matching_details.pop()
def isFieldValid(self, row, field):
"""Verifies the value in *field* for parameter at index
*row*
:param row: the ith parameter number
:type row: int
:param field: detail of the parameter to check
:type field: str
:returns: bool -- True if valid
"""
param = self._parameters[row]
if param['parameter'] == '':
return False
if field == 'nsteps':
return self.numSteps(row) != 0
if param['parameter'] == 'filename':
# do something here... check filenames?
return True
if field == 'parameter':
return True
# else check that value is between min and max allowed
return self.checkLimits(row, param[field])
def findFileParam(self, comp):
"""Finds the filename auto-parameter that component *comp* is
in, and returns all the filenames for that parameter. Notes this
assumes that *comp* will only be in a single filename auto-parameter.
:param comp: Component to search parameter membership for
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: list<str> -- filenames the found parameter will loop through
"""
for p in self._parameters:
if p['parameter'] == 'filename' and comp in p['selection']:
return p['names']
def checkLimits(self, row, value):
"""Check that *value* is within the minimum and maximum allowable
range for the parameter at index *row*
:param row: the ith parameter number
:type row: int
:param value: the candidate value to for start or stop fields
:returns: bool -- True if *value* within range
"""
# extract the selected component names
param = self._parameters[row]
components = param['selection']
if len(components) == 0:
return False
ptype = param['parameter']
mins = []
maxs = []
for comp in components:
# get the limit details for the currently selected parameter type
try:
details = comp.auto_details()[ptype]
mins.append(details['min'])
maxs.append(details['max'])
except KeyError:
raise
return False
lower = max(mins)
upper = min(maxs)
if lower <= value <= upper:
return True
else:
# print 'value out of bounds:'
# print 'lower', lower, 'upper', upper, 'value', value
return False
|
    def setParameterList(self, paramlist):
        """Clears and sets all parameters to *paramlist*

        :param paramlist: all parameters for this model to have
        :type paramlist: list<dict>
        """
        # NOTE: the list is adopted by reference, not copied.
        self._parameters = paramlist
def insertRow(self, position):
"""Inserts an em | pty parameter at index *position*
:param position: order to insert new parameter to
:type position: int
"""
if position == -1:
position = self.nrows()
defaultparam = { 'start': 0,
'step': 0,
'stop': 0,
'parameter': '',
'selection' : [],
}
self._parameters.insert(position, defaultparam)
def removeRow(self, position):
"""Removes the parameter at index *position*
:param position: the parameter index
:type position: int
:returns: dict -- the remov |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # State-only models mirroring the django-kombu (djkombu) broker tables;
    # 'managed': False means Django records them in migration state without
    # creating or altering the underlying database tables.
    dependencies = [
        ('Alarma', '0004_auto_20151025_0206'),
    ]
    operations = [
        migrations.CreateModel(
            name='DjkombuMessage',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('visible', models.BooleanField()),
                ('sent_at', models.DateTimeField(null=True, blank=True)),
                ('payload', models.TextField()),
            ],
            options={
                'db_table': 'djkombu_message',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DjkombuQueue',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=200)),
            ],
            options={
                'db_table': 'djkombu_queue',
                'managed': False,
            },
        ),
    ]
|
btitlesgr, title_query),
executor.submit(self.vipsubs, title_query),
executor.submit(self.podnapisi, title_query),
executor.submit(self.subtitlesgr, season_episode_query)
]
)
elif year != '': # movie
query = '{0} ({1})'.format(title, year)
threads = [
executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, query)
]
else: # file
query, year = control.cleanmovietitle(title)
if year != '':
query = '{0} ({1})'.format(query, year)
threads = [
executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, | query)
]
for future in concurrent_futures.as_completed(threads):
item = future.result()
if not item:
| continue
self.list.extend(item)
if not dup_removal:
log_debug('Query used for subtitles search: ' + query)
self.query = query
self.query = py3_dec(self.query)
else: # Manual query
with concurrent_futures.ThreadPoolExecutor(5) as executor:
query = match_title = py3_dec(query)
threads = [
executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, query)
]
for future in concurrent_futures.as_completed(threads):
item = future.result()
if not item:
continue
self.list.extend(item)
if len(self.list) == 0:
control.directory(self.syshandle)
return
f = []
# noinspection PyUnresolvedReferences
f += [i for i in self.list if i['source'] == 'xsubstv']
f += [i for i in self.list if i['source'] == 'subtitlesgr']
f += [i for i in self.list if i['source'] == 'podnapisi']
f += [i for i in self.list if i['source'] == 'vipsubs']
self.list = f
if dup_removal:
self.list = [dict(t) for t in {tuple(d.items()) for d in self.list}]
for i in self.list:
try:
if i['source'] == 'xsubstv':
i['name'] = u'[xsubstv] {0}'.format(i['name'])
elif i['source'] == 'podnapisi':
i['name'] = u'[podnapisi] {0}'.format(i['name'])
elif i['source'] == 'vipsubs':
i['name'] = u'[vipsubs] {0}'.format(i['name'])
except Exception:
pass
if control.setting('sorting') == '1':
key = 'source'
elif control.setting('sorting') == '2':
key = 'downloads'
elif control.setting('sorting') == '3':
key = 'rating'
else:
key = 'title'
self.list = sorted(self.list, key=lambda k: k[key].lower(), reverse=control.setting('sorting') in ['1', '2', '3'])
for i in self.list:
u = {'action': 'download', 'url': i['url'], 'source': i['source']}
u = '{0}?{1}'.format(self.sysaddon, urlencode(u))
item = control.item(label='Greek', label2=i['name'])
item.setArt({'icon': str(i['rating'])[:1], 'thumb': 'el'})
if ratio(splitext(i['title'].lower())[0], splitext(match_title)[0]) >= int(control.setting('sync_probability')):
item.setProperty('sync', 'true')
else:
item.setProperty('sync', 'false')
item.setProperty('hearing_imp', 'false')
control.addItem(handle=self.syshandle, url=u, listitem=item, isFolder=False)
control.directory(self.syshandle)
def subtitlesgr(self, query=None):
if not query:
query = self.query
try:
if control.setting('subtitles') == 'false':
raise TypeError
result = subtitlesgr.Subtitlesgr().get(query)
return result
except TypeError:
pass
def podnapisi(self, query=None):
if not query:
query = self.query
try:
if control.setting('podnapisi') == 'false':
raise TypeError
result = podnapisi.Podnapisi().get(query)
return result
except TypeError:
pass
def vipsubs(self, query=None):
if not query:
query = self.query
try:
if control.setting('vipsubs') == 'false':
raise TypeError
result = vipsubs.Vipsubs().get(query)
return result
except TypeError:
pass
def xsubstv(self, query=None):
if not query:
query = self.query
try:
if control.setting('xsubs') == 'false':
raise TypeError
result = xsubstv.Xsubstv().get(query)
self.list.extend(result)
except TypeError:
pass
class Download:
    def __init__(self, syshandle, sysaddon):
        # Kodi plugin handle and base plugin URL, used when building items.
        self.syshandle = syshandle
        self.sysaddon = sysaddon
# noinspection PyUnboundLocalVariable
def run(self, url, source):
log_debug('Source selected: {0}'.format(source))
path = control.join(control.dataPath, 'temp')
try:
path = path.decode('utf-8')
except Exception:
pass
control.deleteDir(control.join(path, ''), force=True)
control.makeFile(control.dataPath)
control.makeFile(path)
if control.setting('keep_subs') == 'true' or control.setting('keep_zips') == 'true':
if not control.get_info_label('ListItem.Path').startswith('plugin://') and control.setting('destination') == '0':
output_path = control.get_info_label('Container.FolderPath')
elif control.setting('output_folder').startswith('special://'):
output_path = control.transPath(control.setting('output_folder'))
else:
output_path = control.setting('output_folder')
if not exists(output_path):
control.makeFile(output_path)
if source == 'subtitlesgr':
subtitle = subtitlesgr.Subtitlesgr().download(path, url)
elif source == 'xsubstv':
subtitle = xsubstv.Xsubstv().download(path, url)
elif source == 'podnapisi':
subtitle = podnapisi.Podnapisi().download(path, url)
elif source == 'vipsubs':
subtitle = vipsubs.Vipsubs().download(path, url)
else:
subtitle = None
if subtitle is not None:
if control.setting('keep_subs') == 'true':
# noinspection PyUnboundLocalVariable
try:
if control.setting('destination') in ['0', '2']:
if control.infoLabel('{0}.Title'.format(infolabel_prefix)).startswith('plugin://'):
copy(subtitle, control.join(output_path, os_split(subtitle)[1]))
log_debug('Item currently selected is not a local file, cannot save subtitle next to it')
else:
output_filename = control.join(
output_path, ''.join(
[
splitext(control.infoLabel('ListItem.FileName'))[0],
splitext(os_split(subtitle)[1])[1]
|
UTH_URL='{}'".format( v2AuthUrl ) )
print ";".join( export )
if persist:
rcfile = os.environ[ 'HOME' ] + "/.swiftrc"
logging.debug( "persisting environment variables" )
_persist( export, rcfile )
def csh(creds, auth_version, savepw=False, persist=False ):
    """Emit csh/tcsh ``setenv`` commands exporting the Swift credentials.

    v1 auth populates ST_USER/ST_KEY/ST_AUTH (clearing the OS_* set);
    any other version populates the OS_* variables (clearing ST_*).
    The password is only exported when *savepw* is true.  With *persist*
    the same lines are also written to ~/.swift.cshrc.
    """
    export = []
    if auth_version == 'v1':
        export.append(
            "unsetenv OS_USERNAME OS_PASSWORD OS_TENANT_NAME OS_AUTH_URL" )
        export.append(
            "unsetenv OS_AUTH_TOKEN OS_STORAGE_URL" )
        export.append( "setenv ST_USER '{}'".format( creds['account'] ) )
        export.append( "setenv ST_KEY '{}'".format( creds['password'] ) )
        export.append( "setenv ST_AUTH '{}'".format( v1AuthUrl ) )
    else:
        export.append(
            "unsetenv ST_USER ST_KEY ST_AUTH" )
        export.append( "setenv OS_USERNAME '{}'".format( creds['user'] ) )
        export.append(
            "setenv OS_TENANT_NAME 'AUTH_Swift_{}'".format( creds['account'] ) )
        if savepw:
            export.append(
                "setenv OS_PASSWORD '{}'".format( creds['password'] ) )
        export.append( "setenv OS_AUTH_URL '{}'".format( v2AuthUrl ) )
    print ";".join( export )
    if persist:
        rcfile = os.environ[ 'HOME' ] + "/.swift.cshrc"
        logging.debug( "persisting environment variables" )
        _persist( export, rcfile )
# Dispatch table: login shell name -> credential-export formatter function.
shell_output = {
    'sh': sh,
    'ksh': sh,
    'bash': sh,
    'zsh': sh,
    'csh': csh,
    'tcsh': csh
}
class LocalParser( argparse.ArgumentParser ):
    """ArgumentParser variant that prints a fixed, terse usage hint on error
    and routes full help text to stderr, exiting explicitly in both cases."""

    def error( self, message ):
        # The incoming *message* is deliberately ignored in favour of a
        # fixed three-line hint.
        for line in ( "Error: too few arguments\n",
                      "usage: sw2account lastname_f\n",
                      "use \"sw2account --help\" for full help information\n" ):
            sys.stderr.write( line )
        sys.exit(1)

    def print_help( self ):
        self._print_message( self.format_help(), sys.stderr )
        sys.exit(0)
def return_v1_auth( args ):
    """Fetch v1 auth credentials for *args.account* from the sw2 credential
    server (HTTP basic auth as the current OS user) and hand them to the
    selected shell formatter.  Exits non-zero on configuration or 404
    errors; other HTTP failures are only logged.
    """
    # If server URL is unspecified, look for "SW2_URL" in current environment
    account = args.account
    server_url = args.server_url
    logging.debug(
        'asking {} for credentials for {}'.format( server_url, account )
    )
    if not server_url:
        try:
            server_url = os.environ[ 'SW2_URL' ]
        except KeyError:
            logging.error( "Server URL is unset (not in arguments or SW2_URL)" )
            sys.exit(1)
    # Add account name to URL
    server_url = '/'.join( [ server_url, account ] )
    logging.debug( 'final url is {}'.format( server_url ) )
    # Get user authentication credentials
    user = getpass.getuser()
    passwd = getpass.getpass( 'Enter password for {}: '.format(user) )
    # Get account credentials from server_url
    r = requests.get( server_url, verify = args.verify_ssl, auth=( user, passwd ) )
    if r.status_code == 200:
        creds = r.json()
        logging.debug(
            "got credentials for account {}".format( creds['account'] )
        )
        # NOTE(review): hard-coded placeholder URL overwrites whatever the
        # server returned -- looks like leftover debugging; confirm.
        creds['url'] = 'https://tin/some/crap'
        shell_output[ args.shell ](
            creds=creds,
            persist=args.persist,
            auth_version=args.auth_version
        )
    elif r.status_code == 401:
        logging.error(
            "invalid username/password supplied to server"
        )
    elif r.status_code == 403:
        logging.error(
            "user {} is not permitted to use {} ({})".format(
                user, account, r.status_code
            )
        )
    elif r.status_code == 404:
        # 404 responses carry a JSON 'message' explaining the failure.
        try:
            message = r.json()['message']
        except KeyError:
            logging.error( "404 returned from server with no message" )
            sys.exit(1)
        logging.error("{} (HTTP{})".format(
            message, r.status_code
            )
        )
    else:
        logging.error(
            "error {} retrieving credentials from server".format(
                r.status_code
            )
        )
def return_v2_auth( args ):
    """Prompt for the current user's password and emit v2 (Keystone-style)
    credential exports for the requested account via the selected shell
    formatter.  No server round-trip is made for v2.
    """
    creds = {}
    # authentication is done using Swiftstack version 2 authentication
    # requires additional "tenant name" in addition to username and password
    creds['account'] = args.account
    # take username password from currently logged in user
    creds['user'] = getpass.getuser()
    if args.savepw:
        logging.warning( "Saving passwords is insecure and not recommended." )
    creds['password'] = getpass.getpass(
        'Enter password for {}: '.format( creds['user'] ) )
    logging.debug(
        "got credentials for account {}".format( creds['account'] )
    )
    if args.savepw:
        logging.debug( 'saving password in rc and environment' )
    shell_output[ args.shell ](
        creds=creds,
        persist=args.persist,
        savepw=args.savepw,
        auth_version=args.auth_version
    )
def add_common_args( aparser ):
    """Register the arguments shared by the v1 and v2 subcommands on
    *aparser*.  Order matters for the positionals (shell, then account)."""
    common = [
        (('shell',), dict(
            help = "format output for shell <shell>",
            choices = shell_output.keys() )),
        (('account',), dict(
            help = "retrieve credentials for account <account>" )),
        (('--config',), dict(
            default = "/etc/sw2account.cfg",
            help = "configuration file to use (default=/etc/sw2account.cfg)" )),
        (('--stack',), dict(
            default = "default",
            help = "stack name to authentication against (see configfile)" )),
        (('--save', '--persist'), dict(
            dest = 'persist',
            action = 'store_true',
            help = "write credentials to $HOME/.swiftrc" )),
        (('--no-save', '--no-persist'), dict(
            dest = 'persist',
            action = 'store_false',
            help = "do not write credentials to $HOME/.swiftrc" )),
        (('--version', '-v'), dict(
            help = "show script version",
            action = 'version',
            version = "sw2account version {}".format( __version__) )),
        (('--debug',), dict(
            action = "store_true",
            help = "log level for client" )),
    ]
    for flags, kwargs in common:
        aparser.add_argument( *flags, **kwargs )
if __name__ == "__main__":
# Get the config first
# Need to prime the pump to find defaults
tparse = argparse.ArgumentParser()
tparse.add_argument(
'--config',
default = "/etc/sw2account.cfg",
help = "configuration file to use (default=/etc/sw2account.cfg)"
)
tparse.add_argument(
'--stack',
default = "default",
help = "stack name to authenticate against (see configfile)"
)
args, unknown = tparse.parse_known_args()
# Read config file with defaults
if not os.path.isfile( args.config ):
logging.error( "missing config file %s", args.config )
sys.exit(1)
appdefaults = ConfigParser.ConfigParser()
try:
appdefaults.read( args.config )
logging.debug( "reading config from %s", args.config )
except ConfigParser.ParsingError:
logging.error(
"error reading configuration file %s - check format", args.config
)
sys.exit(1)
try:
v1AuthUrl = appdefaults.get( args.stack, 'v1AuthUrl' )
v2AuthUrl = appdefaults.get( args.stack, 'v2AuthUrl' )
auth_version_default = appdefaults.get(
args.stack, 'auth_version_default' )
except ConfigParser.NoSectionError:
logging.error( "Stack '%s' not configured in configfile %s",
args.stack, args.config )
sys.exit(1)
except ConfigParser.NoOptionError:
logging.error(
"Configfile %s does not contain correct entries for stack '%s'",
args.config, args.stack
)
sys.exit(1)
# Fix argument order so that v1/v2 is first argument
try:
if sys.argv[1] not in ['v1','v2'] and (
'-h' not in sys.argv or '--help' not in sys.argv ):
if 'v1' in sys.argv:
logging.debug( "reordering arguments to put v1 arg at head" )
sys.argv.remove('v1')
sys.argv.insert(1, 'v1')
elif 'v2' in sys.argv:
logging.debug( "reordering arguments to put v2 arg at head" )
|
import numpy as np
from . import finiteelements as fe
from . import matrices
from math import cos
class Result:
    """A time-harmonic displacement solution over a finite-element mesh.

    Fix: a redundant zero-argument ``__init__`` used to precede the real
    constructor; Python only keeps the last definition, so it was dead code
    and has been removed.
    """

    def __init__(self, freq, u1, u2, u3, mesh, geometry):
        self.freq = freq          # angular frequency (rad/s)
        self.u1 = u1              # nodal displacement component 1
        self.u2 = u2              # nodal displacement component 2
        self.u3 = u3              # nodal displacement component 3
        self.mesh = mesh
        self.geometry = geometry

    def rad_per_sec_to_Hz(self, rps):
        """Convert an angular frequency in rad/s to Hz."""
        return rps/(2*np.pi)

    def get_displacement_and_deriv(self, x1, x2, x3, time):
        """Interpolated displacement (and derivatives) at (x1, x2, x3), *time*.

        Gathers u1/u3 at the four corner nodes of the element containing the
        point, applies the element approximation matrix, and scales by the
        harmonic time factor fi(time).
        """
        element = self.mesh.get_element(x1, x3)
        if (element is None):
            # Point fell outside the mesh; the attribute access below will fail.
            print ("x1 = {}, x2 = {}".format(x1, x3))
        u_nodes = np.zeros((8))
        u_nodes[0] = self.u1[element.top_left_index]
        u_nodes[1] = self.u1[element.top_right_index]
        u_nodes[2] = self.u1[element.bottom_right_index]
        u_nodes[3] = self.u1[element.bottom_left_index]
        u_nodes[4] = self.u3[element.top_left_index]
        u_nodes[5] = self.u3[element.top_right_index]
        u_nodes[6] = self.u3[element.bottom_right_index]
        u_nodes[7] = self.u3[element.bottom_left_index]
        h_e = matrices.element_aprox_functions(element, x1, x2, x3)
        return h_e.dot(u_nodes) * self.fi(time)

    def get_strain(self, x1, x2, x3, time):
        """Linear strain vector at a point and time."""
        B = matrices.deriv_to_grad(self.geometry, x1, x2, x3)
        u = self.get_displacement_and_deriv(x1, x2, x3, time)
        grad_u = B.dot(u)
        E = matrices.grad_to_strain()
        # E_NL = grad_to_strain_nonlinear_matrix(alpha1, alpha2, geometry, grad_u)
        return E.dot(grad_u)

    def get_strain_nl(self, x1, x2, x3, time):
        """Strain including the geometrically nonlinear contribution."""
        B = matrices.deriv_to_grad(self.geometry, x1, x2, x3)
        u = self.get_displacement_and_deriv(x1, x2, x3, time)
        grad_u = B.dot(u)
        E = matrices.grad_to_strain()
        E_NL = matrices.deformations_nl(self.geometry, grad_u, x1, x2, x3)
        return (E + E_NL).dot(grad_u)

    def fi(self, time):
        """Harmonic time factor cos(freq * t)."""
        return cos(self.freq * time)
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestHPCP(TestCase):
def testEmpty(self):
hpcp = HPCP()([], [])
self.assertEqualVector(hpcp, [0.]*12)
def testZeros(self):
hpcp = HPCP()([0]*10, [0]*10)
self.assertEqualVector(hpcp, [0.]*12)
def testSin440(self):
# Tests whether a real audio signal of one pure tone gets read as a
# single semitone activation, and gets read into the right pcp bin
sampleRate = 44100
audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'),
sampleRate = sampleRate)()
speaks = SpectralPeaks(sampleRate = sampleRate,
maxPeaks = 1,
maxFrequency = sampleRate/2,
minFrequency = 0,
magnitudeThreshold = 0,
orderBy = 'magnitude')
(freqs, mags) = speaks(Spectrum()(audio))
hpcp = HPCP()(freqs, mags)
self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])
def testAllSemitones(self):
# Tests whether a spectral peak output of 12 consecutive semitones
# yields a HPCP of all 1's
tonic = 440
freqs = [(tonic * 2**(x/12.)) for x in range(12)]
mags = [1] * 12
hpcp = HPCP()(freqs, mags)
self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.])
def testSubmediantPosition(self):
# Make sure that the submediant of a key based on 440 is in the
# correct location (submediant was randomly selected from all the
# tones)
tonic = 440
submediant = tonic * 2**(9./12.)
hpcp = HPCP()([submediant], [1])
self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.])
def testMaxShifted(self):
# Tests whether a HPCP reading with only the dominant semitone
# activated is correctly shifted so that the dominant is at the
# position 0
tonic = 440
dominant = tonic * 2**(7./12.)
hpcp = HPCP(maxShifted=True)([dominant], [1])
self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])
def chordHelper(self, half_steps, tunning, strength):
notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))]
hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength)
for i in range(len(hpcp)):
if i in half_steps: self.assertTrue(hpcp[i]>0)
elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0)
else: self.assertEqual(hpcp[i], 0)
def testChord(self):
tunning = 440
AMajor = [0, 4, 7] # AMajor = A4-C#5-E5
self.chordHelper(AMajor, tunning, [1,1,1])
CMajor = [3, -4, -2] # CMajor = C5-F4-G4
self.chordHelper(CMajor, tunning, [1,1,1])
CMajor = [-4, 3, -2] # CMajor = C5-F4-G4
self.chordHelper(CMajor, tunning, [1,0.5,0.2])
CMajor = [-4, -2, 3] # CMajor = C5-F4-G4
self.chordHelper(CMajor, tunning, [1,0.5,0.2])
CMajor = [3, 8, 10] # CMajor = C5-F5-G5
self.chordHelper(CMajor, tunning, [1,0.5,0.2])
AMinor = [0, 3, 7] # AMinor = A4-C5-E5
self.chordHelper(AMinor, tunning, [1,0.5,0.2])
CMinor = [3, 6, 10] # CMinor = C5-E5-G5
self.chordHelper(CMinor, tunning, [1,0.5,0.2])
# Test of various parameter logical bounds
def testLowFrequency(self):
hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1])
self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])
def testHighFrequency(self):
hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1])
self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])
def testSmallMinRange(self):
self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200})
def testSmallMaxRange(self):
self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000})
def testSmallMinMaxRange(self):
self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1})
def testSizeNonmultiple12(self):
self.assertConfigureFails(HPCP(), {'size':13})
def testHarmonics(self):
# Regression test for the 'harmonics' parameter
tone = 100. # arbitrary frequency [Hz]
freqs = [tone, tone*2, tone*3, tone*4]
mags = [1]*4
hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3)
hpcp = hpcpAlg(freqs, mags)
expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.]
self.assertAlmostEqualVector(hpcp, expected, 1e-4)
    def testRegression(self):
        # Just makes sure algorithm does not crash on a real data source. This
        # test is not really looking for correctness. Maybe consider revising
        # it.
        inputSize = 512
        sampleRate = 44100
        # Real-world input: a recorded music-box clip from the test data set.
        audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')),
                           sampleRate = sampleRate)()
        # Standard front end: frame -> window -> spectrum -> spectral peaks -> HPCP.
        fc = FrameCutter(frameSize = inputSize,
                         hopSize = inputSize)
        windowingAlg = Windowing(type = 'blackmanharris62')
        specAlg = Spectrum(size=inputSize)
        sPeaksAlg = SpectralPeaks(sampleRate = sampleRate,
                                  maxFrequency = sampleRate/2,
                                  minFrequency = 0,
                                  orderBy = 'magnitude')
        hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3)
        frame = fc(audio)
        while len(frame) != 0:
            spectrum = specAlg(windowingAlg(frame))
            (freqs, mags) = sPeaksAlg(spectrum)
            hpcp = hpcpAlg(freqs,mags)
            # Sanity only: the profile must be free of NaNs and infinities.
            self.assertTrue(not any(numpy.isnan(hpcp)))
            self.assertTrue(not any(numpy.isinf(hpcp)))
            frame = fc(audio)
# Standard suite boilerplate: collect every TestHPCP case and run the suite
# when this file is executed directly.
suite = allTests(TestHPCP)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
|
# States every machine is expected to provide: the two terminal states plus
# the start state used by TuringMachine.initialize().
required_states = ['accept', 'reject', 'init']
class TuringMachine(object):
    """Single-tape Turing machine simulator.

    The machine starts in state 'init' and halts on reaching 'accept' or
    'reject'.  `delta` maps (state, symbol) -> (new_state, new_symbol,
    direction), where direction is 'left' or 'right'.  The tape grows on
    demand to the right with '#' blanks; the head never moves left of cell 0.
    """

    def __init__(self, sigma, gamma, delta):
        # sigma: input alphabet, gamma: tape alphabet, delta: transition table.
        self.sigma = sigma
        self.gamma = gamma
        self.delta = delta
        self.state = None
        self.tape = None
        self.head_position = None
        return

    def initialize(self, input_string):
        """Load input_string onto the tape and reset state and head."""
        for char in input_string:
            assert char in self.sigma
        self.tape = list(input_string)
        self.state = 'init'
        self.head_position = 0
        return

    def simulate_one_step(self, verbose=False):
        """Apply one transition; in a halting state, report and do nothing."""
        if self.state in ['accept', 'reject']:
            print("# %s " % self.state)
            # Bug fix: previously execution fell through to the delta lookup
            # below, which raises KeyError for a halted machine.
            return
        cur_symbol = self.tape[self.head_position]
        transition = self.delta[(self.state, cur_symbol)]
        if verbose:
            self.print_tape_contents()
            template = "delta({q_old}, {s_old}) = ({q}, {s}, {arr})"
            print(template.format(q_old=self.state,
                                  s_old=cur_symbol,
                                  q=transition[0],
                                  s=transition[1],
                                  arr=transition[2])
                  )
        self.state = transition[0]
        self.tape[self.head_position] = transition[1]
        if(transition[2] == 'left'):
            # The head may not move past the left edge of the tape.
            self.head_position = max(0, self.head_position - 1)
        else:
            assert(transition[2] == 'right')
            if self.head_position == len(self.tape) - 1:
                # Extend the tape with a blank before stepping off the end.
                self.tape.append('#')
            self.head_position += 1
        return

    def print_tape_contents(self):
        """Print the tape with the head's cell wrapped in brackets."""
        formatted = ''.join(char if i != self.head_position else '[%s]' % char
                            for i, char in enumerate(self.tape))
        print(formatted)

    def run(self, input_string, verbose=False):
        """Run until acceptance/rejection; return str() of the final tape."""
        self.initialize(input_string)
        while self.state not in ['reject', 'accept']:
            self.simulate_one_step(verbose)
        return str(self.tape)
|
#!/usr/bin/python3
# Copyright (c) 2016-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, ei | ther express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from ironicclient import client
from subprocess import check_output
from credential_helper import CredentialHelper |
def main():
    """Drive the chassis power state of every overcloud node over IPMI."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--power", required=True, default=None,
                        choices=["on", "off", "reset", "cycle"],
                        help="Control power state of all overcloud nodes")
    args = parser.parse_args()

    # Undercloud credentials are needed to enumerate the nodes via Ironic.
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth = dict(os_username=os_username,
                os_password=os_password,
                os_auth_url=os_auth_url,
                os_tenant_name=os_tenant_name,
                os_user_domain_name=os_user_domain_name,
                os_project_domain_name=os_project_domain_name)
    ironic = client.get_client(1, **auth)

    # Fire the requested chassis power command at each node's BMC in turn.
    for node in ironic.node.list(detail=True):
        ip, username, password = \
            CredentialHelper.get_drac_creds_from_node(node)
        cmd = ("ipmitool -H {} -I lanplus -U {} -P '{}' "
               "chassis power {}").format(ip, username, password, args.power)
        print(cmd)
        os.system(cmd)

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from kombu.async.http.curl import READ, WRITE, CurlClient
from kombu.tests.case import (
HubCase, Mock, call, patch, case_requires, set_module_symbol,
)
@case_requires('pycurl')
class test_CurlClient(HubCase):
    """Unit tests for the pycurl-backed async HTTP client."""

    class Client(CurlClient):
        # Replace the curl handle factory with a Mock so no real curl
        # handles (and no network traffic) are ever created.
        Curl = Mock(name='Curl')

    def test_when_pycurl_missing(self):
        """Instantiation must fail fast when pycurl is not importable."""
        with set_module_symbol('kombu.async.http.curl', 'pycurl', None):
            with self.assertRaises(ImportError):
                self.Client()

    def test_max_clients_set(self):
        """The max_clients constructor argument is stored as-is."""
        x = self.Client(max_clients=303)
        self.assertEqual(x.max_clients, 303)

    def test_init(self):
        """Construction sets up multi handle, queues, free list and timers."""
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            self.assertIsNotNone(x._multi)
            self.assertIsNotNone(x._pending)
            self.assertIsNotNone(x._free_list)
            self.assertIsNotNone(x._fds)
            self.assertEqual(
                x._socket_action, x._multi.socket_action,
            )
            # One pre-allocated curl handle per allowed concurrent client.
            self.assertEqual(len(x._curls), x.max_clients)
            self.assertTrue(x._timeout_check_tref)
            # The multi handle must be wired to the client's callbacks.
            x._multi.setopt.assert_has_calls([
                call(_pycurl.M_TIMERFUNCTION, x._set_timeout),
                call(_pycurl.M_SOCKETFUNCTION, x._handle_socket),
            ])

    def test_close(self):
        """close() cancels the timer and closes every handle."""
        with patch('kombu.async.http.curl.pycurl'):
            x = self.Client()
            x._timeout_check_tref = Mock(name='timeout_check_tref')
            x.close()
            x._timeout_check_tref.cancel.assert_called_with()
            for _curl in x._curls:
                _curl.close.assert_called_with()
            x._multi.close.assert_called_with()

    def test_add_request(self):
        """add_request() queues the request and kicks the processing loop."""
        with patch('kombu.async.http.curl.pycurl'):
            x = self.Client()
            x._process_queue = Mock(name='_process_queue')
            x._set_timeout = Mock(name='_set_timeout')
            request = Mock(name='request')
            x.add_request(request)
            self.assertIn(request, x._pending)
            x._process_queue.assert_called_with()
            x._set_timeout.assert_called_with(0)

    def test_handle_socket(self):
        """The socket callback (un)registers fds with the hub per poll event."""
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            hub = Mock(name='hub')
            x = self.Client(hub)
            fd = Mock(name='fd1')

            # POLL_REMOVE: fd dropped from hub and bookkeeping; a second
            # remove for an unknown fd must be harmless.
            x._fds[fd] = fd
            x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
            hub.remove.assert_called_with(fd)
            self.assertNotIn(fd, x._fds)
            x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)

            # POLL_IN: fd re-registered as reader only.
            hub = x.hub = Mock(name='hub')
            fds = [fd, Mock(name='fd2'), Mock(name='fd3')]
            x._fds = {f: f for f in fds}
            x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl)
            hub.remove.assert_has_calls([call(fd)])
            hub.add_reader.assert_called_with(fd, x.on_readable, fd)
            self.assertEqual(x._fds[fd], READ)

            # POLL_OUT: fd registered as writer only.
            hub = x.hub = Mock(name='hub')
            x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl)
            hub.add_writer.assert_called_with(fd, x.on_writable, fd)
            self.assertEqual(x._fds[fd], WRITE)

            # POLL_INOUT: fd registered as both reader and writer.
            hub = x.hub = Mock(name='hub')
            x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl)
            hub.add_reader.assert_called_with(fd, x.on_readable, fd)
            hub.add_writer.assert_called_with(fd, x.on_writable, fd)
            self.assertEqual(x._fds[fd], READ | WRITE)

            # UNKNOWN EVENT: must be ignored without raising.
            hub = x.hub = Mock(name='hub')
            x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)

            # FD NOT IN FDS: nothing is removed from the hub.
            hub = x.hub = Mock(name='hub')
            x._fds.clear()
            x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
            self.assertFalse(hub.remove.called)

    def test_set_timeout(self):
        """_set_timeout() accepts a timeout value without raising."""
        x = self.Client()
        x._set_timeout(100)

    def test_timeout_check(self):
        """_timeout_check() tolerates both normal and error socket_all paths."""
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            x._process_pending_requests = Mock(name='process_pending')
            x._multi.socket_all.return_value = 333, 1
            _pycurl.error = KeyError
            x._timeout_check(_pycurl=_pycurl)
            # A pycurl error from socket_all must be swallowed.
            x._multi.socket_all.return_value = None
            x._multi.socket_all.side_effect = _pycurl.error(333)
            x._timeout_check(_pycurl=_pycurl)

    def test_on_readable_on_writeable(self):
        """Readable/writable events dispatch to _on_event with CSELECT flags."""
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            x._on_event = Mock(name='on_event')
            fd = Mock(name='fd')
            x.on_readable(fd, _pycurl=_pycurl)
            x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN)
            x.on_writable(fd, _pycurl=_pycurl)
            x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT)
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Article(object):
    """Value object collecting everything Goose extracted from one page."""

    def __init__(self):
        # --- extracted content -------------------------------------------
        # title of the article
        self.title = u""
        # Pure article text: HTML stripped, paragraphs separated by
        # newlines.  This is usually the field consumers want.
        self.cleaned_text = u""
        self.body_html = u""

        # --- meta data from the HTML source ------------------------------
        self.meta_description = u""
        self.meta_lang = u""
        self.meta_favicon = u""
        self.meta_keywords = u""
        # canonical link from the page's meta data, when present
        self.canonical_link = u""
        # domain the article was fetched from
        self.domain = u""

        # --- extraction candidates ---------------------------------------
        # top element considered the main body of the article
        self.top_node = None
        # top Image object considered representative of the article
        self.top_image = None

        # --- collected page artifacts ------------------------------------
        # tags found in the article (not meta keywords)
        self.tags = []
        # all opengraph data found
        self.opengraph = {}
        # twitter embeds
        self.tweets = []
        # movies found on the page (youtube, vimeo, ...)
        self.movies = []
        # links found in the main article
        self.links = []
        # author names
        self.authors = []

        # --- fetch bookkeeping -------------------------------------------
        # final URL content was fetched against (after redirects/expansion)
        self.final_url = u""
        # MD5 hash of the URL, used for identification tasks
        self.link_hash = ""
        # raw HTML straight from the network connection
        self.raw_html = u""
        # the lxml Document object
        self.doc = None
        # original document before any cleaning was applied
        self.raw_doc = None
        # publish date of the article, when it could be determined
        self.publish_date = None
        # property bucket for consumers of goose to store custom extractions
        self.additional_data = {}

    @property
    def infos(self):
        """Dict summary of the extraction, convenient for serialization."""
        summary = {
            "meta": {
                "description": self.meta_description,
                "lang": self.meta_lang,
                "keywords": self.meta_keywords,
                "favicon": self.meta_favicon,
                "canonical": self.canonical_link,
            },
            "image": None,
            "domain": self.domain,
            "title": self.title,
            "cleaned_text": self.cleaned_text,
            "opengraph": self.opengraph,
            "tags": self.tags,
            "tweets": self.tweets,
            "movies": [],
            "links": self.links,
            "authors": self.authors,
            "publish_date": self.publish_date,
        }

        # Flatten the top image, when one was found.
        if self.top_image is not None:
            summary["image"] = {
                "url": self.top_image.src,
                "width": self.top_image.width,
                "height": self.top_image.height,
                "type": "image",
            }

        # Flatten every movie embed.
        summary["movies"] = [{
            "embed_type": clip.embed_type,
            "provider": clip.provider,
            "width": clip.width,
            "height": clip.height,
            "embed_code": clip.embed_code,
            "src": clip.src,
        } for clip in self.movies]

        return summary
|
med view
Arguments:
x -- optional dataset or list of datasets for x values
y -- dataset or list of datasets
title -- title of plot
name -- name of plot view to use (if None, use default name)
'''
if name is None:
name = _PVNAME
_process_line(x, y, title, name, 'add')
def updateline(x, y=None, title=None, name=None):
    """Update an existing plot with new y dataset(s), optionally against x.

    Arguments:
    x     -- optional dataset or list of datasets for x values
    y     -- dataset or list of datasets
    title -- title of plot
    name  -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    view, plot_title, xs, ys = _parselinearg(x, y, title, name)
    _plot_updateline(view, plot_title, xs, ys, None, None)
# Historical aliases: plot()/updateplot() are kept for backward compatibility.
plot = line
updateplot = updateline
def image(im, x=None, y=None, name=None):
    """Plot a 2D dataset as an image in the named view.

    Arguments:
    im   -- image dataset
    x    -- optional dataset for x-axis
    y    -- optional dataset for y-axis
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    # Axes are only honoured as a pair; drop both when either is missing.
    if x is None or y is None:
        x = y = None
    _plot_image(name, x, y, im)
def images(im, x=None, y=None, name=None):
    """Plot one or more 2D datasets as images in the named view.

    Arguments:
    im   -- image datasets (one or more)
    x    -- optional dataset for x-axis
    y    -- optional dataset for y-axis
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    # Axes are only honoured as a pair; drop both when either is missing.
    if x is None or y is None:
        x = y = None
    _plot_images(name, x, y, _toList(im))
def surface(s, x=None, y=None, name=None):
    """Plot a 2D dataset as a surface (height field) in the named view.

    Arguments:
    s    -- surface (height field) dataset
    x    -- optional dataset for x-axis
    y    -- optional dataset for y-axis
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    # Axes are only honoured as a pair; drop both when either is missing.
    if x is None or y is None:
        x = y = None
    _plot_surface(name, x, y, s)
def stack(x, y=None, z=None, name=None):
    """Plot 1D y datasets against x as a 3D stack in the named view.

    Arguments:
    x    -- optional dataset or list of datasets for x-axis
    y    -- dataset or list of datasets
    z    -- optional dataset for z-axis
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    if not y:
        # Only x given: treat it as the y data and index it implicitly with
        # an arange long enough for the largest dataset.
        y = _toList(x)
        longest = 0
        for dataset in y:
            longest = max(longest, dataset.size)
        x = [_core.arange(longest)]
    _plot_stack(name, _toList(x), _toList(y), z)
def updatestack(x, y=None, z=None, name=None):
    """Update an existing 3D line stack with new y dataset(s).

    Arguments:
    x    -- optional dataset or list of datasets for x-axis
    y    -- dataset or list of datasets
    z    -- optional dataset for z-axis
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    if not y:
        # Only x given: treat it as the y data and index it implicitly with
        # an arange long enough for the largest dataset.
        y = _toList(x)
        longest = 0
        for dataset in y:
            longest = max(longest, dataset.size)
        x = [_core.arange(longest)]
    _plot_updatestack(name, _toList(x), _toList(y), z)
def points(x, y=None, z=None, size=0, name=None):
    """Plot points at the given coordinates.

    If y is missing then x must contain a dataset of coordinate pairs or a
    list of such datasets.

    Arguments:
    x    -- dataset of x coords or coord pairs
    y    -- optional dataset of y coords
    z    -- optional dataset of z coords
    size -- integer size or dataset of sizes
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    size = _core.asarray(size)
    if z is not None:
        _plot_points3d(name, x, y, z, size)
    elif y is not None:
        _plot_points2d(name, x, y, size)
    else:
        # x holds coordinate pairs (or a list of such datasets).
        _plot_points2d(name, _toList(x), size)
def addpoints(x, y, z=None, size=0, name=None):
    """Append points at the given coordinates to an existing plot.

    Arguments:
    x    -- dataset of x coords
    y    -- dataset of y coords
    z    -- optional dataset of z coords
    size -- integer size or dataset of sizes
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    size = _core.asarray(size)
    if z is not None:
        _plot_updatepoints3d(name, x, y, z, size)
    else:
        _plot_updatepoints2d(name, x, y, size)
# Default view name used by the image-explorer helpers below.
_IMAGEEXPNAME = "ImageExplorer View"
def scanforimages(path, order="none", prefix=None, suffices=None, columns=-1, rowMajor=True, name=_IMAGEEXPNAME):
    """Scan path for images and load them into the given image explorer view.

    order    -- one of "none", "alpha", "chrono"
    prefix   -- start of a regular expression matching filename beginnings
    suffices -- list of filename endings (eg. ["png", "tif"], each can be a regex)
    columns  -- number of columns in the grid (-1 makes the grid square)
    rowMajor -- lay images out row-wise when True, column-wise otherwise
    Returns the number of images loaded.
    """
    # Maximum value for a signed 32-bit integer (2**31 - 1).
    maxint = ((1 << 30) - 1) + (1 << 30)
    return _plot_scanforimages(name, path, _order(order), prefix, suffices,
                               columns, rowMajor, maxint, 1)
def getbean(name=None):
    """Return the GUI bean holding information from the named view.

    Arguments:
    name -- name of plot view to use (if None, use default name)
    """
    return _plot_getbean(_PVNAME if name is None else name)
def setbean(bean, name=None):
    """Push a GUI bean to the named view; a None bean is ignored.

    Arguments:
    bean -- GUI bean
    name -- name of plot view to use (if None, use default name)
    """
    if bean is not None:
        _plot_setbean(_PVNAME if name is None else name, bean)
def getdatabean(name=None):
    """Return the data bean holding data from the named view.

    Arguments:
    name -- name of plot view to use (if None, use default name)
    """
    return _plot_getdatabean(_PVNAME if name is None else name)
def setdatabean(bean, name=None):
    """Push a data bean to the named view; a None bean is ignored.

    Arguments:
    bean -- data bean
    name -- name of plot view to use (if None, use default name)
    """
    if bean is not None:
        _plot_setdatabean(_PVNAME if name is None else name, bean)
def getroi(bean=None, roi=None, name=None):
    """Extract a region of interest from a GUI bean.

    Arguments:
    bean -- GUI bean (if None, fetched from the plot view of given name)
    roi  -- ROI class to require; if None, return whatever is stored
    name -- name of plot view to use (if None, use default name)
    """
    if name is None:
        name = _PVNAME
    if bean is None:
        bean = getbean(name)
    if parameters.roi not in bean:
        return None
    stored = bean[parameters.roi]
    # Without a requested class, hand back whatever was stored; otherwise
    # the stored ROI must be an instance of the requested class.
    if roi is None or isinstance(stored, roi):
        return stored
    return None
def setroi(bean, roi=None, send=False, name=None):
'''Set region of interest in bean
Arguments:
bean -- GUI bean (if None, retrieve from and update to plot view of given name)
roi -- ROI to set
send -- flag to update plot
name -- name of plot view to use (if None, use default name)
'''
if name is None:
name = _PVNAME
if roi is None:
roi = bean
send = True
bean = getbean(name)
if isinstance(bean, _guibean):
bean[parameters.ro |
from __future__ import print_function
import os
import sys
import fnmatch
import mmap
def clean_files(path, pattern):
    """Delete every file in *path* whose name begins with *pattern*."""
    for victim in fnmatch.filter(os.listdir(path), pattern + "*"):
        os.remove(os.path.join(path, victim))
def find_files(path, target):
    """Return all directories under *path* (recursively) containing a file
    named *target*."""
    hits = []
    for dirpath, _dirnames, filenames in os.walk(path):
        if target in filenames:
            hits.append(dirpath)
    return hits
def find_dirs_files_pattern(path, pattern):
    """Return [directory, filename] pairs for every file under *path*
    (recursively) whose name matches the fnmatch *pattern*."""
    found = []
    for dirpath, _subdirs, names in os.walk(path):
        found.extend([dirpath, fn] for fn in fnmatch.filter(names, pattern))
    return found
def return_value(filename, pattern):
    """Return the float in the last whitespace-separated field of the line
    containing the LAST occurrence of *pattern* in *filename*.

    Returns NaN when the pattern does not occur in the file.
    Raises ValueError/IndexError if the matched line does not end in a number.
    """
    if type(pattern) is str:
        pattern = pattern.encode()
    with open(filename, "r") as fin:
        # memory-map the file, size 0 means whole file
        m = mmap.mmap(fin.fileno(), 0, prot=mmap.PROT_READ)
        # prot argument is *nix only
        try:
            i = m.rfind(pattern)
            if i < 0:
                # Bug fix: this path previously returned np.nan, but numpy is
                # never imported here, so a missing pattern raised NameError.
                return float('nan')
            m.seek(i)  # seek to the location
            line = m.readline()  # read to the end of the line
            return float(line.split()[-1])
        finally:
            # The original leaked the mmap handle; always close it.
            m.close()
|
import serial
import time

# Open the device's USB CDC serial port at 9600 baud, 8 data bits.
ser = serial.Serial(
    port="/dev/ttyACM0",
    baudrate=9600,
    bytesize=serial.EIGHTBITS
)

# Give the board a moment to settle after the port is opened.
time.sleep(2)

# Send the 3-byte request 0x36 0x20 0x00 to the device.
ser.write("\x36\x20\x00")

# The device answers with 13 bytes; dump each one in hex.
# (Replaces 13 copy-pasted print statements with a loop.)
for _ in range(13):
    print(hex(ord(ser.read(1))))
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Exodus provider scraping movie links from newmyvideolink.xyz.

    Only yields premium (debrid-only) hoster links.
    """

    def __init__(self):
        # Domains this scraper handles; base_link is the active mirror.
        self.domains = ['newmyvideolink.xyz', 'beta.myvideolinks.xyz']
        self.base_link = 'http://newmyvideolink.xyz'
        self.search_link = '/?s=%s'

    def movie(self, imdb, title, year):
        """Pack the movie identifiers into a querystring consumed by sources()."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape the site's search results and return hoster link dicts.

        Returns a list of {'source', 'quality', 'provider', 'url', 'info',
        'direct', 'debridonly'} dicts; empty on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            # Provider is debrid-only: bail out when no debrid is configured.
            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            # Strip characters the site's search cannot handle, then build
            # the full search URL.
            query = re.sub('(\\\|/|:|;|\*|\?|"|\'|<|>|\|)', '', data['title'])
            query = self.search_link % urllib.quote_plus(query)
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(data['title'])

            r = client.request(query)

            # Parse each result item into (href, title, categories) and keep
            # only entries from the MOVIES category.
            r = client.parseDOM(r, 'ul', attrs = {'class': 'posts'})[0]
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'title': '.+?'}), client.parseDOM(i, 'a', attrs = {'rel': 'category tag'})) for i in r]
            r = [(i[0][0], i[1][0], i[2]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [(i[0], i[1]) for i in r if 'MOVIES' in i[2]]

            # Split "<title> <year> <release tags>", keep exact title/year
            # matches, and drop low-quality rips and 3D releases.
            r = [(i[0], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0][0], i[2][0][1]) for i in r if len(i[2]) > 0]
            r = [(i[0], i[1], i[2], re.split('\.|\(|\)|\[|\]|\s|\-', i[3])) for i in r]
            r = [i for i in r if t == cleantitle.get(i[1]) and data['year'] == i[2]]
            r = [i for i in r if not any(x in i[3] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if urlparse.urlparse(self.base_link).netloc in i[0]]

            # At most four candidate posts: 1080p first, then 720p ("HD").
            l = [(i[0], '1080p') for i in r if '1080p' in i[3]]
            l += [(i[0], 'HD') for i in r if '720p' in i[3]]
            l = l[:4]

            hostDict = hostprDict + hostDict

            links = []

            # Visit each candidate post and collect its download links.
            for i in l:
                try:
                    r = urlparse.urljoin(self.base_link, i[0])
                    r = client.replaceHTMLCodes(r)
                    r = client.request(r)
                    r = client.parseDOM(r, 'div', attrs = {'class': 'post_content'})[0]
                    r = re.sub('\s\s+', ' ', r)

                    # Pull the reported file size, normalized to GB, if given.
                    try:
                        size = re.findall('Size\s*:\s*(.+? [M|G]B) ', r)[-1]
                        div = 1 if size.endswith(' GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        info = '%.2f GB' % size
                    except:
                        info = ''

                    r = client.parseDOM(r, 'ul')[0]
                    r = client.parseDOM(r, 'a', ret='href')

                    for url in r: links.append({'url': url, 'quality': i[1], 'info': info})
                except:
                    pass

            # Keep only links hosted on known (debrid-resolvable) hosters.
            for i in links:
                try:
                    url = i['url']
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': i['quality'], 'provider': 'Newlinks', 'url': url, 'info': i['info'], 'direct': False, 'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources

    def resolve(self, url):
        """Links are already direct hoster URLs; nothing to resolve."""
        return url
|
DictLink':'datadict_link',
'DataDictWhole':'datadict_whole',
'Pickle':'pickle',
'RCD_Whole':'RCD_whole',
'RCD_Dicts':'RCD_dicts',
'RCD_Dicts_Whole':'RCD_dicts_whole',
'ChunkFunc':'chunkfunc',
'ChunkDict':'chunkdict',
'Model':'model',
'Num':'num',
'extractValid_out':'extract_valid_out',
'pseudoMiss_out':'pseudomiss_out',
'scoreMC_out':'score_mc_out',
'baseEst_out':'base_est_out',
'baseResid_out':'base_resid_out',
'baseEAR_out':'base_ear_out',
'baseSE_out':'base_se_out',
'baseFit_out':'base_fit_out',
'finEst_out':'fin_est_out',
'est2Logit_out':'est2logit_out',
'itemDiff_out':'item_diff_out',
'fillMiss_out':'fillmiss_out',
'finResid_out':'fin_resid_out',
'finFit_out':'fin_fit_out',
'mergeAnsKey_out':'merge_anskey_out',
'restoreInvalid_out':'restore_invalid_out',
'summStat_out':'summstat_out',
'RowEnts':'row_ents',
'ColEnts':'col_ents',
'ObjPerDim':'objperdim',
'Stability':'stability',
'Objectivity':'objectivity',
'BestDim':'bestdim',
'MaxPosDim':'maxposdim',
'Accuracy':'accuracy',
'PsMsResid':'psmsresid',
'Fac0SE':'fac0_se',
'Fac1SE':'fac1_se',
'Fac0Infit':'fac0_infit',
'Fac1Infit':'fac1_infit',
'Fac0Outfit':'fac0_outfit',
'Fac1Outfit':'fac1_outfit',
'Reliability':'reliability',
'CellVar':'cellvar',
'CellFit':'cellfit',
'MsIndex':'msindex',
'PsMsIndex':'psmsindex',
'TrueMsIndex':'true_msindex',
'ParsedMsIndex':'parsed_msindex',
'ParsedTrueMsIndex':'parsed_true_msindex',
'ParsedPsMsIndex':'parsed_psmsindex',
'ObjEstimates':'obj_estimates',
'ObjCoords':'obj_coord',
'EARCoord':'ear_coord',
'EntCoord':'ent_coord',
'StepCoord':'step_coord',
'Facet0':'facet0',
'Facet1':'facet1',
'logitEAR_out':'logit_ear_out',
'logitSE_out':'logit_se_out',
'ObsPerCellFactor':'obspercell_factor',
'SECoord':'se_coord',
'Logit':'Logit',
'EquateParams':'equate_params',
'Ratio':'ratio',
'Interval':'interval',
'Sigmoid':'sigmoid',
'ChangeLog':'changelog',
'ObjParams':'obj_params',
'PyTable.hd5':'pytable.hd5',
'seedBank.pkl':'seedbank.pkl',
'MyDamonObj':'my_DamonObj',
'MyDmnObj':'my_obj',
'StdParams':'std_params',
'EAR':'EAR',
'Facet':'Facet',
'InputArray':'input_array',
'Array':'array',
'Arrays':'arrays',
'Data':'data',
'File':'file',
'U':'U',
'x':'x',
'X':'X',
'R':'R',
'C':'C',
'V':'V',
'E':'E',
'InitEArray':'init_earray',
'InvUTU':'invUTU_',
'invUTU':'invUTU',
'Range':'range_',
'Type':'type_',
'Return':'return_',
'ArrayNames':'array_names',
'CondFacet':'cond_facet',
'DataDict':'datadict',
'SolveMethod':'solve_meth',
'SolveMethSpecs':'solve_meth_specs',
'SourceIDs':'source_ids',
'TargetIDs':'target_ids',
'InclTarg':'targ_in_sum',
'SigmThresh':'sigma_thresh',
'PredAlpha':'pred_alpha',
'OrigObs':'orig_obs',
'BiasedEst':'biased_est',
'Shape':'shape',
'MissLeftColLabels':'fill_left',
'MissTopRowLabels':'fill_top',
'MinRating':'min_rating',
'RegRMSE':'rmse_reg',
'ErrArray':'st_err',
'SumSqRowPtBis':'row_ptbis',
'SumSqColPtBis':'col_ptbis',
'TargDataIndex':'targ_data_ind',
'TupData':'tup_data',
'PredKey':'pred_key',
'MissMethod':'miss_meth',
'AttRow':'att_row',
'CountChars':'count_chars',
'nKeyColHeaders':'nheaders4cols_key',
'ExtrEst':'extr_est',
'EARArray':'ear',
'DataRCD':'datadict',
'PyTables':'pytables',
'Format':'format_',
'MethSpecs':'meth_specs',
'NearestVal':'nearest_val',
'Median':'median_',
'EstShape':'est_shape', |
'Tests':'tests_',
'Val':'Val',
'Res':'Res',
'Locals':'_locals',
'Locals1':'_locals1',
'_baseEAR':'_base_ear',
'_finResid':'_fin_resid',
'_extractValid':'_extract_valid',
'_mergeAnsKey':'_merge_anskey',
'_scoreMC':'_score_mc',
'_finEst':'_fin_est',
'_baseFit':'_base_fit',
'_baseResid':'_base_resid',
'_baseSE':'_base_se',
| '_finFit':'_fin_fit',
'_baseEst':'_base_est',
'_restoreInvalid':'_restore_invalid',
'_itemdiff':'_item_diff',
}
#############
## Get ##
## Names ##
#############
if mode == 'inspect':
    # NOTE(review): indentation was reconstructed for this section; `names`
    # is only populated in 'inspect' mode, so the replace_-building block
    # below must live under this branch -- confirm against the original file.
    objs = []
    names = []

    # Import each module listed in files2inspect and harvest its names.
    for i in range(len(files2inspect)):
        stringmod = files2inspect[i].replace('.py','')
        mod = __import__(stringmod)
        modobjs = mod.__dict__.keys()

        # Remove unneeded objects
        for obj in removeit:
            try:
                modobjs.remove(obj)
            except ValueError:
                pass

        # Include top-level function names in list
        names.extend(modobjs)

        # Get names automatically
        for obj in modobjs:
            try:
                # Callable: harvest its argument names.
                names.extend(inspect.getargspec(mod.__dict__[obj])[0])
            except TypeError:
                try:
                    # Not a plain function (e.g. a class): harvest its
                    # attributes and each method's argument names instead.
                    subobjs = mod.__dict__[obj].__dict__.keys()
                    for subobj in removeit:
                        try:
                            subobjs.remove(subobj)
                        except ValueError:
                            pass
                    names.extend(subobjs)
                    for subobj in subobjs:
                        names.extend(inspect.getargspec(mod.__dict__[obj].__dict__[subobj])[0])
                    for name in removeit:
                        try:
                            names.remove(name)
                        except ValueError:
                            pass
                except:
                    pass

    #####################
    ##    Build        ##
    ##  replace_ dict  ##
    #####################

    replace_ = {}
    for name in names:
        replace_[name] = name.lower() # replace name with lowercase version

    # Special-case renames always win over the automatic lowercasing.
    for specname in special.keys():
        replace_[specname] = special[specname]

if mode == 'inspect':
    print 'replace_ dictionary:\n',replace_

    # Save as pickle
    dbfile = open('replaceDB.pkl','wb')
    cPickle.dump(replace_,dbfile)
    dbfile.close()

###############
##   Edit    ##
##  Modules  ##
###############

if mode == 'replace':
    print 'replace() is working...\n'

    # Use replace dictionary in pickle db
    dbfile = open('replaceDB.pkl','rb')
    replace_ = cPickle.load(dbfile)
    dbfile.close()

    for filename in files2edit:
        print 'Working on',filename

        # Edit line
        for line in fileinput.input(filename,inplace=True):

            # Replace all specified names in line
            for name in replace_.keys():
                line = re.sub(r'\b'+name+r'\b',replace_[name],line)

            # Replace line with fully edited line
            # (fileinput inplace mode redirects stdout into the file)
            print line,

    print 'replace() is done.'

##############
##   Run    ##
##  Module  ##
##############

# To run functions that are defined in this module
##if __name__ == "__main__":
##    A = MyFunc(...)
##    print A
|
"""manager.py: Flask-Script launcher for services
using https://github.com/yabb85/ueki as prototype
"""
from os import path
from flask_script import Manager, Server
from publicAPI import create_app
import prosper.common.prosper_logging as p_logging
import prosper.common.prosper_config as p_config
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)

# App configuration lives next to this launcher.
CONFIG_FILEPATH = path.join(HERE, 'app.cfg')
CONFIG = p_config.ProsperConfig(CONFIG_FILEPATH)

# Fallback settings handed to the app factory.
SETTINGS = {
    'PORT':8001
}

APP = create_app(SETTINGS, CONFIG)
MANAGER = Manager(APP)

# `python manager.py runserver`: listen on all interfaces using the PROD port.
MANAGER.add_command(
    'runserver',
    Server(
        host='0.0.0.0',
        port=CONFIG.get('PROD', 'PORT')
    )
)
# `python manager.py debug`: local server with the debugger enabled.
MANAGER.add_command(
    'debug',
    Server(
        use_debugger=True,
        port=CONFIG.get('DEBUG', 'PORT')
    )
)

if __name__ == '__main__':
    MANAGER.run()
|
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------- | ------
returns a list of pf tables (optional as a json container)
"""
import subprocess
import sys
import ujson
if __name__ == '__main__':
    # Ask pf for the list of configured tables and normalize whitespace.
    sp = subprocess.run(['/sbin/pfctl', '-sT'], capture_output=True, text=True)
    result = [line.strip() for line in sp.stdout.strip().split('\n')]
    # Optional 'json' argument selects machine-readable output.
    if len(sys.argv) > 1 and sys.argv[1] == 'json':
        print(ujson.dumps(result))
    else:
        # Plain text: one table name per line.
        for table in result:
            print(table)
|
# check that we can do certain things without allocating heap memory
import gc
def f1(a):
    # Single positional argument; used to exercise calls without heap allocation.
    print(a)
def f2(a, b=2):
    # Default argument variant; exercises default-arg calls without allocation.
    print(a, b)
def f3(a, b, c, d):
    # Many locals on purpose: exercises functions with lots of local state
    # (stack slots) while heap allocation is disabled.
    x1 = x2 = a
    x3 = x4 = b
    x5 = x6 = c
    x7 = x8 = d
    print(x1, x3, x5, x7, x2 + x4 + x6 + x8)
global_var = 1
def test():
    # Each statement below exercises one bytecode pattern that must not
    # allocate on the heap; test() is run with gc disabled and memory full.
    global global_var
    global_var = 2 # set an existing global variable
    for i in range(2): # for loop
        f1(i) # function call
        f1(i * 2 + 1) # binary operation with small ints
        f1(a=i) # keyword arguments
        f2(i) # default arg (second one)
        f2(i, i) # 2 args
        f3(1, 2, 3, 4) # function with lots of local state
# Call test() with heap allocation disabled and all memory used up, so any
# hidden allocation inside test() raises MemoryError and fails the test.
gc.disable()
try:
    # Exhaust the heap: each bound-method lookup allocates one cell until
    # allocation fails.
    while True:
        'a'.lower # allocates 1 cell for boundmeth
except MemoryError:
    pass
test()
gc.enable()
|
self):
return self.uuid
def setValue(self, value):
self.value = value
return 0
def getValue(self, value):
return self.value
def undefine(self):
self.value = None
return 0
class LibvirtVolumeBaseTestCase(test.NoDBTestCase):
    """Contains common setup and helper methods for libvirt volume tests."""

    def setUp(self):
        super(LibvirtVolumeBaseTestCase, self).setUp()
        # Record of every command executed via utils.execute during a test.
        self.executes = []

        def fake_execute(*cmd, **kwargs):
            # Capture the command instead of running it; mimic execute()'s
            # (stdout, stderr) return contract.
            self.executes.append(cmd)
            return None, None
        self.stubs.Set(utils, 'execute', fake_execute)
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        # Minimal stand-in for the libvirt driver: only what volume drivers
        # touch (a host handle and the block-device listing).
        class FakeLibvirtDriver(object):
            def __init__(self):
                self._host = host.Host("qemu:///system")

            def _get_all_block_devices(self):
                return []
        self.fake_conn = FakeLibvirtDriver()
        # Canned connector info used by individual tests.
        self.connr = {
            'ip': '127.0.0.1',
            'initiator': 'fake_initiator',
            'host': 'fake_host'
        }
        # Default guest disk mapping; tests mutate copies of this.
        self.disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.name = 'volume-00000001'
        self.location = '10.0.2.15:3260'
        self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
        self.vol = {'id': 1, 'name': self.name}
        self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
        self.user = 'foo'

    def _assertFileTypeEquals(self, tree, file_path):
        # Assert the rendered <disk> XML is file-backed at the given path.
        self.assertEqual('file', tree.get('type'))
        self.assertEqual(file_path, tree.find('./source').get('file'))
class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase):
    """Contains common setup and helper methods for iSCSI volume tests."""

    def iscsi_connection(self, volume, location, iqn, auth=False,
                         transport=None):
        """Build a fake iSCSI connection_info dict for the given volume.

        Optionally includes CHAP auth fields and a transport-specific
        by-path device name prefix.
        """
        path_component = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
        if transport is not None:
            path_component = 'pci-0000:00:00.0-' + path_component
        connection = {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': volume['id'],
                'target_portal': location,
                'target_iqn': iqn,
                'target_lun': 1,
                'device_path': '/dev/disk/by-path/%s' % (path_component),
                'qos_specs': {
                    'total_bytes_sec': '102400',
                    'read_iops_sec': '200',
                }
            }
        }
        if auth:
            # CHAP credentials are only attached when requested.
            connection['data']['auth_method'] = 'CHAP'
            connection['data']['auth_username'] = 'foo'
            connection['data']['auth_password'] = 'bar'
        return connection
class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase):
def _assertDiskInfoEquals(self, tree, disk_info):
self.assertEqual(disk_info['type'], tree.get('device'))
self.assertEqual(disk_info['bus'], tree.find('./target').get('bus'))
self.assertEqual(disk_info['dev'], tree.find('./target').get('dev'))
def _test_libvirt_volume_driver_disk_info(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertDiskInfoEquals(tree, self.disk_info)
    def test_libvirt_volume_disk_info_type(self):
        # A non-default device type ('cdrom') must flow through to the XML.
        self.disk_info['type'] = 'cdrom'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_dev(self):
        # A non-default target device name must flow through to the XML.
        self.disk_info['dev'] = 'hdc'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_bus(self):
        # A non-default bus must flow through to the XML.
        self.disk_info['bus'] = 'scsi'
        self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_driver_serial(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual('fake_serial', tree.find('./serial').text)
self.assertIsNone(tree.find('./blockio'))
self.assertIsNone(tree.find("driver[@discard]"))
def test_libvirt_volume_driver_blockio(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'logical_block_size': '4096',
'physical_block_size': '4096',
},
'serial': 'fake_serial',
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
blockio = tree.find('./blockio')
self.assertEqual('4096', blockio.get('logical_block_size'))
self.assertEqual('4096', blockio.get('physical_block_size'))
    def test_libvirt_volume_driver_iotune(self):
        """qos_specs must be ignored when malformed and rendered when valid."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        # First pass: qos_specs is a bare string, not a mapping.
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'qos_specs': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        iotune = tree.find('./iotune')
        # ensure invalid qos_specs is ignored
        self.assertIsNone(iotune)
        # Second pass: replace the bad value with a proper specs mapping and
        # verify each limit appears as its own <iotune> child element.
        specs = {
            'total_bytes_sec': '102400',
            'read_bytes_sec': '51200',
            'write_bytes_sec': '0',
            'total_iops_sec': '0',
            'read_iops_sec': '200',
            'write_iops_sec': '200',
        }
        del connection_info['data']['qos_specs']
        connection_info['data'].update(dict(qos_specs=specs))
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
    def test_libvirt_volume_driver_readonly(self):
        """access_mode validation: bad value raises, 'rw' omits <readonly>,
        'ro' emits it."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'access_mode': 'bar',  # invalid mode, must be rejected
            },
        }
        # NOTE(review): this local disk_info duplicates self.disk_info, yet the
        # assertRaises call below passes self.disk_info while the later calls
        # use the local — likely unintentional; confirm before unifying.
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.assertRaises(exception.InvalidVolumeAccessMode,
                          libvirt_driver.get_config,
                          connection_info, self.disk_info)
        # 'rw' is the default behavior: no <readonly> element rendered.
        connection_info['data']['access_mode'] = 'rw'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNone(readonly)
        # 'ro' must add the <readonly> element.
        connection_info['data']['access_mode'] = 'ro'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNotNone(readonly)
@mock.patch('compute.virt |
# This file is part of Static Sauce <http://github.com/jdufresne/staticsauce>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import errno
import shutil
from staticsauce import commands
from staticsauce import routes
from staticsauce.conf import settings
from staticsauce.exceptions import AlreadyUpdatedError
from staticsauce.files import StaticFile
from staticsauce.utils import import_path, path_append, file_updated
class BuildCommand(commands.Command):
    """Build the static site: copy the public tree, then render every route
    (and each of its permutations) through its controller into BUILD_DIR."""
    command = 'build'

    def copy_public_dir(self):
        # Mirror PUBLIC_DIR into BUILD_DIR, copying only files whose source
        # is newer than the existing destination (per file_updated).
        for src_dir, dirnames, filenames in os.walk(settings.PUBLIC_DIR):
            dest_dir = path_append(
                settings.BUILD_DIR,
                src_dir[len(settings.PUBLIC_DIR):]
            )
            try:
                os.mkdir(dest_dir)
            except OSError as err:
                # Directory may already exist from a previous build.
                if err.errno != errno.EEXIST:
                    raise
            for filename in filenames:
                dest_path = os.path.join(dest_dir, filename)
                src_path = os.path.join(src_dir, filename)
                if file_updated(dest_path, src_path):
                    self.logger.info("[copy] %(src)s %(dest)s", {
                        'src': src_path,
                        'dest': dest_path,
                    })
                    shutil.copy(src_path, dest_dir)

    def __call__(self):
        self.copy_public_dir()
        for name, route in routes.mapper:
            filename = path_append(settings.BUILD_DIR, route.filename)
            # Resolve the dotted controller path to a callable.
            module, controller = route.controller.rsplit('.', 1)
            module = import_path(module)
            controller = getattr(module, controller)
            # A route without permutations is rendered exactly once.
            permutations = route.permutations \
                if route.permutations is not None else [{}]
            for permutation in permutations:
                fmt_filename = filename.format(**permutation)
                try:
                    os.makedirs(os.path.dirname(fmt_filename))
                except OSError as err:
                    if err.errno != errno.EEXIST:
                        raise
                # NOTE(review): the URI uses the raw route.filename, so any
                # permutation placeholders remain unformatted here while the
                # on-disk path above is formatted — confirm this is intended.
                uri = 'http://{domain}{path}'.format(
                    domain=settings.SITE_DOMAIN,
                    path=route.filename
                )
                static_file = StaticFile(fmt_filename, uri)
                kwargs = {}
                if route.kwargs:
                    kwargs.update(route.kwargs)
                kwargs.update(permutation)
                try:
                    controller(static_file, **kwargs)
                except AlreadyUpdatedError:
                    # Controller decided the output is already current.
                    pass
                else:
                    self.logger.info("[%(controller)s] %(filename)s", {
                        'controller': route.controller,
                        'filename': fmt_filename,
                    })
                    static_file.save(fmt_filename)
|
import os
from setuptools import find_packages
from setuptools import setup

version = '1.0'
project = 'kotti_mb'

# BUG FIX: the original read ``install_requires=[...],`` at module level —
# the trailing comma made this a one-element *tuple containing a list*,
# which is not a valid setuptools ``install_requires`` value.
install_requires = [
    'Kotti',
]

here = os.path.abspath(os.path.dirname(__file__))
# Read the long-description sources, closing the files deterministically.
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'CHANGES.txt')) as changes_file:
    CHANGES = changes_file.read()

setup(name=project,
      version=version,
      description="AddOn for Kotti",
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Framework :: Pyramid",
          "License :: Repoze Public License",
      ],
      keywords='kotti addon',
      author='Christoph Boehner',
      author_email='cb@vorwaerts-werbung.de',
      url='http://pypi.python.org/pypi/',
      license='bsd',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      tests_require=[],
      entry_points={
          'fanstatic.libraries': [
              'kotti_mb = kotti_mb.fanstatic:library',
          ],
      },
      extras_require={},
      message_extractors={'kotti_mb': [
          ('**.py', 'lingua_python', None),
          ('**.zcml', 'lingua_xml', None),
          ('**.pt', 'lingua_xml', None),
      ]},
      )
|
# This file is | part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
def serialize_ip_network_group(group):
    """Serialize group to JSON-like object"""
    group_id = group.id
    return {
        '_type': 'IPNetworkGroup',
        'id': group_id,
        'name': group.name,
        'identifier': 'IPNetworkGroup:{}'.format(group_id),
    }
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import deque
import SokoMove
import Helper as hlp
class SokoGoodFloors():
    """Computes the set of "good" floor squares of a Sokoban board: squares a
    box can occupy after being pulled (reverse-pushed) away from a goal square.

    Interface:
        findGoodFloors() -> list of good floor coordinates
    """
    def __init__(self, pgs, pgdp, zfl, pp):
        # pgs:  static playing field (grid of strings, '#' = wall)
        # pgdp: dynamic playing field (box positions, packed)
        # zfl:  list of goal-square coordinates
        # pp:   player position
        self.pgs=pgs
        self.pgdp=pgdp
        self.zfl=zfl
        self.pp=pp
        self.bf=[]
        self.sm=SokoMove.SokoMove()
        # Offsets per direction index (L, R, U, D):
        self.pppi=[(-1, 0), ( 1, 0), ( 0, -1), ( 0, 1)] # player pull position
        self.pdpi=[(-2, 0), ( 2, 0), ( 0, -2), ( 0, 2)] # player destination position
        self.hrms=["L", "R", "U", "D"]  # human-readable move symbols

    # ###########################################################
    # Returns a packed list of the pulls possible on the box at
    # (x, y), ignoring the current player position.
    def __pullableBordersForSingleBox(self, x, y):
        pbl=[]
        for i in range(4):
            if self.pgs[y+self.pppi[i][1]][x+self.pppi[i][0]]!="#":
                # the square the player pulls from is free
                if self.pgs[y+self.pdpi[i][1]][x+self.pdpi[i][0]]!="#":
                    # the square the player would land on is free
                    pbl.append(hlp.mpack((x+self.pppi[i][0], y+self.pppi[i][1]), i))
        return(pbl)

    # ###########################################################
    # Returns True if there is a path from pp to dp. Exactly one
    # box (at coordinates bp) may be on the board. The contents of
    # the dynamic board self.pgdp are NOT taken into account!
    def __testPlayerWayToPos(self, pp, dp, bp):
        rc=self.__testPlayerWayToPosSubQueue(pp, dp, bp)
        return(rc!=None)

    # ###########################################################
    # BFS: on a board with a single box (at bp), returns a move
    # sequence for the path from pp to dp, or None if no path exists.
    def __testPlayerWayToPosSubQueue(self, pp, dp, bp):
        queue=deque([(pp, "")])
        visited=[pp]
        while queue:
            ((x, y), rc)=queue.popleft()
            if (x, y)==dp:
                return(rc)
            for i in range(4):
                nx=x+self.pppi[i][0]
                ny=y+self.pppi[i][1]
                if (nx, ny) not in visited:
                    if self.pgs[ny][nx]!="#" and (nx, ny)!=bp:
                        queue.append(((nx, ny), rc+self.hrms[i]))
                        visited.append((nx, ny))
        return(None)

    # ###########################################################
    # Like __testPlayerWayToPosSubQueue, but depth-first — not fit
    # for purpose here. UNUSED.
    # NOTE(review): this calls self.__testPlayerWayToPosSub, which does not
    # exist in this class — calling this method would raise AttributeError.
    def __testPlayerWayToPosSubStack(self, rc, pp, dp, visited):
        if rc==True:
            return(True, visited)
        if pp==dp:
            return(True, visited)
        if self.pgs[pp[1]][pp[0]]=="#":
            return(False, visited)
        if pp in visited:
            return(False, visited)
        visited.append(pp)
        rc, visited=self.__testPlayerWayToPosSub(rc, (pp[0]-1, pp[1]), dp, visited)
        rc, visited=self.__testPlayerWayToPosSub(rc, (pp[0]+1, pp[1]), dp, visited)
        rc, visited=self.__testPlayerWayToPosSub(rc, (pp[0], pp[1]-1), dp, visited)
        rc, visited=self.__testPlayerWayToPosSub(rc, (pp[0], pp[1]+1), dp, visited)
        return(False, visited)

    # ###########################################################
    # Returns a list of all floors a box can stand on after having
    # been pulled away from a goal square.
    def findGoodFloors(self):
        good_floors=[]
        for gsx, gsy in self.zfl: # for every goal square...
            pgdpt=[hlp.ppack(gsx, gsy)] # ...place a box on it
            rlst=self.__pullableBordersForSingleBox(gsx, gsy) # determine possible inverse moves
            # rlst can contain 0 to 4 elements
            for p in rlst:
                pgdpt=[hlp.ppack(gsx, gsy)] # reset, because __findGoodFloorsForSingleBox mutates pgdpt
                good_floors=self.__findGoodFloorsForSingleBox(pgdpt, p, self.pp, good_floors)
        rc=[]
        for i in good_floors: # turn the list of pulls into a list of floors
            (dp, d)=hlp.munpack(i)
            if dp not in rc:
                rc.append(dp)
        return(rc)

    # ###########################################################
    # Recursively walks every box position reachable by pulling a
    # box away from a goal square. good_floors records BOX positions
    # (NOT player positions, as elsewhere) together with a pull
    # direction, so a floor can be visited up to four times — once
    # per pull direction.
    #
    # May extend good_floors and mutates pgdp.
    def __findGoodFloorsForSingleBox(self, pgdp, pull, pp, good_floors):
        bp=hlp.punpack(pgdp[0]) # current box position (unpacked) — only one box present
        dp, d=hlp.munpack(pull) # box target position (unpacked)
        if hlp.mpack(bp, d) in good_floors: # if this pull was already performed...
            return(good_floors) # ...nothing more to do
        if self.__testPlayerWayToPos(pp, dp, bp)==True: # if the player can reach the pull position...
            pp=dp # ...the player position can be set to the pull position
            good_floors.append(hlp.mpack(bp, d)) # ...and the pull recorded as good and done
            rc, pp=self.sm.inverseMovePlayer(self.pgs, pgdp, pp, d) # perform the move described by "pull"
            bp=hlp.punpack(pgdp[0]) # fetch the (possibly changed) box position
            rlst=self.__pullableBordersForSingleBox(bp[0], bp[1]) # determine possible follow-up pulls
            for p in rlst: # for every follow-up pull
                pgdpt=[pgdp[0]] # reset, because __findGoodFloorsForSingleBox mutates pgdpt
                good_floors=self.__findGoodFloorsForSingleBox(pgdpt, p, pp, good_floors)
        return(good_floors)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2004 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing templates for the documentation generator (lists style).
"""
from __future__ import unicode_literals
#################################################
## Common templates for index and docu files ##
#################################################
headerTemplate = \
'''<!DOCTYPE html>
<html><head>
<title>{{Title}}</title>
<meta charset="UTF-8">
</head>
<body style="background-color:{BodyBgColor};color:{BodyColor}">'''
footerTemplate = '''
</body></html>'''
#########################################
## Templates for documentation files ##
#########################################
moduleTemplate = \
'''<a NAME="top" ID="top"></a>
<h1 style="background-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Module}}</h1>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Global Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionList}}
<hr />'''
rbFileTemplate = \
'''<a NAME="top" ID="top"></a>
<h1 style="background-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Module}}</h1>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Global Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Modules</h3>
{{RbModulesList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionList}}
<hr />'''
classTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Class}}</h2>
{{ClassDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Derived from</h3>
{{ClassSuper}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Class Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Class Methods</h3>
{{ClassMethodList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Methods</h3>
{{MethodList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Static Methods</h3>
{{StaticMethodList}}
{{MethodDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
methodTemplate = \
'''<a NAME="{{Anchor}}.{{Method}}" ID="{{Anchor}}.{{Method}}"></a>
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
{{Class}}.{{Method}}{{MethodClassifier}}</h3>
<b>{{Method}}</b>(<i>{{Params}}</i>)
{{MethodDescription}}'''
constructorTemplate = \
'''<a NAME="{{Anchor}}.{{Method}}" ID="{{Anchor}}.{{Method}}"></a>
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
{{Class}} (Constructor)</h3>
<b>{{Class}}</b>(<i>{{Params}}</i>)
{{MethodDescription}}'''
rbModuleTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Module}}</h2>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Module Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassesList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionsList}}
<hr />
{{ClassesDetails}}
{{FunctionsDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
rbModulesClassTemplate = \
'''<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Class}}</h2>
{{ClassDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Derived from</h3>
{{ClassSuper}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Methods</h3>
{{MethodList}}
{{MethodDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
functionTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Function}}</h2>
<b>{{Function}}</b>(<i>{{Params}}</i>)
{{FunctionDescription}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
listTemplate = \
'''<table>
{{Entries}}
</table>'''
listEntryTemplate = \
'''<tr>
<td><a style="color:{LinkColor}" href="#{{Link}}">{{Name}}</a></td>
<td>{{Deprecated}}{{Description}}</td>
</tr>'''
listEntryNoneTemplate = '''<tr><td>None</td></tr>'''
listEntryDeprecatedTemplate = '''<b>Deprecated.</b>'''
listEntrySimpleTemplate = '''<tr><td>{{Name}}</td></tr>'''
paragraphTemplate = \
'''<p>
{{Lines}}
</p>'''
parametersListTemplate = \
'''<dl>
{{Parameters}}
</dl>'''
parametersListEntryTemplate = \
'''<dt><i>{{Name}}</i></dt>
<dd>
{{Description}}
</dd>'''
parameterTypesListEntryTemplate = \
'''<dt><i>{{Name}}</i> ({{Type}})</dt>
<dd>
{{Description}}
</dd>'''
returnsTemplate = \
'''<dl>
<dt>Returns:</dt>
<dd>
{{0}}
</dd>
</dl>'''
returnTypesTemplate = \
'''<dl>
<dt>Return Type:</dt>
<dd>
{{0}}
</dd>
</dl>'''
exceptionsListTemplate = \
'''<dl>
{{Exceptions}}
</dl>'''
exceptionsListEntryTemplate = \
'''<dt>Raises <b>{{Name}}</b>:</dt>
<dd>
{{Description}}
</dd>'''
signalsListTemplate = \
'''<h4>Signals</h4>
<dl>
{{Signals}}
</dl>'''
signalsListEntryTemplate = \
'''<dt>{{Name}}</dt>
<dd>
{{Description}}
</dd>'''
eventsListTemplate = \
'''<h4>Events</h4>
<dl>
{{Events}}
</dl>'''
eventsListEntryTemplate = \
'''<dt>{{Name}}</dt>
<dd>
{{Description}}
</dd>'''
deprecatedTemplate = \
'''<p>
<b>Deprecated.</b>
{{Lines}}
</p>'''
authorInfoTemplate = \
'''<p>
<i>Author(s)</i>:
{{Authors}}
</p>'''
seeListTemplate = \
'''<dl>
<dt><b>See Also:</b> | </dt>
{{Links}}
</dl>'''
seeListEntryTemplate = \
'''<dd>
{{Link}}
</dd>'''
seeLinkTemplate = '''<a style="color:{LinkColor}" {{Link}}'''
sinceInfoTemplate = \
'''<p>
<b>since</b> {{Info}}
</p>'''
#################################
## Templates for index files ##
#################################
indexBodyTemplate = '''
<h1 style="backgro | und-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Title}}</h1>
{{Description}}
{{Subpackages}}
{{Modules}}'''
indexListPackagesTemplate = '''
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Packages</h3>
<table>
{{Entries}}
</table>'''
indexListModulesTemplate = '''
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Modules</h3>
<table>
{{Entries}}
</table>'''
indexListEntryTemplate = \
'''<tr>
<td><a style="color:{LinkColor}" href="{{Link}}">{{Name}}</a></td>
<td>{{Description}}</td>
</tr>'''
|
# Copyright 2019 Verily Life Sciences Inc. All Rights Re | served.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR C | ONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package marker file."""
|
# -*- coding:utf-8 -*-
import mock
import pytest
import libpy
import libpy.shadow
class TestShadow(object):
    """Tests for libpy.shadow.Shadow attribute-proxy behavior."""

    def test(self):
        # Shadow exposes the target's public attributes (class- and
        # instance-level) and hides missing/underscore-private ones.
        proxy = libpy.shadow.Shadow(ShadowTarget())
        assert isinstance(proxy, libpy.shadow.Shadow)
        assert proxy.a == mock.sentinel.proxy_target_a
        assert proxy.b == mock.sentinel.proxy_target_b
        with pytest.raises(AttributeError):
            _ = proxy.X
        with pytest.raises(AttributeError):
            _ = proxy._c

    def test_keep_value(self):
        # A value read through the proxy survives deletion on the target;
        # deleting it on the proxy itself finally removes it.
        target = ShadowTarget()
        proxy = libpy.shadow.Shadow(target)
        assert proxy.b == mock.sentinel.proxy_target_b
        delattr(target, 'b')
        assert not hasattr(target, 'b')
        assert proxy.b == mock.sentinel.proxy_target_b
        delattr(proxy, 'b')
        with pytest.raises(AttributeError):
            _ = proxy.b

    def test_update(self):
        # The proxy keeps the first-seen value until its cached copy is
        # deleted, after which the target's new value shows through.
        target = ShadowTarget()
        proxy = libpy.shadow.Shadow(target)
        assert proxy.a == mock.sentinel.proxy_target_a
        target.a = 'new_value_for_a'
        assert proxy.a == mock.sentinel.proxy_target_a
        delattr(proxy, 'a')
        assert proxy.a == 'new_value_for_a'

    def test_override(self):
        # Constructor keyword overrides and direct assignment on the proxy
        # both take precedence over the target's attributes.
        proxy = libpy.shadow.Shadow(ShadowTarget(), a='override_a')
        assert proxy.a == 'override_a'
        assert proxy.b == mock.sentinel.proxy_target_b
        proxy.b = 'dynamic_override_b'
        assert proxy.b == 'dynamic_override_b'
class ShadowTarget(object):
    """Fixture target: one class attribute, one instance attribute, and one
    underscore-private attribute the proxy must not expose."""
    a = mock.sentinel.proxy_target_a

    def __init__(self):
        self.b = mock.sentinel.proxy_target_b
        self._c = mock.sentinel.proxy_target_c
|
"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
"Antonio Gonzalez Pena",
"Andrew J. King",
"Michael S. Robeson",
]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "antgonza@gmail.com"
from qiime.distance_matrix_from_mapping import compute_distance_matrix_from_metadata, dist_vincenty, calculate_dist_vincenty
from numpy import array
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
import StringIO
class FunctionTests(TestCase):
    """Tests of top-level functions"""

    def setUp(self):
        # Example QIIME mapping file (tab-delimited columns as rendered here;
        # NOTE(review): the exact in-string whitespace must match the original
        # fixture — verify if this test starts failing on parsing).
        self.fasting_map = """#SampleID BarcodeSequence LinkerPrimerSequence Treatment DOB Float_Col Description
#Example mapping file for the QIIME analysis package. These 9 samples are from a study of the effects of exercise and diet on mouse cardiac physiology (Crawford, et al, PNAS, 2009).
PC.354 AGCACGAGCCTA YATGCTGCCTCCCGTAGGAGT Control 20061218 .1 Control_mouse__I.D._354
PC.355 AACTCGTCGATG YATGCTGCCTCCCGTAGGAGT Control 20061218 .2 Control_mouse__I.D._355
PC.356 ACAGACCACTCA YATGCTGCCTCCCGTAGGAGT Control 20061126 .3 Control_mouse__I.D._356
PC.481 ACCAGCGACTAG YATGCTGCCTCCCGTAGGAGT Control 20070314 .4 Control_mouse__I.D._481
PC.593 AGCAGCACTTGT YATGCTGCCTCCCGTAGGAGT Control 20071210 .5 Control_mouse__I.D._593
PC.607 AACTGTGCGTAC YATGCTGCCTCCCGTAGGAGT Fast 20071112 .6 Fasting_mouse__I.D._607
PC.634 ACAGAGTCGGCT YATGCTGCCTCCCGTAGGAGT Fast 20080116 .7 Fasting_mouse__I.D._634
PC.635 ACCGCAGAGTCA YATGCTGCCTCCCGTAGGAGT Fast 20080116 .8 Fasting_mouse__I.D._635
PC.636 ACGGTGAGTGTC YATGCTGCCTCCCGTAGGAGT Fast 20080116 .9 Fasting_mouse__I.D._636"""
        # Integer metadata column (dates of birth as YYYYMMDD ints).
        self.DOB = [
            20061218,
            20061218,
            20061126,
            20070314,
            20071210,
            20071112,
            20080116,
            20080116,
            20080116]
        # Float metadata column.
        self.Float_Col = [.1, .2, .3, .4, .5, .6, .7, .8, .9]
        # Paired lat/long coordinates for the Vincenty distance tests.
        self.latitudes = [30, 20, 30, 30, 0, 1, 90, 89, 0, 0]
        self.longitudes = [60, -50, 60, 60, 0, 0, 0, 0, 0, 0]

    def test_compute_distance_matrix_from_metadata_int(self):
        """ distance calculations on ints should throw no errors"""
        # Expected pairwise |a - b| matrix over self.DOB.
        exp_out = array(
            [[0, 0, 92, 9096, 9992, 9894, 18898, 18898, 18898],
             [0, 0, 92, 9096, 9992, 9894, 18898, 18898, 18898],
             [92, 92, 0, 9188, 10084, 9986, 18990, 18990, 18990],
             [9096, 9096, 9188, 0, 896, 798, 9802, 9802, 9802],
             [9992, 9992, 10084, 896, 0, 98, 8906, 8906, 8906],
             [9894, 9894, 9986, 798, 98, 0, 9004, 9004, 9004],
             [18898, 18898, 18990, 9802, 8906, 9004, 0, 0, 0],
             [18898, 18898, 18990, 9802, 8906, 9004, 0, 0, 0],
             [18898, 18898, 18990, 9802, 8906, 9004, 0, 0, 0]])
        res_out = compute_distance_matrix_from_metadata(self.DOB)
        assert_almost_equal(exp_out, res_out)

    def test_compute_distance_matrix_from_metadata_floats(self):
        """ distance calculations on floats should throw no errors"""
        # testing floats: pairwise |a - b| over self.Float_Col.
        exp_out = array(
            [[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
             [0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7],
             [0.2, 0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
             [0.3, 0.2, 0.1, 0., 0.1, 0.2, 0.3, 0.4, 0.5],
             [0.4, 0.3, 0.2, 0.1, 0., 0.1, 0.2, 0.3, 0.4],
             [0.5, 0.4, 0.3, 0.2, 0.1, 0., 0.1, 0.2, 0.3],
             [0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0., 0.1, 0.2],
             [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0., 0.1],
             [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.]])
        res_out = compute_distance_matrix_from_metadata(self.Float_Col)
        assert_almost_equal(exp_out, res_out)

    def test_dist_vincenty(self):
        """dist_Vincenty:Returns distance in meters between two lat long points"""
        lat1, lon1, lat2, lon2, expected_value = 30, 60, 20, -50, 10709578.387
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        # Identical points -> zero distance.
        lat1, lon1, lat2, lon2, expected_value = 30, 60, 30, 60, 0
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        lat1, lon1, lat2, lon2, expected_value = 0, 0, 1, 0, 110574.389
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        lat1, lon1, lat2, lon2, expected_value = 90, 0, 89, 0, 111693.865
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        # Pole-to-pole and pole-to-equator distances.
        lat1, lon1, lat2, lon2, expected_value = 90, 0, -90, 0, 20003931.459
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        lat1, lon1, lat2, lon2, expected_value = 90, 0, 0, 0, 10001965.729
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        lat1, lon1, lat2, lon2, expected_value = 0, 0, 0, 0, 0
        value = dist_vincenty(lat1, lon1, lat2, lon2, 20)
        assert_almost_equal(value, expected_value)
        # test for not converge: antipodal equatorial points must raise.
        lat1, lon1, lat2, lon2 = 0, 180, 0, 0
        self.assertRaises(
            ValueError,
            dist_vincenty,
            lat1,
            lon1,
            lat2,
            lon2,
            20)

    def test_calculate_dist_vincenty(self):
        # Expected full 10x10 pairwise Vincenty distance matrix over
        # self.latitudes/self.longitudes (meters).
        exp_out = array(
            [[0.0, 10709578.387, 0.0, 0.0, 7154900.607, 7094106.828,
              6681852.331, 6626434.332, 7154900.607, 7154900.607],
             [10709578.387, 0.0, 10709578.387, 10709578.387, 5877643.846,
              5831009.412, 7789599.475, 7718017.604, 5877643.846,
              5877643.846],
             [0.0, 10709578.387, 0.0, 0.0, 7154900.607, 7094106.828,
              6681852.331, 6626434.332, 7154900.607, 7154900.607],
             [0.0, 10709578.387, 0.0, 0.0, 7154900.607, 7094106.828,
              6681852.331, 6626434.332, 7154900.607, 7154900.607],
             [7154900.607, 5877643.846, 7154900.607, 7154900.607, 0.0,
              110574.389, 10001965.729, 9890271.864, 0.0, 0.0],
             [7094106.828, 5831009.412, 7094106.828, 7094106.828, 110574.389,
              0.0, 9891391.341, 9779697.476, 110574.389, 110574.389],
             [6681852.331, 7789599.475, 6681852.331, 6681852.331,
              10001965.729, 9891391.341, 0.0, 111693.865, 10001965.729,
              10001965.729],
             [6626434.332, 7718017.604, 6626434.332, 6626434.332,
              9890271.864, 9779697.476, 111693.865, 0.0, 9890271.864,
              9890271.864],
             [7154900.607, 5877643.846, 7154900.607, 7154900.607, 0.0,
              110574.389, 10001965.729, 9890271.864, 0.0, 0.0],
             [7154900.607, 5877643.846, 7154900.607, 7154900.607, 0.0,
              110574.389, 10001965.729, 9890271.864, 0.0, 0.0]])
        res_out = calculate_dist_vincenty(self.latitudes, self.longitudes)
        assert_almost_equal(res_out, exp_out)
# run tests if called from command line
if __name |
from django.utils.unittest.case import TestCase
from scheduler.models import ScheduleGenerator
from uni_info.models import Semester, Course
class ScheduleGeneratorTest(TestCase):
"""
Test class for schedule generator, try different courses
"""
fixtures = ['/scheduler/fixtures/initial_data.json']
    def setUp(self):
        """
        Set up common data needed in each unit test.
        """
        # The fixture's 'Fall 2013' semester is the target term for every
        # schedule-generation test below.
        self.fall_2013_semester = [sem for sem in Semester.objects.all() if sem.name == 'Fall 2013'][0]
def test_should_generate_empty_schedule(self):
"""
Test generator does not crash with empty list as edge case
"""
course_list = []
generator = ScheduleGenerator(course_list, self.fall_2013_semester)
result = generator.generate_schedules()
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
def test_should_generate_with_1_course(self):
"""
Test generator with only 1 course as edge case
"""
soen341 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '341'][0]
course_list = [soen341]
generator = ScheduleGenerator(course_list, self.fall_2013_semester)
result = generator.generate_schedules()
self.assertIsNotNone(result)
self.assertEqual(2, len(result))
def test_should_generate_schedule_for_2_course(self):
"""
Test generator with more than 1 course
"""
soen341 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '341'][0]
soen287 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '287'][0]
course_list = [soen287, soen341]
generator = ScheduleGenerator(course_list, self.fall_2013_semester)
result = generator.generate_schedules()
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
def test_should_not_generate_schedule_for_3_course_conflict(self):
"""
Test generator with three conflicting courses
"""
soen341 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '341'][0]
soen342 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '342'][0]
soen287 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '287'][0]
course_list = [soen287, soen341, soen342]
generator = ScheduleGenerator(course_list, self.fall_2013_semester)
result = generator.generate_schedules()
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
def test_should_generate_schedule_for_3_course_no_conflict(self):
"""
Test generator with three courses that has no confl | icts
"""
soen341 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '341'][0]
soen343 = [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '343'][0]
soen287 = | [s for s in Course.objects.all() if
s.course_letters == 'SOEN' and
s.course_numbers == '287'][0]
course_list = [soen287, soen341, soen343]
generator = ScheduleGenerator(course_list, self.fall_2013_semester)
result = generator.generate_schedules()
self.assertIsNotNone(result)
self.assertEqual(4, len(result)) |
from dja | ngo.conf.urls import patterns, include, url
from django.contrib import admin
import urls
from apps.blog import views
# URL routing table (legacy Django `patterns('', ...)` style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'gigsblog.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^$', views.Index.as_view(), name='index'),         # front page
    url(r'^signup', views.SignUp.as_view(), name='signUp'),  # account creation
    url(r'^login', views.Login.as_view(), name='login'),
    # built-in auth logout view; redirects to the front page afterwards
    url(r'^logout', 'django.contrib.auth.views.logout',{'next_page':'/'}, name='logout'),
    url(r'^post/', include('urls.blog', namespace='post')),  # blog-post URLconf
    url(r'^admin/', include('urls.admin')),
)
|
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, a command-line cryptocurrency wallet
# Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io>
# Licensed under the GNU General Public License, Version 3:
# https://www.gnu.org/licenses
# Public project repositories:
# https://github.com/mmgen/mmgen
# https://gitlab.com/mmgen/mmgen
"""
wallet.brain: brainwallet wallet class
"""
from ..opts import opt
from ..util import msg,qmsg,qmsg_r
from ..color import yellow
from .enc import wallet
from .seed import Seed
import mmgen.crypto as crypto
class wallet(wallet):
    # NOTE: deliberately subclasses (and shadows) the `wallet` base imported
    # from .enc -- the pattern this package uses to define wallet formats.

    stdin_ok = True       # brainwallet text may be supplied on stdin
    desc = 'brainwallet'
    # brainwallet warning message? TODO

    def get_bw_params(self):
        """Split --brain-params into (seed length in bits, hash preset)."""
        # already checked
        a = opt.brain_params.split(',')
        return int(a[0]),a[1]

    def _deformat(self):
        """Normalize the passphrase: collapse all whitespace runs to single spaces."""
        self.brainpasswd = ' '.join(self.fmt_data.split())
        return True

    def _decrypt(self):
        """Derive self.seed from the brainwallet passphrase via scrypt."""
        d = self.ssdata
        if opt.brain_params:
            # Don't set opt.seed_len! When using multiple wallets, BW seed len
            # might differ from others
            bw_seed_len,d.hash_preset = self.get_bw_params()
        else:
            if not opt.seed_len:
                qmsg(f'Using default seed length of {yellow(str(Seed.dfl_len))} bits\n'
                    + 'If this is not what you want, use the --seed-len option' )
            self._get_hash_preset()
            bw_seed_len = opt.seed_len or Seed.dfl_len
        qmsg_r('Hashing brainwallet data. Please wait...')
        # Use buflen arg of scrypt.hash() to get seed of desired length
        seed = crypto.scrypt_hash_passphrase(
            self.brainpasswd.encode(),
            b'',
            d.hash_preset,
            buflen = bw_seed_len // 8 )
        qmsg('Done')
        self.seed = Seed(seed)
        msg(f'Seed ID: {self.seed.sid}')
        qmsg('Check this value against your records')
        return True

    def _format(self):
        """Brainwallets are input-only; serialization is unsupported."""
        raise NotImplementedError('Brainwallet not supported as an output format')

    def _encrypt(self):
        """Brainwallets are input-only; encryption for output is unsupported."""
        raise NotImplementedError('Brainwallet not supported as an output format')
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the | "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed | to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a campaign in a given advertiser.
To create an advertiser, run create_advertiser.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
# Placeholders the user must replace before running the example.
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
CAMPAIGN_NAME = 'INSERT_CAMPAIGN_NAME_HERE'
URL = 'INSERT_LANDING_PAGE_URL_HERE'
LANDING_PAGE_NAME = 'INSERT_LANDING_PAGE_NAME_HERE'
# NOTE: month/day placeholders must be replaced with numeric strings first --
# int('INSERT_..._HERE') raises ValueError at import time until they are.
START_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_START_YEAR_HERE',
    'month': int('INSERT_START_MONTH_HERE'),
    'day': int('INSERT_START_DAY_HERE')}
END_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_END_YEAR_HERE',
    'month': int('INSERT_END_MONTH_HERE'),
    'day': int('INSERT_END_DAY_HERE')}
def main(client, advertiser_id, campaign_name, url, landing_page_name,
start_date, end_date):
# Initialize appropriate service.
campaign_service = client.GetService(
'campaign', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Create a default landing page for the campaign and save it.
default_landing_page = {
'url': url,
'name': landing_page_name
}
default_landing_page_id = campaign_service.saveLandingPage(
default_landing_page)['id']
# Construct and save the campaign.
campaign = {
'name': campaign_name,
'advertiserId': advertiser_id,
'defaultLandingPageId': default_landing_page_id,
'archived': 'false',
'startDate': start_date,
'endDate': end_date
}
result = campaign_service.saveCampaign(campaign)
# Display results.
print 'Campaign with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
  # Initialize client object (credentials come from googleads.yaml, see module
  # docstring), then run the example with the placeholder constants above.
  dfa_client = dfa.DfaClient.LoadFromStorage()
  main(dfa_client, ADVERTISER_ID, CAMPAIGN_NAME, URL, LANDING_PAGE_NAME,
       START_DATE, END_DATE)
|
#!/usr/bin/env python
# B a r a K u d a
#
# L. Brodeau, 2017
import sys
import numpy as nmp
from PIL import Image
import string
import os
from netCDF4 import Dataset
# --- command-line handling (Python 2 script: print statements, `string` module) ---
narg = len(sys.argv)
if narg != 2:
    print 'Usage: '+sys.argv[0]+' <mesh_mask>'; sys.exit(0)
cf_mm = sys.argv[1]

# Output bitmap name: swap the '.nc' suffix of the input for '_orig.bmp'.
cf_bmp = string.replace(os.path.basename(cf_mm), '.nc', '_orig.bmp')
# If the input ended in '.nc4', the first replace left '_orig.bmp4'; fix it.
cf_bmp = string.replace(os.path.basename(cf_bmp), '_orig.bmp4', '_orig.bmp')

# Opening mesh_mask: read the first time/depth slice of the land-sea mask.
f_mm = Dataset(cf_mm)
mask = f_mm.variables['tmask'][0,0,:,:]
f_mm.close()

(nj, ni) = nmp.shape(mask)
print ' nj, ni =>', nj, ni

#imask= nmp.zeros((nj, ni), dtype=nmp.int8)
#imask[:,:] = mask[:,:]
#del mask
# Scale the mask to 8-bit greyscale (0/1 -> 0/255) for the bitmap.
imask = (255*mask).astype(nmp.uint8)

# Then save it, flipped vertically so row 0 of the array ends up at the
# bottom of the image:
result = Image.fromarray(nmp.flipud(imask))
result.save(cf_bmp)
print ' *** Image '+cf_bmp+' saved!\n'
|
# -*- coding: utf-8 -*-
'''
Created on Mar 12, 2012
@author: moloch
Copyright 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import re
from uuid import uuid4
from datetime import datetime
from sqlalchemy import Column
from sqlalchemy.types import DateTime, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
def generate_uuid():
    """Return a fresh random UUID4 as its canonical 36-character string.

    (PEP 8 E731: a named lambda gives a useless __name__ and no docstring;
    a def is the idiomatic form. Behavior is unchanged.)
    """
    return str(uuid4())
class _DatabaseObject(object):
    ''' All game objects inherit from this object '''

    @declared_attr
    def __tablename__(self):
        ''' Derive the table name from the class name: CamelCase -> snake_case '''
        class_name = self.__name__
        snake_tail = re.sub(
            r'([A-Z])',
            lambda letter: "_" + letter.group(0).lower(),
            class_name[1:],
        )
        return class_name[0].lower() + snake_tail

    # Surrogate primary key, random UUID and creation timestamp shared by
    # every mapped subclass.
    id = Column(Integer, unique=True, primary_key=True)  # lint:ok
    uuid = Column(String(36), unique=True, default=generate_uuid)
    created = Column(DateTime, default=datetime.now)
# Declarative base class "DatabaseObject"; all model classes derive from it.
DatabaseObject = declarative_base(cls=_DatabaseObject)
|
#!/usr/bin/env python
#
# Copyright (C) 2012 Strahinja Val Markovic <val@markovic.io>
#
# This file is part of MatchTagAlways.
#
# MatchTagAlways is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MatchTagAlways is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MatchTagAlways. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
PY2 = (sys.version_info[0] == 2)
TAG_REGEX = re.compile(
r"""<\s* # the opening bracket + whitespace
(?P<start_slash>/)? # captures the slash if closing bracket
\s* # more whitespace
(?P<tag_name>[\w:-]+) # the tag name, captured
.*? # anything else in the tag
(?P<end_slash>/)? # ending slash, for self-closed tags
>""",
re.VERBOSE | re.DOTALL )
COMMENT_REGEX = re.compile( '<!--.*?-->', re.DOTALL )
class TagType( object ):
  """Constants for the three kinds of HTML tag."""
  OPENING = 1      # e.g. <div>
  CLOSING = 2      # e.g. </div>
  SELF_CLOSED = 3  # e.g. <br/>
class Tag( object ):
  """One HTML tag parsed out of the buffer.

  Wraps a TAG_REGEX match object. A Tag built from a failed match (None)
  evaluates falsy, so callers can write `if not tag: ...`.
  """

  def __init__( self, match_object ):
    if not match_object:
      self.valid = False
      return
    self.valid = True
    self.name = match_object.group( 'tag_name' )
    if match_object.group( 'start_slash' ):
      self.kind = TagType.CLOSING
    elif match_object.group( 'end_slash' ):
      self.kind = TagType.SELF_CLOSED
    else:
      self.kind = TagType.OPENING
    self.start_offset = match_object.start()
    self.end_offset = match_object.end()

  if PY2:
    def __nonzero__( self ):
      return self.valid
  else:
    def __bool__( self ):
      return self.valid

  def __eq__( self, other ):
    # BUG FIX: the previous implementation had a bare `return` (i.e. returned
    # None, which is falsy) for same-typed operands, so two Tags never
    # compared equal. Compare all attributes instead.
    if type( other ) is type( self ):
      return self.__dict__ == other.__dict__
    return False

  def __ne__( self, other ):
    # Needed for consistency on Python 2, which does not derive != from ==.
    return not self.__eq__( other )
def PacifyHtmlComments( text ):
  """Replaces the contents (including delimiters) of all HTML comments in the
  passed-in text with 'x'. For instance, 'foo <!-- bar -->' becomes
  'foo xxxx xxx xxx'. We can't just remove the comments because that would screw
  with the mapping of string offset to Vim line/column."""
  def replacement( match ):
    # r'\S': must be a raw string -- '\S' in a plain literal is an invalid
    # escape sequence (a SyntaxWarning on modern Python).
    return re.sub( r'\S', 'x', match.group() )
  return COMMENT_REGEX.sub( replacement, text )
def ReverseFindTag( text, from_position ):
  """Return the Tag whose '<' is nearest before from_position, or None when
  there is no tag there or the tag would extend past from_position."""
  try:
    bracket_index = text.rindex( '<', 0, from_position )
  except ValueError:
    # no '<' anywhere before the position
    return None
  match = TAG_REGEX.match( text, bracket_index )
  if match and match.end() <= from_position:
    return Tag( match )
  return None
def ForwardFindTag( text, from_position ):
  """Return the first Tag found at or after from_position (falsy when none)."""
  match_object = TAG_REGEX.search( text, from_position )
  return Tag( match_object )
def OffsetForLineColumnInString( text, line, column ):
  """Map a 1-based Vim (line, column) position to a 0-based string offset.

  Returns None when the position is outside the text. Vim lets the cursor
  rest on column 1 of an empty line even though that line holds no character;
  in that case the offset of the newline just before that line is returned."""
  current_line = 1
  current_column = 0
  previous_char = ''
  for offset, char in enumerate( text ):
    current_column += 1
    if char == '\n':
      current_line += 1
      current_column = 0
    if current_line == line and current_column == column:
      return offset
    if current_line > line:
      # Vim allows the user to stop on an empty line and declares that column 1
      # exists even when there are no characters on that line
      if current_column == 0 and previous_char == '\n':
        return offset - 1
      break
    previous_char = char
  return None
def LineColumnForOffsetInString( text, offset ):
  """Inverse of OffsetForLineColumnInString: 0-based offset -> 1-based
  (line, column).

  An offset that points at a newline character (or past the end of the text)
  yields (None, None), since a newline has no column of its own."""
  line = 1
  column = 0
  for index, char in enumerate( text ):
    column += 1
    if char == '\n':
      line += 1
      column = 0
      continue
    if index == offset:
      return line, column
    if index > offset:
      break
  return None, None
def TagWithSameNameExistsInSequence( tag, sequence ):
  """Return True when any tag in `sequence` carries the same name as `tag`.

  (Idiom: any() over a generator replaces the manual loop-and-return.)
  """
  return any( current_tag.name == tag.name for current_tag in sequence )
def GetPreviousUnmatchedOpeningTag( html, cursor_offset ):
  """Scan backwards from cursor_offset and return the nearest opening tag
  that is not matched by a closing tag also before the cursor, or None."""
  search_index = cursor_offset
  tags_to_close = []
  while True:
    prev_tag = ReverseFindTag( html, search_index )
    if not prev_tag:
      # ran out of tags before the cursor
      break
    search_index = prev_tag.start_offset
    if prev_tag.kind == TagType.CLOSING:
      # remember closers seen; their openers must be skipped over
      tags_to_close.append( prev_tag )
    elif prev_tag.kind == TagType.OPENING:
      if tags_to_close:
        if tags_to_close[ -1 ].name == prev_tag.name:
          # this opener pairs with the most recent closer: balanced, drop it
          tags_to_close.pop()
        else:
          # opener does not match the pending closer (malformed HTML):
          # ignore it and keep scanning
          continue
      else:
        # no pending closers: this opener is unmatched -- found it
        return prev_tag
  # self-closed tags ignored
  return None
def GetNextUnmatchedClosingTag( html, cursor_offset ):
  """Scan forwards from cursor_offset and return the nearest closing tag whose
  opener does not also appear after the cursor, or None."""
  def RemoveClosedOpenTags( tags_to_close, new_tag ):
    # Drop the most recent opener matching new_tag, together with any openers
    # seen after it (those were left unclosed by malformed HTML).
    i = 1
    for tag in reversed( tags_to_close ):
      if tag.name == new_tag.name:
        break
      else:
        i += 1
    assert i <= len( tags_to_close )
    del tags_to_close[ -i: ]
    return tags_to_close

  search_index = cursor_offset
  tags_to_close = []
  while True:
    next_tag = ForwardFindTag( html, search_index )
    if not next_tag:
      # no more tags after the cursor
      break
    search_index = next_tag.end_offset
    if next_tag.kind == TagType.OPENING:
      # opener after the cursor: its closer belongs to it, not to us
      tags_to_close.append( next_tag )
    elif next_tag.kind == TagType.CLOSING:
      if not tags_to_close or not TagWithSameNameExistsInSequence(
          next_tag, tags_to_close ):
        # closes something opened before the cursor: this is the one
        return next_tag
      tags_to_close = RemoveClosedOpenTags( tags_to_close, next_tag )
    # self-closed tags ignored
  return None
def GetOpeningAndClosingTags( html, cursor_offset ):
  """Find the pair of tags enclosing cursor_offset.

  Fixes the closing tag first, then walks outward through unmatched opening
  tags until one with the same name is found. Returns (opening, closing) or
  (None, None) when no such pair exists."""
  closing_tag = GetNextUnmatchedClosingTag( html, cursor_offset )
  search_offset = cursor_offset
  while True:
    opening_tag = GetPreviousUnmatchedOpeningTag( html, search_offset )
    if not opening_tag or not closing_tag:
      return None, None
    if opening_tag.name == closing_tag.name:
      return opening_tag, closing_tag
    # names differ: retry from just before this opener
    search_offset = opening_tag.start_offset
def AdaptCursorOffsetIfNeeded( sanitized_html, cursor_offset ):
  """The cursor offset needs to be adapted if it is inside a tag.
  If the cursor is inside an opening tag, it will be moved to the index of the
  character just past the '>'. If it's inside the closing tag, it will be moved
  to the index of the '<'. This will ensure that both the opening and the
  closing tags are correctly found.
  If the cursor is inside a self-closed tag, then it doesn't really matter what
  we do with it, the surrounding tags will be correctly found (the self-closed
  tag is ignored, as it should be)."""
  # Walk left to the '<' that could start a tag containing the cursor.
  preceding_angle_bracket_index = cursor_offset
  while True:
    if preceding_angle_bracket_index < 0:
      # ran off the start of the buffer: cursor is not inside any tag
      return cursor_offset
    char = sanitized_html[ preceding_angle_bracket_index ]
    if preceding_angle_bracket_index != cursor_offset and char == '>':
      # Not inside a tag, no need for adaptation
      return cursor_offset
    if char == '<':
      break
    preceding_angle_bracket_index -= 1
  tag = Tag( TAG_REGEX.match( sanitized_html,
                              preceding_angle_bracket_index ) )
  if not tag:
    # the '<' did not begin a parsable tag; leave the cursor alone
    return cursor_offset
  if tag.kind == TagType.OPENING:
    return tag.end_offset
  return tag.start_offset
def LocationsOfEnclosingTags( input_html, cursor_line, cursor_column ):
bad_result = ( 0, 0, 0, 0 )
try:
sanitized_html = PacifyHtmlComments( input_html )
cursor_offset = OffsetForLineColumnInString( sanitized_html,
cursor_line,
cursor_column )
if cursor_offset == None:
return bad_result
adapted_cursor_offset = AdaptCursorOffsetIfNeeded( sanitized_html,
cursor_offset )
opening_tag, closing_tag = GetOpeningAndClosingTags( sanitized_html,
adapted_cursor_offset )
if not opening_tag or not closing_tag:
return bad_result
opening_tag_line, opening_tag_column = LineColumnForOffsetInString(
sanitized_ht |
from pymongo import MongoClient
from pymongo.collection import Collection
from pymongo.errors import AutoReconnect
from django.conf import settings
from types import FunctionType
import functools
import time
__all__ = ("connection", "connections", "db", "get_db")
"""
Goals:
* To provide a clean universal handler for Mongo, similar to how Django does it
for other db connections, but Mongo is unique and simple enough to just live on
its own.
* To wrap the pymongo Collection methods automatically with a reconnect decorator
in case a server is temporarily down, or a replica set is in the middle of failing
over to a secondary server.
"""
"""
In settings.py:
MONGODB = {
'default': {
'NAME': 'db1' # Default database to connect to
'LOCATION': [ # An array of host strings, similar to the CACHES setting.
'localhost:27017',
]
}
}
Usage:
from mongodb import connections, connection, db
connections['default'].db1.messages.find({'key': 'value'}) # manually select the 'default' connection
connection.db1.messages.find({'key': 'value'}) # manually specific the database to be used to override "NAME"
db.messages.find({'key': 'value'}) # Just let the library use all of the defaults
"""
def with_reconnect(func):
    """
    Handle when AutoReconnect is raised from pymongo. This is the standard error
    raised for everything from "host disconnected" to "couldn't connect to host"
    and more.
    The sleep handles the edge case when the state of a replica set changes, and
    the cursor raises AutoReconnect because the master may have changed. It can
    take some time for the replica set to stop raising this exception, and the
    small sleep and iteration count gives us a couple of seconds before we fail
    completely. See also http://jira.mongodb.org/browse/PYTHON-216
    """
    @functools.wraps(func)
    def _reconnector(*args, **kwargs):
        # Retry for up to 20 * 0.25s = ~5 seconds before giving up.
        for x in xrange(20):
            try:
                return func(*args, **kwargs)
            except AutoReconnect:
                time.sleep(0.250)
                pass
        # NOTE(review): a bare `raise` outside an except block re-raises the
        # most recently handled exception only on Python 2 (this module uses
        # xrange, so it is Python 2 code); on Python 3 this would raise
        # RuntimeError -- confirm before porting.
        raise
    return _reconnector
class ConnectionDoesNotExist(Exception):
    """Raised when a connection alias is not present in settings.MONGODB."""
    pass
class CollectionWrapper(object):
    """Proxy around a pymongo Collection that wraps function attributes with
    with_reconnect so transient AutoReconnect errors are retried."""
    def __init__(self, collection):
        self._collection = collection

    def __getattr__(self, func):
        old = getattr(self._collection, func)
        # NOTE(review): bound methods are MethodType, not FunctionType, so this
        # check may never be true for pymongo's methods -- verify that
        # wrapping actually happens for the attributes you care about.
        if type(old) is FunctionType:
            return with_reconnect(old)
        return old

    def __repr__(self):
        return "<CollectionWrapper %s>" % self._collection.__repr__()

    def __str__(self):
        return "<CollectionWrapper %s>" % self._collection.__str__()
class DatabaseWrapper(object):
    """Proxy around a pymongo Database: function attributes get the
    with_reconnect retry treatment, Collection attributes are returned as
    CollectionWrapper instances."""
    def __init__(self, database):
        self._database = database

    def __getattr__(self, func):
        old = getattr(self._database, func)
        # NOTE(review): as in CollectionWrapper, bound methods are MethodType,
        # not FunctionType -- confirm this branch ever triggers.
        if type(old) is FunctionType:
            return with_reconnect(old)
        elif isinstance(old, Collection):
            return CollectionWrapper(old)
        return old

    def __getitem__(self, func):
        # Same as attribute access, but for db["collection"] style lookups.
        old = getattr(self._database, func)
        if isinstance(old, Collection):
            return CollectionWrapper(old)
        return old

    def __repr__(self):
        return "<DatabaseWrapper %s>" % self._database.__repr__()

    def __str__(self):
        return "<DatabaseWrapper %s>" % self._database.__str__()
class ConnectionWrapper(object):
    """Wraps a MongoClient; attribute access returns cached DatabaseWrapper
    objects, with "default" aliased to the configured default database."""

    def __init__(self, connection, default=None):
        self._connection = connection
        self._databases = {}
        self._default = default

    def __getattr__(self, alias):
        # "default" resolves to the configured default database name, if any.
        if self._default is not None and alias == "default":
            alias = self._default
        try:
            return self._databases[alias]
        except KeyError:
            wrapper = DatabaseWrapper(self._connection[alias])
            self._databases[alias] = wrapper
            return wrapper

    def __repr__(self):
        return "<ConnectionWrapper %s>" % self._connection.__repr__()

    def __str__(self):
        return "<ConnectionWrapper %s>" % self._connection.__str__()
class MongoHandler(object):
    """Lazily creates and caches one ConnectionWrapper per configured alias.

    `databases` is the settings.MONGODB mapping of alias ->
    {"NAME": ..., "LOCATION": [...], "authSource": ...}.
    """
    def __init__(self, databases):
        self.databases = databases
        self._connections = {}

    def __getitem__(self, alias):
        """Return (creating on first use) the wrapped connection for `alias`.

        Raises ConnectionDoesNotExist when the alias is not configured.
        """
        if alias in self._connections:
            return self._connections[alias]
        # FIX: the config dict is looked up once and reused; the previous code
        # bound it to `conn` purely as an existence probe and then re-read
        # self.databases[alias] three more times.
        try:
            conf = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        conn = MongoClient(list(conf["LOCATION"]), authSource=conf["authSource"])
        self._connections[alias] = ConnectionWrapper(conn, conf["NAME"])
        return self._connections[alias]
def get_db():
    """Build a handler from settings.MONGODB and return the default database
    of the "default" connection as a DatabaseWrapper."""
    connections = MongoHandler(settings.MONGODB)
    connection = connections["default"]
    return connection.default

# Module-level handle; note that importing this module therefore connects
# using settings.MONGODB.
db = get_db()
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Support for the KDE4 libraries and msgfmt
"""
import os, sys, re
from waflib import Options, TaskGen, Task, Utils
from waflib.TaskGen import feature, after_method
@feature('msgfmt')
def apply_msgfmt(self):
    """
    Process all languages to create .mo files and to install them::

        def build(bld):
            bld(features='msgfmt', langs='es de fr', appname='myapp', install_path='${KDE4_LOCALE_INSTALL_DIR}')
    """
    appname = getattr(self, 'appname', 'set_your_appname')
    inst = getattr(self, 'install_path', '${KDE4_LOCALE_INSTALL_DIR}')
    mode = getattr(self, 'chmod', Utils.O644)
    for lang in self.to_list(self.langs):
        po_node = self.path.find_resource(lang + '.po')
        tsk = self.create_task('msgfmt', po_node, po_node.change_ext('.mo'))
        # only the last path component names the language
        langname = lang.split('/')[-1]
        dest = (inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep
                + appname + '.mo')
        self.bld.install_as(dest, tsk.outputs[0], chmod=mode)
class msgfmt(Task.Task):
    """
    Transform .po files into .mo files
    """
    color = 'BLUE'  # console color waf uses when printing this task
    run_str = '${MSGFMT} ${SRC} -o ${TGT}'  # command line executed by the task
def configure(self):
    """
    Detect kde4-config and set various variables for the *use* system::

        def options(opt):
            opt.load('compiler_cxx kde4')
        def configure(conf):
            conf.load('compiler_cxx kde4')
        def build(bld):
            bld.program(source='main.c', target='app', use='KDECORE KIO KHTML')
    """
    kdeconfig = self.find_program('kde4-config')
    prefix = self.cmd_and_log('%s --prefix' % kdeconfig).strip()
    # The dependencies file moved between KDE versions; try both locations.
    fname = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
    try: os.stat(fname)
    except OSError:
        fname = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
        try: os.stat(fname)
        except OSError: self.fatal('could not open %s' % fname)
    try:
        txt = Utils.readf(fname)
    except (OSError, IOError):
        self.fatal('could not read %s' % fname)
    # The file is a cmake script: join continuation lines, strip comments,
    # then harvest every set(KEY "value") assignment into the environment.
    txt = txt.replace('\\\n', '\n')
    # FIX: regex literals are now raw strings -- '\s' and '\(' are invalid
    # escape sequences in plain literals (SyntaxWarning on modern Python).
    fu = re.compile(r'#(.*)\n')
    txt = fu.sub('', txt)
    setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+"([^"]+)"\)')
    found = setregexp.findall(txt)
    for (_, key, val) in found:
        #print key, val
        self.env[key] = val
    # well well, i could just write an interpreter for cmake files
    self.env['LIB_KDECORE']= ['kdecore']
    self.env['LIB_KDEUI'] = ['kdeui']
    self.env['LIB_KIO'] = ['kio']
    self.env['LIB_KHTML'] = ['khtml']
    self.env['LIB_KPARTS'] = ['kparts']
    self.env['LIBPATH_KDECORE'] = [os.path.join(self.env.KDE4_LIB_INSTALL_DIR, 'kde4', 'devel'), self.env.KDE4_LIB_INSTALL_DIR]
    self.env['INCLUDES_KDECORE'] = [self.env['KDE4_INCLUDE_INSTALL_DIR']]
    self.env.append_value('INCLUDES_KDECORE', [self.env['KDE4_INCLUDE_INSTALL_DIR']+ os.sep + 'KDE'])
    self.find_program('msgfmt', var='MSGFMT')
|
"""
This is a comment
"""
#try: # this part to import as part of the DIRAC framework
import xml.sax
from xml.sax.handler import ContentHandler
from DIRAC.Core.Workflow.Parameter import *
from DIRAC.Core.Workflow.Module import *
from DIRAC.Core.Workflow.Step import *
from DIRAC.Core.Workflow.Workflow import Workflow
__RCSID__ = "$Id$"
class WorkflowXMLHandler(ContentHandler):
def __init__(self, new_wf=None):
""" If new_wf defined, it will be used as root of document """
# this is an attribute for the object to be created from the XML document
self.root=new_wf # the reference on the all document
self.stack=None # to keep last object
self.strings=None # to accumulate string object (list of strings) used to split long string
def startDocument(self):
#reset the process
#self.root=None
self.stack=[]
self.strings=[]
def endDocument(self):
pass
def startElement(self, name, attrs):
#print name ,"startElement", "attr=", attrs.getLength(), attrs.getNames()
self.clearCharacters() # clear to remove empty or nonprintable characters
if name == "Workflow":
if self.root == None: #if root not defined by constractor
self.root = Workflow()
self.stack.append(self.root)
elif name == "StepDefinition":
obj = StepDefinition("TemporaryXMLObject_StepDefinition")
if self.root == None: # in case we are saving Step only
self.root = obj
self.stack.append(obj)
elif name == "StepInstance":
obj = StepInstance("TemporaryXMLObject_StepInstance")
self.stack.append(obj)
elif name == "ModuleDefinition":
obj = ModuleDefinition("TemporaryXMLObject_ModuleDefinition")
if self.root == None: # in case we are saving Module only
self.root = obj
self.stack.append(obj)
elif name == "ModuleInstance":
obj = ModuleInstance("TemporaryXMLObject_ModuleInstance")
self.stack.append(obj)
elif name == "Parameter":
obj = Parameter(str(attrs['name']), None, str(attrs['type']), str(attrs['linked_module']), str(attrs['linked_parameter']), str(attrs['in']), str(attrs['out']), str(attrs['description']))
self.stack.append(obj)
# TEMPORARY CODE
elif name=="origin" or name == "version" or name == "name" or name == "type" or name == "value" or\
name == "required" or name == "descr_short" or name == "name" or name == "type" or name == "description" or name == "body":
pass
else:
print "UNTREATED! startElement name=", name, "attr=", attrs.getLength(), attrs.getNames()
pass
def endElement(self, name):
#print name, "endElement"
# attributes
if name=="origin":
self.stack[len(self.stack)-1].setOrigin(self.getCharacters())
elif name == "version":
self.stack[len(self.stack)-1].setVersion(self.getCharacters())
elif name == "name":
self.stack[len(self.stack)-1].setName(self.getCharacters())
elif name == "type":
self.stack[len(self.stack)-1].setType(self.getCharacters())
elif name == "required":
self.stack[len(self.stack)-1].setRequired(self.getCharacters())
elif name == "descr_short":
self.stack[len(self.stack)-1].setDescrShort(self.getCharacters())
elif name == "name":
self.stack[len(self.stack)-1].setName(self.getCharacters())
elif name == "type":
self.stack[len(self.stack)-1].setType(self.getCharacters())
elif name == "description":
self.stack[len(self.stack)-1].setDescription(self.getCharacters())
elif name == "body":
self.stack[len(self.stack)-1].setBody(self.getCharacters())
elif name == "value":
ch = self.getCharacters()
# to keep compatibility with the old version
# were """ was not used for the string
if self.stack[len(self.stack)-1].isTypeString():
self.stack[len(self.stack)-1].setValue(ch)
else:
self.stack[len(self.stack)-1].setValue(eval(ch))
#objects
elif name=="Workflow":
self.stack.pop()
elif name == "StepDefinition":
self.root.step_definiti | ons.append(self.stack.pop())
elif name == "StepInstance":
self.root.step_instances.append(self.stack.pop())
elif name == "ModuleDefinition":
self.root.addModule(self.stack.pop())
elif name == "ModuleInstance":
obj=self.stack.pop()
self.stack[len(self.stack)-1].module_instances.append(obj)
elif name == "Parameter":
obj=self.stack.pop();
self.stack[len(self.stack)-1].addParameter(obj)
else:
| print "UNTREATED! endElement", name
def getCharacters(self):
# combine all strings and clear the list
ret = ''.join(self.strings)
self.clearCharacters()
return str(ret)
def clearCharacters(self):
del self.strings
self.strings=[]
def characters(self, content):
self.strings.append(content)
|
#encoding:utf-8

# reddit2telegram channel config: which subreddit feeds which Telegram channel.
subreddit = 'getmotivated'
t_channel = '@r_getmotivated'


def send_post(submission, r2t):
    """Forward a reddit submission to the channel via r2t.send_simple."""
    return r2t.send_simple(submission)
|
'), 'LIST,len=5')
self.assertEquals(token_value('container(TUPLE,len=2)'), 'TUPLE,len=2')
self.assertEquals(token_value('number(42)'), '42')
self.assertEquals(token_value('string("ab(c)")'), '"ab(c)"')
def test_token_length(self):
self.assertEquals(token_length('key(field1)'), 1)
self.assertEquals(token_length('wrapper(OPTIONAL)'), 1)
self.assertEquals(token_length('container(LIST,len=5)'), 5)
self.assertEquals(token_length('container(TUPLE,len=2)'), 2)
self.assertEquals(token_length('number(42)'), 0)
self.assertEquals(token_length('string("ab(c)")'), 0)
def test_print_rulekey(self):
with captured_output() as (out, err):
print_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals('\n'.join([
'key(field1)',
' container(TUPLE,len=2)',
' container(LIST,len=3)',
' string("s1")',
' string("s2")',
' string("s3")',
' wrapper(OPTIONAL)',
' string("s4")',
'key(field2)',
' number(42)',
''
]), out.getvalue())
def test_reconstruct_rulekey(self):
s = reconstruct_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals(s.token, 'root()')
self.assertEquals(len(s), 2)
self.assertEquals(s[0].token, 'key(field1)')
self.assertEquals(len(s[0]), 1)
self.assertEquals(s[0][0].token, 'container(TUPLE,len=2)')
self.assertEquals(len(s[0][0]), 2)
self.assertEquals(s[0][0][0].token, 'container(LIST,len=3)')
self.assertEquals(len(s[0][0][0]), 3)
self.assertEquals(s[0][0][0][0].token, 'string("s1")')
self.assertEquals(len(s[0][0][0][0]), 0)
self.assertEquals( | s[0][0][0][1].token, 'string("s2")')
self.assertEquals(len(s[0] | [0][0][1]), 0)
self.assertEquals(s[0][0][0][2].token, 'string("s3")')
self.assertEquals(len(s[0][0][0][2]), 0)
self.assertEquals(s[0][0][1].token, 'wrapper(OPTIONAL)')
self.assertEquals(len(s[0][0][1]), 1)
self.assertEquals(s[0][0][1][0].token, 'string("s4")')
self.assertEquals(len(s[0][0][1][0]), 0)
self.assertEquals(s[1].token, 'key(field2)')
self.assertEquals(len(s[1]), 1)
self.assertEquals(s[1][0].token, 'number(42)')
self.assertEquals(len(s[1][0]), 0)
@staticmethod
def diff_rulekeys_result(s1, s2):
res = []
def visitor(p1, _s1, p2, _s2): res.append((p1, p2))
diff_rulekeys(s1, s2, visitor)
return res
def test_diff_rulekeys_insert_or_remove_element(self):
s1 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=2)', 'string("s1")', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s1")', 'string("s2")', 'string("s3")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different length
('/root():0/key(k1):0/container(LIST,len=2)',
'/root():0/key(k1):0/container(LIST,len=3)'),
# report 'None' on the left != 'string("s2")' on the right
('/root():0/key(k1):0/container(LIST,len=2):None',
'/root():0/key(k1):0/container(LIST,len=3):1/string("s2")')
])
def test_diff_rulekeys_change_element_order(self):
s1 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s1")', 'string("s2")', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s2")', 'string("s3")', 'string("s1")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different order
('/root():0/key(k1):0/container(LIST,len=3):order[0, 1, 2]',
'/root():0/key(k1):0/container(LIST,len=3):order[2, 0, 1]'),
])
def test_diff_rulekeys_insert_or_remove_key(self):
s1 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k3)', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k2)', 'string("s2")', 'key(k3)', 'string("s3")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report 'None' on the left != 'key(k2)' on the right
('/root():None',
'/root():1/key(k2)'),
])
def test_diff_rulekeys_change_key_order(self):
s1 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k2)', 'string("s2")', 'key(k3)', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k2)', 'string("s2")', 'key(k3)', 'string("s3")', 'key(k1)', 'string("s1")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different order
('/root():order[0, 1, 2]',
'/root():order[2, 0, 1]'),
])
def test_find_children(self):
s = reconstruct_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals(find_children(s, r'field1', 0), [])
self.assertEquals(find_children(s, r'field1', 1), [s[0]])
self.assertEquals(find_children(s, r'field2', 1), [s[1]])
t = s[0][0] # 'container(TUPLE,len=2)'
self.assertEquals(find_children(s, r'string'), [t[0][0], t[0][1], t[0][2], t[1][0]])
def get_keys1(self):
return {
'rulekey1': [
'key(deps)',
'container(LIST,len=2)',
'string("//fake:ruleB")',
'string("//fake:ruleC")',
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleA")',
],
'rulekey2': [
'key(.rule_key_type)',
'string("input")',
'key(.target_name)',
'string("//fake:ruleA")',
],
'rulekey3': [
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleB")',
],
}
def test_find_keys(self):
keys = self.get_keys1()
self.assertEquals(
sorted(find_keys(keys, [])),
[])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleA', None)])),
['rulekey1', 'rulekey2'])
self.assertEquals(
sorted(find_keys(
keys,
[(r'fake:ruleA', '.target_name'), (r'default', '.rule_key_type')])),
['rulekey1'])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleB', None)])),
['rulekey1', 'rulekey3'])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleB', '.target_name')])),
['rulekey3'])
def test_extract_target(self):
tokens = [
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleB")']
self.assertEquals |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class PathologicalMobileSitesPage(page_module.Page):
  """One pathologically janky mobile site, exercised with a smooth scroll."""

  def __init__(self, url, page_set):
    super(PathologicalMobileSitesPage, self).__init__(
        url=url, page_set=page_set, credentials_path='data/credentials.json')
    self.user_agent_type = 'mobile'
    self.archive_data_file = 'data/pathological_mobile_sites.json'

  def RunPageInteractions(self, action_runner):
    # Record the full-page scroll as a single smooth gesture interaction.
    scroll = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll.End()
class PathologicalMobileSitesPageSet(page_set_module.PageSet):
  """Pathologically bad and janky sites on mobile."""

  def __init__(self):
    super(PathologicalMobileSitesPageSet, self).__init__(
        user_agent_type='mobile',
        archive_data_file='data/pathological_mobile_sites.json',
        bucket=page_set_module.PARTNER_BUCKET)

    # Known-janky pages; each becomes one user story.
    urls = ['http://edition.cnn.com',
            'http://m.espn.go.com/nhl/rankings',
            'http://recode.net',
            'http://www.latimes.com',
            ('http://www.pbs.org/newshour/bb/'
             'much-really-cost-live-city-like-seattle/#the-rundown'),
            ('http://www.theguardian.com/politics/2015/mar/09/'
             'ed-balls-tory-spending-plans-nhs-charging'),
            'http://www.zdnet.com',
            'http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
            'https://www.linkedin.com/in/linustorvalds']
    for url in urls:
      self.AddUserStory(PathologicalMobileSitesPage(url, self))
|
import os
import shutil
import logging
import tarfile
import tempfile
import subprocess
from threading import Thread
class TarToWebmCompressor(object):
    """Compress a tar archive of sequentially numbered JPEG frames
    ('0001.jpg', ...) into a two-pass VP8/WebM movie next to the archive,
    deleting the archive on success.  Runs on a background thread so callers
    are never blocked by ffmpeg."""

    @staticmethod
    def compress(filename):
        """Kick off background compression of `filename` and return at once."""
        thread = Thread(target=TarToWebmCompressor._compress, args=(filename,))
        thread.start()

    @staticmethod
    def _compress(filename):
        """Worker: extract the tar into a temp dir, run a two-pass ffmpeg
        encode producing '<filename>.webm', then clean up."""
        # extract tar
        tmp_dir = tempfile.mkdtemp()
        try:
            # Context manager closes the archive even if extraction fails.
            with tarfile.open(filename, "r|") as tar:
                tar.extractall(tmp_dir)
        except Exception:
            logging.exception("Failed extract %s file" % (filename,))
            # A partial extraction leaves files behind, so os.rmdir() would
            # fail; remove the whole tree.
            shutil.rmtree(tmp_dir, ignore_errors=True)
            return

        # compress images into web movie
        # Commands are argument lists (not shell strings): check_call with a
        # plain string fails on POSIX, and lists also handle spaces safely.
        vopts = ['-c:v', 'libvpx', '-quality', 'good', '-cpu-used', '0',
                 '-b:v', '500k', '-qmin', '4', '-qmax', '60',
                 '-bufsize', '5000k', '-threads', '1']
        input_images = os.path.join(tmp_dir, '%04d.jpg')
        hour_webm = filename + '.webm'
        null = 'NUL' if os.name == 'nt' else '/dev/null'
        common = ['ffmpeg', '-y', '-v', 'quiet', '-nostats',
                  '-f', 'image2', '-i', input_images]
        cmd1 = common + ['-pass', '1', '-f', 'rawvideo'] + vopts + [null]
        cmd2 = common + ['-pass', '2'] + vopts + [hour_webm]
        try:
            subprocess.check_call(cmd1)
            subprocess.check_call(cmd2)
            os.remove(filename)
        except subprocess.CalledProcessError:
            logging.exception("Failed to compress %s file" % (filename,))
        finally:
            shutil.rmtree(tmp_dir, ignore_errors=True)
        # TODO: combine the hourly movies into daily ones (e.g. via
        # mkvmerge); an earlier commented-out sketch of that logic was
        # removed from here.
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Lunzhy'
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from pyshanbay.shanbay import VisitShanbay
from pyshanbay import page_parser as parser
from gui.ui_main import UIMainWidget
class MainForm(QWidget):
    """Widget showing member nicknames and today's check-in counts in a
    read-only, single-selection table."""

    def __init__(self):
        super().__init__()
        self.table = QTableWidget()
        box = QHBoxLayout()
        box.addWidget(self.table)
        self.setLayout(box)
        # Read-only cells, whole-row single selection, fixed row heights.
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.table.setSelectionMode(QAbstractItemView.SingleSelection)
        self.table.verticalHeader().setResizeMode(QHeaderView.Fixed)
        self.table.itemClicked.connect(self.show_selected)

    def set_data(self, members_data):
        """Fill the table: one row per member (nickname, checked_today)."""
        self.table.setColumnCount(2)
        self.table.setRowCount(len(members_data))
        for row, member in enumerate(members_data):
            self.table.setItem(row, 0, QTableWidgetItem(member['nickname']))
            self.table.setItem(
                row, 1, QTableWidgetItem(str(member['checked_today'])))

    def show_selected(self):
        """Print nickname and check-in count for the selected row."""
        selected = self.table.selectionModel().selectedRows()
        row = selected[0].row()
        print(self.table.item(row, 0).text())
        print(self.table.item(row, 1).text())
def get_data():
    """Log in to Shanbay and return parsed member info from every page of
    the team member list."""
    client = VisitShanbay()
    client.login()
    first_page = client.members()
    total_pages = int(parser.total_page_members(first_page))
    # Fetch every member page, then parse them all in one pass.
    htmls = [client.members_page(n) for n in range(1, total_pages + 1)]
    return parser.parse_members_info(htmls)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    main_form = UIMainWidget()
    main_form.set_data_members(get_data())
    main_form.show()
    # Propagate the Qt event loop's exit status; previously it was
    # discarded and the process always exited 0.
    sys.exit(app.exec_())
from typing import List
import click
from valohai.internals.pipeline import get_pipeline_from_source
from valohai.yaml import config_to_yaml
from valohai_yaml.objs.config import Config
from valohai_cli.ctx import get_project
from valohai_cli.exceptions import ConfigurationError
from valohai_cli.messages import error, info
from valohai_cli.models.project import Project
@click.command()
@click.argument(
    "filenames",
    nargs=-1,
    type=click.Path(file_okay=True, exists=True, dir_okay=False),
    required=True,
)
def pipeline(filenames: List[str]) -> None:
    """
    Update a pipeline config(s) in valohai.yaml based on Python source file(s).
    Python source file is expected to have def main(config: Config) -> Pipeline
    Example:
    vh yaml pipeline mypipeline.py
    :param filenames: Path(s) of the Python source code files.
    """
    project = get_project(require=True)
    yaml_filename = project.get_config_filename()
    did_update = False
    for source_path in filenames:
        # Re-read the config each round: a previous iteration may have
        # rewritten the YAML file already.
        base_config = get_current_config(project)
        try:
            fresh_config = get_pipeline_from_source(source_path, base_config)
        except Exception:
            error(
                f"Retrieving a new pipeline definition for project {project} for {source_path} failed.\n"
                f"The configuration file in use is {yaml_filename}. "
                f"See the full traceback below."
            )
            raise
        merged = base_config.merge_with(fresh_config)
        # Only touch the file when merging actually changed something.
        if base_config.serialize() == merged.serialize():
            continue
        with open(yaml_filename, "w") as out_file:
            out_file.write(config_to_yaml(merged))
        did_update = True
    if did_update:
        info(f"{yaml_filename} updated.")
    else:
        info(f"{yaml_filename} already up-to-date.")
def get_current_config(project: Project) -> Config:
    """Load the project's current valohai.yaml config.

    Raises ConfigurationError (chained to the original FileNotFoundError)
    when the configuration file does not exist yet.
    """
    try:
        return project.get_config()
    except FileNotFoundError as missing:
        valohai_yaml_name = project.get_config_filename()
        raise ConfigurationError(
            f"Did not find {valohai_yaml_name}. "
            f"Can't create a pipeline without preconfigured steps."
        ) from missing
|
import os
from pip.backwardcompat import urllib
from tests.lib.path import Path
from pip.index import package_to_requirement, HTMLPage
from pip.index import PackageFinder, Link, INSTALLED_VERSION
from tests.lib import path_to_url
from string import ascii_lowercase
from mock import patch
def test_package_name_should_be_converted_to_requirement():
    """
    Test that it translates a name like Foo-1.2 to Foo==1.2
    """
    # (Docstring previously said "Foo==1.3", contradicting the assertion.)
    assert package_to_requirement('Foo-1.2') == 'Foo==1.2'
    assert package_to_requirement('Foo-dev') == 'Foo==dev'
    assert package_to_requirement('Foo') == 'Foo'
def test_html_page_should_be_able_to_scrap_rel_links():
    """
    Test scraping page looking for url in href
    """
    html = """
<!-- The <th> elements below are a terrible terrible hack for setuptools -->
<li>
<strong>Home Page:</strong>
<!-- <th>Home Page -->
<a href="http://supervisord.org/">http://supervisord.org/</a>
</li>"""
    page = HTMLPage(html, "supervisor")
    scraped = list(page.scraped_rel_links())
    assert len(scraped) == 1
    assert scraped[0].url == 'http://supervisord.org/'
def test_sort_locations_file_find_link(data):
    """
    Test that a file:// find-link dir gets listdir run
    """
    pf = PackageFinder([data.find_links], [])
    # A find-links directory should be classified as files, not urls.
    files, urls = pf._sort_locations([data.find_links])
    assert files and not urls, "files and not urls should have been found at find-links url: %s" % data.find_links
def test_sort_locations_file_not_find_link(data):
    """
    Test that a file:// url dir that's not a find-link, doesn't get a listdir run
    """
    pf = PackageFinder([], [])
    # An index URL directory should be classified as urls, not files.
    files, urls = pf._sort_locations(data.index_url("empty_with_pkg"))
    assert urls and not files, "urls, but not files should have been found"
def test_INSTALLED_VERSION_greater():
    """Test INSTALLED_VERSION compares greater."""
    # INSTALLED_VERSION must sort above any ordinary link.
    some_link = Link("some link")
    assert INSTALLED_VERSION > some_link
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of box-linux-sync.
#
# Copyright (C) 2013 Vítor Brandão <noisebleed@noiselabs.org>
#
# box-linux-sync is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# box-linux-sync is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with box-linux-sync; if not, see
# <http://www.gnu.org/licenses/>.
from __future__ import print_function
import curses
import logging
import os
import time
import sys
import types
from noiselabs.box.config import BASEDIR
from noiselabs.box.utils import create_file
from noiselabs.box.ansistrm import ColorizingStreamHandler
################################################################################
##
## Color codes (taken from Portage)
##
################################################################################
# Terminal color tables (adapted from Gentoo Portage's output module).
_styles = {}
"""Maps style class to tuple of attribute names."""
codes = {}
"""Maps attribute name to ansi code."""
# ANSI CSI (Control Sequence Introducer); every escape below starts with it.
esc_seq = "\x1b["
# Text attributes.
codes["normal"] = esc_seq + "0m"
codes['reset'] = esc_seq + "39;49;00m"
codes["bold"] = esc_seq + "01m"
codes["faint"] = esc_seq + "02m"
codes["standout"] = esc_seq + "03m"
codes["underline"] = esc_seq + "04m"
codes["blink"] = esc_seq + "05m"
codes["overline"] = esc_seq + "06m"
codes["reverse"] = esc_seq + "07m"
codes["invisible"] = esc_seq + "08m"
# Attribute resets.
codes["no-attr"] = esc_seq + "22m"
codes["no-standout"] = esc_seq + "23m"
codes["no-underline"] = esc_seq + "24m"
codes["no-blink"] = esc_seq + "25m"
codes["no-overline"] = esc_seq + "26m"
codes["no-reverse"] = esc_seq + "27m"
# Background colors (40-49).
codes["bg_black"] = esc_seq + "40m"
codes["bg_darkred"] = esc_seq + "41m"
codes["bg_darkgreen"] = esc_seq + "42m"
codes["bg_brown"] = esc_seq + "43m"
codes["bg_darkblue"] = esc_seq + "44m"
codes["bg_purple"] = esc_seq + "45m"
codes["bg_teal"] = esc_seq + "46m"
codes["bg_lightgray"] = esc_seq + "47m"
codes["bg_default"] = esc_seq + "49m"
# Some terminals render "brown" as dark yellow.
codes["bg_darkyellow"] = codes["bg_brown"]
def color(fg, bg="default", attr=("normal",)):
    """Return the ANSI escape prefix for foreground `fg`, background `bg`
    and the extra attribute names in `attr` (any iterable of names).

    The default for `attr` is a tuple rather than a list: a mutable default
    argument would be shared between all calls.
    """
    mystr = codes[fg]
    for x in [bg] + list(attr):
        mystr += codes[x]
    return mystr
# Foreground codes: for each base color (SGR 30-37) generate both the
# normal ("<n>m") and bright ("<n>;01m") variants, in that order.
ansi_codes = []
for x in range(30, 38):
    ansi_codes.append("%im" % x)
    ansi_codes.append("%i;01m" % x)
# RGB hex names for the 16 standard terminal colors, ordered to line up
# with the dark/bright pairs generated above.
rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
    '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
    '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
for x in range(len(rgb_ansi_colors)):
    codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
# Don't leak the loop variable into the module namespace.
del x
# Friendly names aliasing the hex color entries above.
codes["black"] = codes["0x000000"]
codes["darkgray"] = codes["0x555555"]
codes["red"] = codes["0xFF5555"]
codes["darkred"] = codes["0xAA0000"]
codes["green"] = codes["0x55FF55"]
codes["darkgreen"] = codes["0x00AA00"]
codes["yellow"] = codes["0xFFFF55"]
codes["brown"] = codes["0xAA5500"]
codes["blue"] = codes["0x5555FF"]
codes["darkblue"] = codes["0x0000AA"]
codes["fuchsia"] = codes["0xFF55FF"]
codes["purple"] = codes["0xAA00AA"]
codes["turquoise"] = codes["0x55FFFF"]
codes["teal"] = codes["0x00AAAA"]
codes["white"] = codes["0xFFFFFF"]
codes["lightgray"] = codes["0xAAAAAA"]
codes["darkteal"] = codes["turquoise"]
# Some terminals have darkyellow instead of brown.
codes["0xAAAA00"] = codes["brown"]
codes["darkyellow"] = codes["0xAAAA00"]
# Colors from /etc/init.d/functions.sh
_styles["NORMAL"] = ( "normal", )
_styles["GOOD"] = ( "green", )
_styles["WARN"] = ( "yellow", )
_styles["BAD"] = ( "red", )
_styles["HILITE"] = ( "teal", )
_styles["BRACKET"] = ( "blue", )
def style_to_ansi_code(style):
    """
    @param style: A style name
    @type style: String
    @rtype: String
    @return: A string containing one or more ansi escape codes that are
        used to render the given style.
    """
    # Unknown attribute names pass through unchanged — they may already be
    # raw ansi codes that slipped through ansi_code_pattern.
    return "".join(codes.get(name, name) for name in _styles[style])
def colorize(color_key, text):
    """Wrap `text` in the escape sequence for `color_key` — either a raw
    code name or a style class — returning it unchanged for unknown keys."""
    if color_key in codes:
        prefix = codes[color_key]
    elif color_key in _styles:
        prefix = style_to_ansi_code(color_key)
    else:
        return text
    return prefix + text + codes["reset"]
class BoxConsole():
    """
    A class that performs fancy terminal formatting for status and informational
    messages built upon the logging module.
    """
    def __init__(self, opts, name):
        self.name = name
        self.opts = opts
        self.logger = logging.getLogger(name)
        # Verbose mode lowers the threshold to DEBUG.
        self.level = logging.DEBUG if self.opts.verbose else logging.INFO
        self.logger.setLevel(self.level)
        # Colorized console handler.
        console_handler = ColorizingStreamHandler()
        console_handler.setLevel(self.level)
        #console_handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(console_handler)
        # Optional plain-text log file with full timestamps.
        if self.opts.log:
            logfile = os.path.join(BASEDIR, 'box-sync.log')
            create_file(logfile)
            file_handler = logging.FileHandler(logfile)
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(logging.Formatter(
                '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s'))
            self.logger.addHandler(file_handler)

    # Thin delegating wrappers around the underlying logger.
    def debug(self, msg):
        self.logger.debug(msg)

    def info(self, msg):
        self.logger.info(msg)

    def warning(self, msg):
        self.logger.warning(msg)

    def error(self, msg):
        self.logger.error(msg)

    def critical(self, msg):
        self.logger.critical(msg)

    def log(self, lvl, msg):
        self.logger.log(lvl, msg)

    def countdown(self, secs=5, doing="Starting"):
        """ This method is based on Portage's _emerge.countdown
        Copyright 1999-2009 Gentoo Foundation"""
        if not secs:
            return
        print("Waiting",secs,"seconds before starting (Control-C to abort)...")
        print(doing+" in: ", end=' ')
        # Tick down secs..1, one red number per second.
        for remaining in range(secs, 0, -1):
            sys.stdout.write(colorize("red", str(remaining)+" "))
            sys.stdout.flush()
            time.sleep(1)
        print()
|
": 1, "test2": "{{ beer }}"},
["{{ beer }}", 1], |
)
def test_time_zone():
    """Test time zone validation."""
    validator = vol.Schema(cv.time_zone)

    # Nonexistent zone names must be rejected.
    with pytest.raises(vol.MultipleInvalid):
        validator("America/Do_Not_Exist")

    validator("America/Los_Angeles")
    validator("UTC")
def test_date():
    """Test date validation."""
    validator = vol.Schema(cv.date)

    for bad in ("Not a date", "23:42", "2016-11-23T18:59:08"):
        with pytest.raises(vol.Invalid):
            validator(bad)

    validator(datetime.now().date())
    validator("2016-11-23")
def test_time():
    """Test date validation."""
    validator = vol.Schema(cv.time)

    for bad in ("Not a time", "2016-11-23", "2016-11-23T18:59:08"):
        with pytest.raises(vol.Invalid):
            validator(bad)

    validator(datetime.now().time())
    validator("23:42:00")
    validator("23:42")
def test_datetime():
    """Test date time validation."""
    validator = vol.Schema(cv.datetime)

    for bad in (date.today(), "Wrong DateTime", "2016-11-23"):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)

    validator(datetime.now())
    validator("2016-11-23T18:59:08")
@pytest.fixture
def schema():
    """Create a schema used for testing deprecation."""
    planets = {"venus": cv.boolean, "mars": cv.boolean, "jupiter": cv.boolean}
    return vol.Schema(planets)
@pytest.fixture
def version(monkeypatch):
    """Patch the version used for testing to 0.5.0."""
    # Deprecation invalidation_versions in the tests below are compared
    # against this patched value.
    monkeypatch.setattr(homeassistant.const, "__version__", "0.5.0")
def test_deprecated_with_no_optionals(caplog, schema):
    """
    Test deprecation behaves correctly when optional params are None.

    Expected: a deprecation warning when the key is present, the schema
    output unchanged, and no warning at all when the key is absent.
    """
    deprecated_schema = vol.All(cv.deprecated("mars"), schema)

    # Deprecated key present: warn, but pass the data through unchanged.
    data = {"mars": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 1
    assert caplog.records[0].name in [
        __name__,
        "homeassistant.helpers.config_validation",
    ]
    assert (
        "The 'mars' option (with value 'True') is deprecated, "
        "please remove it from your configuration"
    ) in caplog.text
    assert result == data

    caplog.clear()
    assert len(caplog.records) == 0

    # Deprecated key absent: no warning, data unchanged.
    data = {"venus": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data
def test_deprecated_with_replacement_key(caplog, schema):
    """
    Test deprecation behaves correctly when only a replacement key is provided.

    Expected: warn and move the value to replacement_key when the deprecated
    key is present; no warning and no change when only the replacement key
    (or neither key) is present.
    """
    deprecated_schema = vol.All(
        cv.deprecated("mars", replacement_key="jupiter"), schema
    )

    # Deprecated key present: warn and relocate the value.
    data = {"mars": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 1
    assert (
        "The 'mars' option (with value 'True') is deprecated, "
        "please replace it with 'jupiter'"
    ) in caplog.text
    assert result == {"jupiter": True}

    caplog.clear()
    assert len(caplog.records) == 0

    # Only the replacement key: silent, unchanged.
    data = {"jupiter": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data

    # Neither key: silent, unchanged.
    data = {"venus": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data
def test_deprecated_with_invalidation_version(caplog, schema, version):
    """
    Test deprecation behaves correctly with only an invalidation_version.

    Expected: before the invalidation version, warn but accept the data
    unchanged (and stay silent when the key is absent); once the version
    is crossed, raise vol.Invalid when the key is present.
    """
    deprecated_schema = vol.All(
        cv.deprecated("mars", invalidation_version="1.0.0"), schema
    )

    message = (
        "The 'mars' option (with value 'True') is deprecated, "
        "please remove it from your configuration. "
        "This option will become invalid in version 1.0.0"
    )

    # Before the invalidation version: warn but accept.
    data = {"mars": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 1
    assert message in caplog.text
    assert result == data

    caplog.clear()
    assert len(caplog.records) == 0

    data = {"venus": False}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data

    # Past the invalidation version (patched to 0.5.0): reject outright.
    invalidated_schema = vol.All(
        cv.deprecated("mars", invalidation_version="0.1.0"), schema
    )

    data = {"mars": True}
    with pytest.raises(vol.MultipleInvalid) as exc_info:
        invalidated_schema(data)
    assert str(exc_info.value) == (
        "The 'mars' option (with value 'True') is deprecated, "
        "please remove it from your configuration. This option will "
        "become invalid in version 0.1.0"
    )
def test_deprecated_with_replacement_key_and_invalidation_version(
    caplog, schema, version
):
    """
    Test deprecation behaves with a replacement key & invalidation_version.

    Expected: before the invalidation version, warn and move the value to
    replacement_key (silent and unchanged when only the replacement key or
    neither key is present); once the version is crossed, raise vol.Invalid
    when the deprecated key is present.
    """
    deprecated_schema = vol.All(
        cv.deprecated("mars", replacement_key="jupiter", invalidation_version="1.0.0"),
        schema,
    )

    warning = (
        "The 'mars' option (with value 'True') is deprecated, "
        "please replace it with 'jupiter'. This option will become "
        "invalid in version 1.0.0"
    )

    # Deprecated key present: warn and relocate the value.
    data = {"mars": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 1
    assert warning in caplog.text
    assert result == {"jupiter": True}

    caplog.clear()
    assert len(caplog.records) == 0

    # Only the replacement key: silent, unchanged.
    data = {"jupiter": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data

    # Neither key: silent, unchanged.
    data = {"venus": True}
    result = deprecated_schema(data.copy())
    assert len(caplog.records) == 0
    assert result == data

    # Past the invalidation version (patched to 0.5.0): reject outright.
    invalidated_schema = vol.All(
        cv.deprecated("mars", replacement_key="jupiter", invalidation_version="0.1.0"),
        schema,
    )

    data = {"mars": True}
    with pytest.raises(vol.MultipleInvalid) as exc_info:
        invalidated_schema(data)
    assert str(exc_info.value) == (
        "The 'mars' option (with value 'True') is deprecated, "
        "please replace it with 'jupiter'. This option will become "
        "invalid in version 0.1.0"
    )
def test_deprecated_with_default(caplog, schema):
"""
Test deprecation behaves correctly with a default value.
This is likely a scenario that would never occur.
Expected behavior:
- Behaves identically as when the default value was not present
"""
deprecated_schema = vol.All(cv.deprecated("mars", default=False), schema)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name == __name__
assert (
"The 'mars' option (with value 'True') is deprecated, "
"plea |
from __future__ import division
import numpy as np
from fatiando.gravmag.euler import Classic, ExpandingWindow, MovingWindow
from fatiando.gravmag import sphere, fourier
from fatiando.mesher import Sphere
from fatiando import utils, gridder
# Module-level state shared between setup() and the test functions
# (nose-style setup populates these globals before each test run).
model = None
# Renamed from xp, yp, zp: setup() and every test below use the globals
# x, y, z, so the old placeholder names never matched anything.
x, y, z = None, None, None
inc, dec = None, None
struct_ind = None
base = None
pos = None
field, xderiv, yderiv, zderiv = None, None, None, None
# Maximum acceptable relative error for the estimates.
precision = 0.01
def setup():
    """Build a single magnetized sphere model and synthesize the total-field
    anomaly (plus a constant base level) and its spatial derivatives on a
    regular grid, storing everything in module globals for the tests."""
    global model, x, y, z, inc, dec, struct_ind, field, xderiv, yderiv, \
        zderiv, base, pos
    # Inducing field inclination/declination (degrees) and true source position.
    inc, dec = -30, 50
    pos = np.array([1000, 1000, 200])
    model = Sphere(pos[0], pos[1], pos[2], 1,
                   #{'magnetization':utils.ang2vec(100, 25, -10)})
                   {'magnetization':10000})
    # Structural index 3 = point source (sphere) for Euler deconvolution.
    struct_ind = 3
    shape = (128, 128)
    x, y, z = gridder.regular((0, 3000, 0, 3000), shape, z=-1)
    base = 10
    # Total-field anomaly in SI plus the constant base level to recover.
    field = utils.nt2si(sphere.tf(x, y, z, [model], inc, dec)) + base
    xderiv = fourier.derivx(x, y, field, shape)
    yderiv = fourier.derivy(x, y, field, shape)
    zderiv = fourier.derivz(x, y, field, shape)
def test_euler_classic_sphere_mag():
    "gravmag.euler.Classic for sphere model and magnetic data"
    euler = Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind).fit()
    # Compare absolute relative error: the signed form passes trivially
    # whenever the estimate overshoots (difference goes negative).
    assert abs(base - euler.baselevel_)/base <= precision, \
        'baselevel: %g estimated: %g' % (base, euler.baselevel_)
    assert np.all(np.abs(pos - euler.estimate_)/pos <= precision), \
        'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
def test_euler_classic_expandingwindow_sphere_mag():
    "gravmag.euler.ExpandingWindow w Classic for sphere model + magnetic data"
    euler = ExpandingWindow(
        Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind),
        center=[1000, 1000], sizes=np.linspace(100, 2000, 20)).fit()
    # Compare absolute relative error: the signed form passes trivially
    # whenever the estimate overshoots (difference goes negative).
    assert abs(base - euler.baselevel_)/base <= precision, \
        'baselevel: %g estimated: %g' % (base, euler.baselevel_)
    assert np.all(np.abs(pos - euler.estimate_)/pos <= precision), \
        'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
def test_euler_classic_movingwindow_sphere_mag():
    "gravmag.euler.MovingWindow w Classic for sphere model + magnetic data"
    euler = MovingWindow(
        Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind),
        windows=[10, 10], size=(1000, 1000), keep=0.2).fit()
    # Compare absolute relative error: the signed form passes trivially
    # whenever an estimate overshoots (difference goes negative).
    for b in euler.baselevel_:
        assert abs(base - b)/base <= precision, \
            'baselevel: %g estimated: %g' % (base, b)
    for c in euler.estimate_:
        assert np.all(np.abs(pos - c)/pos <= precision), \
            'position: %s estimated: %s' % (str(pos), str(c))
|
#!/usr/bin/env python
import os, sys
try:
    from Tkinter import *
except ImportError:
    # Only a missing Tkinter should trigger the friendly exit; the previous
    # bare `except:` also swallowed KeyboardInterrupt and unrelated errors.
    sys.stderr.write('Please install Tkinter!\n\n')
    sys.exit(1)
root = Tk()
root.title('Wavelet Demo')

# Wavelet-type selector: radio buttons backed by a single StringVar.
wtype = StringVar()
wtype.set('b')
type_frame = Frame(root,relief=SUNKEN,borderwidth=2)
type_frame.pack(side=TOP,fill=X)
Label(type_frame,text='Wavelet Type').pack(side=TOP)
types = {'h':'Haar',
         'l':'Linear',
         'b':'Bi-orthogonal'}
for t in 'hlb':
    rbut = Radiobutton(type_frame,text=types[t],value=t,variable=wtype)
    rbut.pack(side=LEFT)

# Threshold percentile slider (1-99).
pclip_frame = Frame(root,relief=SUNKEN,borderwidth=2)
pclip_frame.pack(side=TOP,fill=X)
pclip = IntVar()
pclip.set(50)
scale = Scale(pclip_frame,from_=1,to=99,resolution=1,orient=HORIZONTAL,
              variable=pclip,length=200)
scale.pack(side=RIGHT)
Label(pclip_frame,text='Threshold\nPercentile').pack(side=RIGHT,anchor=SE)

frame = Frame(root)
frame.pack(side=TOP,fill=X)
# Renamed from `quit` so the builtin quit() is not shadowed.
quit_button = Button(frame,text='Quit',background='red',command=sys.exit)
quit_button.pack(side=RIGHT)
def scons():
    'Get parameters from GUI and pass them to SCons'
    cmd = 'scons -Q type=%s pclip=%d view' % (wtype.get(), pclip.get())
    os.system(cmd)
# 'Run' re-renders the demo with the currently selected parameters.
cycle = Button(frame,text='Run',background='yellow',command=scons)
cycle.pack()
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.