prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
__val[:50], __val[-50:]))
if __attr == self.textProperty:
# restore spaces that have been replaced
__val = __val.replace(WS, ' ')
attrs[__attr + __parens] = __val
else:
m = hashRE.match(attr)
if m:
attrs['class'] = m.group('class')
attrs['oid'] = m.group('oid')
else:
if DEBUG:
print >>sys.stderr, attr, "doesn't match"
if True: # was assignViewById
if not viewId:
# If the view has NO_ID we are assigning a default id here (id/no_id) which is
# immediately incremented if another view with no id was found before to generate
# a unique id
viewId = "id/no_id/1"
if viewId in self.viewsById:
# sometimes the view ids are not unique, so let's generate a unique id here
i = 1
while True:
newId = re.sub('/\d+$', '', viewId) + '/%d' % i
if not newId in self.viewsById:
break
i += 1
viewId = newId
if DEBUG:
print >>sys.stderr, "adding viewById %s" % viewId
# We are assigning a new attribute to keep the original id preserved, which could have
# been NO_ID repeated multiple times
attrs['uniqueId'] = viewId
return attrs
def __parseTree(self, receivedLines, windowId=None):
'''
Parses the View tree contained in L{receivedLines}. The tree is created and the root node assigned to L{self.root}.
This method also assigns L{self.viewsById} values using L{View.getUniqueId} as the key.
@type receivedLines: str
@param receivedLines: the string received from B{View Server}
'''
self.root = None
self.viewsById = {}
self.views = []
parent = None
parents = []
treeLevel = -1
newLevel = -1
lastView = None
for v in receivedLines:
if v == '' or v == 'DONE' or v == 'DONE.':
bre | ak
attrs = self.__splitAttrs(v)
if not self.root:
if v[0] == ' ':
raise Exception("Unexpected root element starting with ' '.")
self.root = View.facto | ry(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse, windowId)
if DEBUG: self.root.raw = v
treeLevel = 0
newLevel = 0
lastView = self.root
parent = self.root
parents.append(parent)
else:
newLevel = (len(v) - len(v.lstrip()))
if newLevel == 0:
raise Exception("newLevel==0 treeLevel=%d but tree can have only one root, v=%s" % (treeLevel, v))
child = View.factory(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse, windowId)
if DEBUG: child.raw = v
if newLevel == treeLevel:
parent.add(child)
lastView = child
elif newLevel > treeLevel:
if (newLevel - treeLevel) != 1:
raise Exception("newLevel jumps %d levels, v=%s" % ((newLevel-treeLevel), v))
parent = lastView
parents.append(parent)
parent.add(child)
lastView = child
treeLevel = newLevel
else: # newLevel < treeLevel
for _ in range(treeLevel - newLevel):
parents.pop()
parent = parents.pop()
parents.append(parent)
parent.add(child)
treeLevel = newLevel
lastView = child
self.views.append(lastView)
self.viewsById[lastView.getUniqueId()] = lastView
def __parseTreeFromUiAutomatorDump(self, receivedXml):
parser = UiAutomator2AndroidViewClient(self.device, self.build[VERSION_SDK_PROPERTY])
try:
start_xml_index = receivedXml.index("<")
except ValueError:
raise ValueError("received does not contain valid XML data")
self.root = parser.Parse(receivedXml[start_xml_index:])
self.views = parser.views
self.viewsById = {}
for v in self.views:
self.viewsById[v.getUniqueId()] = v
def getRoot(self):
'''
Gets the root node of the C{View} tree
@return: the root node of the C{View} tree
'''
return self.root
def traverse(self, root="ROOT", indent="", transform=None, stream=sys.stdout):
'''
Traverses the C{View} tree and prints its nodes.
The nodes are printed converting them to string but other transformations can be specified
by providing a method name as the C{transform} parameter.
@type root: L{View}
@param root: the root node from where the traverse starts
@type indent: str
@param indent: the indentation string to use to print the nodes
@type transform: method
@param transform: a method to use to transform the node before is printed
'''
if transform is None:
# this cannot be a default value, otherwise
# TypeError: 'staticmethod' object is not callable
# is raised
transform = ViewClient.TRAVERSE_CIT
if type(root) == types.StringType and root == "ROOT":
root = self.root
return ViewClient.__traverse(root, indent, transform, stream)
# if not root:
# return
#
# s = transform(root)
# if s:
# print >>stream, "%s%s" % (indent, s)
#
# for ch in root.children:
# self.traverse(ch, indent=indent+" ", transform=transform, stream=stream)
@staticmethod
def __traverse(root, indent="", transform=View.__str__, stream=sys.stdout):
if not root:
return
s = transform(root)
if stream and s:
ius = "%s%s" % (indent, s if isinstance(s, unicode) else unicode(s, 'utf-8', 'replace'))
print >>stream, ius.encode('utf-8', 'replace')
for ch in root.children:
ViewClient.__traverse(ch, indent=indent+" ", transform=transform, stream=stream)
def dump(self, window=-1, sleep=1):
'''
Dumps the window content.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
@type window: int or str
@param window: the window id or name of the window to dump.
The B{name} is the package name or the window name (i.e. StatusBar) for
system windows.
The window id can be provided as C{int} or C{str}. The C{str} should represent
and C{int} in either base 10 or 16.
Use -1 to dump all windows.
This parameter only is used when the backend is B{ViewServer} and it's
ignored for B{UiAutomator}.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of Views as C{str} received from the server after being split into lines
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
if self.uiAutomatorHelper:
received = self.uiAutomatorHelper.dumpWindowHierarchy()
else:
# NOTICE:
# Using /dev/tty this works even on devices with no sdcard
received = unicode(self.device.shell('uiautomator dump %s /dev/tty >/dev/null' % ('--compressed' if self.getSdkVersion() >= 18 and self.compressedDump else '')), encoding='utf-8', errors='replace')
if not received:
|
from django.conf import settings
from django.db.models import Q
from libs.django_utils import render_to_response
from django.views.generic import ListView
from springboard.models import IntranetApplication
from django.contrib.auth.decorators import login_required
from alerts.models import Alert
class SpringBoard(ListView):
context_object_name = "applications"
template_name = "springboard/springboard.html"
def get_queryset(self):
# Check the groups the user is allowed to see
return IntranetApplication.objects.filter(Q(groups__in = self.request.user.g | roups.all()) | Q(groups__isnull=True)).distinct()
def get_context_data(self, **kwargs):
# Temporar | y message for testing
from django.contrib import messages
# Call the base implementation first to get a context
context = super(SpringBoard, self).get_context_data(**kwargs)
# Get all the alerts for the user
context['alerts'] = Alert.objects.filter(sent_to = self.request.user)
return context
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license i | nformation, please see license.t | xt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class GoogleAppSetup(Document):
pass
|
from logan.runner import run_app, configure_app
import sys
import base64
import os
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
from fabric_bolt.core.settings.base import *
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'fabric-bolt.db'),
'USER': 'sqlite3',
'PASSWOR | D': '',
'HOST': '',
'PORT': '',
}
}
SECRET_KEY = %(default_key)r
"""
def generate_settings():
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
def configure():
configure_app(
project='fabric-bolt',
default_config_ | path='~/.fabric-bolt/settings.py',
default_settings='fabric_bolt.core.settings.base',
settings_initializer=generate_settings,
settings_envvar='FABRIC_BOLT_CONF',
)
def main(progname=sys.argv[0]):
run_app(
project='fabric-bolt',
default_config_path='~/.fabric-bolt/settings.py',
default_settings='fabric_bolt.core.settings.base',
settings_initializer=generate_settings,
settings_envvar='FABRIC_BOLT_CONF',
)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import division
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
# If your extensions are in another directory, add it here.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SpiffWorkflow'
copyright = '2012 ' + ', '.join(open('../AUTHORS').readlines())
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import SpiffWorkflow
version = SpiffWorkflow.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are tw | o options for replacing |today|: either, you set t | oday to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['figures']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
html_additional_pages = {'index': 'index.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_use_opensearch = 'http://sphinx.pocoo.org'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
#latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_elements = {
'fontpkg': '\\usepackage{palatino}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Extension interface
# -------------------
from sphinx import addnodes
dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$')
def parse_directive(env, sig, signode):
if not sig.startswith('.'):
dec_sig = '.. %s::' % sig
signode += addnodes.desc_name(dec_sig, dec_sig)
return sig
m = dir_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
dec_name = '.. %s::' % name
signode += addnodes.desc_name(dec_name, dec_name)
signode += addnodes.desc_addname(args, args)
return name
def parse_role(env, sig, signode):
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('directive', 'dir', 'pair: %s; directive', parse_directive)
app.add_description_unit('role', 'role', 'pair: %s; role', parse_role)
app.add_description_unit('confval', 'confval', 'pair: %s; configuration value')
app.add_description_unit('event', 'event', 'pair: %s; event', parse_event) |
# -*- coding: utf-8 -*-
import unittest
class PilhaVaziaErro(Exception):
pass
class Pilha():
def __init__(self):
self.lista = []
def topo(self):
if self.lista:
return self.lista[-1]
raise PilhaVaziaErro()
def vazia(self):
return not bool(self.lista)
def empilhar(self, valor):
self.lista.append(valor)
def desempilhar(self):
try:
return sel | f.lista.pop()
except IndexError:
raise PilhaVaziaErro
def esta_balanceada(expressao):
"""
Função que calcula se expressão | possui parenteses, colchetes e chaves balanceados
O Aluno deverá informar a complexidade de tempo e espaço da função
Deverá ser usada como estrutura de dados apenas a pilha feita na aula anterior
:param expressao: string com expressao a ser balanceada
:return: boleano verdadeiro se expressao está balanceada e falso caso contrário
Complexidade
Tempo: O(n)
Memoria: O(n)
"""
if expressao:
pilha = Pilha()
if expressao[0] in '}])':
return False
for i in expressao:
if i in '{[(':
pilha.empilhar(i)
elif i in '}])':
if i=='}' and pilha.desempilhar() != '{':
return False
elif i==']' and pilha.desempilhar() != '[':
return False
elif i==')' and pilha.desempilhar() != '(':
return False
if pilha.vazia():
return True
return False
else:
return True
class BalancearTestes(unittest.TestCase):
def test_expressao_vazia(self):
self.assertTrue(esta_balanceada(''))
def test_parenteses(self):
self.assertTrue(esta_balanceada('()'))
def test_chaves(self):
self.assertTrue(esta_balanceada('{}'))
def test_colchetes(self):
self.assertTrue(esta_balanceada('[]'))
def test_todos_caracteres(self):
self.assertTrue(esta_balanceada('({[]})'))
self.assertTrue(esta_balanceada('[({})]'))
self.assertTrue(esta_balanceada('{[()]}'))
def test_chave_nao_fechada(self):
self.assertFalse(esta_balanceada('{'))
def test_colchete_nao_fechado(self):
self.assertFalse(esta_balanceada('['))
def test_parentese_nao_fechado(self):
self.assertFalse(esta_balanceada('('))
def test_chave_nao_aberta(self):
self.assertFalse(esta_balanceada('}{'))
def test_colchete_nao_aberto(self):
self.assertFalse(esta_balanceada(']['))
def test_parentese_nao_aberto(self):
self.assertFalse(esta_balanceada(')('))
def test_falta_de_caracter_de_fechamento(self):
self.assertFalse(esta_balanceada('({[]}'))
def test_falta_de_caracter_de_abertura(self):
self.assertFalse(esta_balanceada('({]})'))
def test_expressao_matematica_valida(self):
self.assertTrue(esta_balanceada('({[1+3]*5}/7)+9'))
|
import os
import sys
from setuptools import setup, find_packages
version = '0.3.3'
def get_package_manifest(filename):
packages = []
with open(filename) as package_file:
for line in package_file.readlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
# comment
continue
if line.startswith('-e '):
# not a valid package
continue
packages.append(line)
return packages
def get_install_requires():
"""
:returns: A list of packages required for installation.
"""
return get_package_manifest('requirements.txt')
def get_tests_requires():
"""
:returns: A list of packages required for running the tests.
"""
packages = get_package_manifest('requirements_dev.txt')
try:
from unittest import mock
except ImportError:
packages.append('mock')
if sys.version_info[:2] < (2, 7):
packages.append('unittest2')
return packages
def read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f:
return f.read().strip()
setup(
name='sockjs-gevent',
version=version,
description=('gevent base sockjs server'),
long_description='\n\n'.join((read('README.md'), read('CHANGES.txt'))),
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP",
'Topic :: Internet :: WWW/HTTP :: WSGI'
],
author='Nick Joyce',
author_email='nick | .joyce@realkinetic.com',
url='https://github.com/njoyce/sockjs-gevent',
license=' | MIT',
install_requires=get_install_requires(),
tests_require=get_tests_requires(),
setup_requires=['nose>=1.0'],
test_suite='nose.collector',
include_package_data = True,
packages=find_packages(exclude=["examples", "tests"]),
zip_safe = False,
)
|
############################### | # FIG J.2 P.683 ################################
import matplotlib.pyplot as plt
def freqplot(fdata, ydata, symbol='', ttl='', xlab='Frequency (Hz)', ylab=''):
""" FREQPLOT - Plot a function of frequency. See myplot for more features."""
#not sure what this means
#if nargin<2, fdata=0:length(ydata)-1; end
plt.plot(fdata, ydata, symbol);
plt.grid()
plt.title(ttl)
plt.yl | abel(ylab)
plt.xlabel(xlab); |
# Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import application
import unittest
from application import application
from flask import Flask, current_app, request, Response
""" Main test cases for our application """
class AppTestCase(unittest.TestCase):
#application = Flask(__name__)
def setUp(self):
application.testing = True
with application.app_context():
se | lf.client = current_app.test_client()
def test_load_config(self):
""" Test that we can load our config properly """
self.assertTrue(1)
def test_get_test(self):
""" Test hitting /test and that we get a correct HTTP response """
self.assertTrue(1)
def test_get_form(self):
""" Test that we can get a signup form """
self.assertTrue(1)
| def test_get_user(self):
""" Test that we can get a user context """
self.assertTrue(1)
def test_login(self):
""" Test that we can authenticate as a user """
self.assertTrue(1)
if __name__ == '__main__':
unittest.main()
|
s have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Misaka'
copyright = u'2011-2017, Frank Smit'
author = u'Frank Smit'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# '**': [
# 'about.html',
# # 'navigation.html',
# # 'relations.html',
# # 'searchbox.html',
# # 'donate.html',
# ]
# }
# Additiona | l templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_spl | it_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Misakadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Misaka.tex', u'Misaka Documentation',
u'Frank Smit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. |
# Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as publis | hed by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but | WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from gvsbuild.utils.base_builders import Meson
from gvsbuild.utils.base_expanders import Tarball
from gvsbuild.utils.base_project import project_add
@project_add
class Graphene(Tarball, Meson):
def __init__(self):
Meson.__init__(
self,
"graphene",
archive_url="https://github.com/ebassi/graphene/archive/refs/tags/1.10.6.tar.gz",
hash="7eba972751d404316a9b59a7c1e0782de263c3cf9dd5ebf1503ba9b8354cc948",
dependencies=["ninja", "meson", "pkg-config", "glib"],
)
if self.opts.enable_gi:
self.add_dependency("gobject-introspection")
enable_gi = "enabled"
else:
enable_gi = "disabled"
self.add_param("-Dintrospection={}".format(enable_gi))
def build(self):
Meson.build(self, make_tests=True)
self.install(r".\LICENSE share\doc\graphene")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Скрипт извлекает слова из текстового файла и сортирует их по частоте.
# С помощью модуля pymorphy2 можно привести слова к начальной форме | (единственное | число, именительный падеж).
# Нужен pymorphy2 и русскоязычный словарь для него!
# pip install --user pymorphy2
# Примеры:
# ./wordfreq-morph.py ./text-file.txt | less
# xclip -o | ./wordfreq-morph.py -m
# Проверялся на интерпретаторе:
# Python 3.6.1 on linux
import sys
import sqlite3
import os
import re
import argparse
# Сортировка вывода словарей:
from collections import OrderedDict
#------------------------------------------------------------------------------
# Опции:
# Проверочный морфологический словарь (в каталоге скрипта):
NORMAL_DICT_PATH = 'dict.opencorpora-sing-nom.txt'
NORMAL_DICT_DIR = 'word-length-dicts'
database_name = 'opencorpora-sing-nom.sqlite'
#-------------------------------------------------------------------------
# Аргументы командной строки:
def create_parser():
"""Список доступных параметров скрипта."""
parser = argparse.ArgumentParser()
parser.add_argument('file',
nargs='*',
help='Русскоязычный текстовый файл в UTF-8'
)
parser.add_argument('-m', '--morph',
action='store_true', default='False',
help='Преобразование слов в начальную форму (нужен pymorphy2)'
)
return parser
#-------------------------------------------------------------------------
# Функции:
def metadict_path (metadict_dir):
"""Возвращает абсолютный путь к каталогу словарей."""
# Получаем абсолютный путь к каталогу скрипта:
script_path = os.path.dirname(os.path.abspath(__file__))
# Добавляем к пути каталог словарей:
metadict_path = script_path + '/' + metadict_dir
return metadict_path
def find_files (directory):
"""Возвращает список путей ко всем файлам каталога, включая подкаталоги."""
path_f = []
for d, dirs, files in os.walk(directory):
for f in files:
# Формирование адреса:
path = os.path.join(d,f)
# Добавление адреса в список:
path_f.append(path)
return path_f
def lowercase (text):
"""Создаёт из текста список слов в нижнем регистре"""
# Переводим текст в нижний регистр:
text = str(text.lower())
# Регексп вытаскивает из текста слова:
words = re.findall(r"(\w+)", text, re.UNICODE)
# Восстанавливаются ссылки:
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
words = words + urls
return words
def wordfreq_old(words):
    """Return a dict mapping each word to its number of occurrences."""
    frequency = {}
    for token in words:
        if token in frequency:
            frequency[token] += 1
        else:
            frequency[token] = 1
    return frequency
def word_test_slow(word):
    """Check *word* against the plain-text dictionaries on disk.

    The dictionary file is selected by word length (files are named
    '...-<length>.txt').  Returns True when the word occurs in the
    matching dictionary, False otherwise — including when no dictionary
    file matches (previously this fell through and returned None).
    """
    # Dictionary files are keyed by word length:
    search_string = '-' + str(len(word)) + '.txt'
    dicts_list = find_files(metadict_path(NORMAL_DICT_DIR))
    for dict_path in dicts_list:  # renamed from 'dict' (shadowed the builtin)
        if search_string in dict_path:
            # 'with' guarantees the file is closed (was left open before):
            with open(dict_path, "r") as normal_dict_file:
                normal_dict = normal_dict_file.read()
            # NOTE(review): substring membership may match fragments of other
            # entries; presumably the file holds one word per line of this
            # exact length — confirm before tightening to a line-set lookup.
            return word in normal_dict
    # BUG FIX: explicit False when no dictionary file matches the length.
    return False
def word_test_sql(word, cursor):
    """Return True if *word* is present in the SQLite word database."""
    # Tables are partitioned by word length; lengths above 32 share table 32.
    length = min(len(word), 32)
    table_name = 'opencorpora' + str(length)
    # Table name is derived from an int; the word itself is parameterized.
    cursor.execute("SELECT words FROM " + table_name + " WHERE words=?", (word,))
    return bool(cursor.fetchall())
def wordfreq_morph(words):
    """Build a frequency dict of words reduced to their normal (base) form.

    Words whose normalized form is not found in the word database are
    treated as neologisms and kept unchanged.

    NOTE(review): relies on the module-level global 'cursor' for the
    database lookup — confirm the connection is open when this is called.
    """
    # Morphological analyzer (imported lazily; only needed with -m):
    import pymorphy2
    stats = {}
    n_stats = {}
    for word in words:
        stats[word] = stats.get(word, 0) + 1
    morph = pymorphy2.MorphAnalyzer()
    for item in stats:
        # Reduce the word to its normal form:
        n_word = morph.parse(item)[0].normal_form
        # Neologisms (not in the dictionary) are left unchanged:
        if word_test_sql(n_word, cursor) is not True:
            n_word = item
        # Create a new key, or add the count to an existing one:
        if n_word not in n_stats:
            n_stats[n_word] = stats[item]
        else:
            n_stats[n_word] = n_stats[n_word] + stats[item]
    return n_stats
def dict_sort(stats):
    """Sort a frequency dict by descending count, ties broken alphabetically."""
    # Sort by key first; the second (stable) sort keeps that order for ties.
    by_key = sorted(stats.items(), key=lambda pair: pair[0])
    by_count = sorted(by_key, key=lambda pair: pair[1], reverse=True)
    return OrderedDict(by_count)
# -------------------------------------------------------------------------
# Script body:
# Build the command-line argument parser:
parser = create_parser()
namespace = parser.parse_args()
# Check whether the given file exists:
# NOTE(review): 'file_patch' is presumably a typo for 'file_path'.
file_patch = ' '.join(namespace.file)
if namespace.file is not None and os.path.exists(file_patch):
    file = open(file_patch, "r")
    text = file.read()
    file.close()
# Otherwise read from standard input:
else:
    text = sys.stdin.read()
# Extract the words from the text:
words = lowercase(text)
# Connect to the database:
database = sqlite3.connect(metadict_path(database_name))
cursor = database.cursor()
# If morphological normalization was requested:
# NOTE(review): the '-m' option may be declared with a *string* default
# ('False', which is truthy), so the identity test 'is True' is load-bearing
# here — verify the parser's default before simplifying this condition.
if namespace.morph is True:
    wordfreq = wordfreq_morph(words)
else:
    wordfreq = wordfreq_old(words)
# Disconnect from the database:
database.close()
# Print the frequency dictionary:
wordfreq_sort = dict_sort(wordfreq)
for word, count in wordfreq_sort.items():
    print (count, word)
|
_data['request_uri'] = 'https://example.com/testing'
self.assertEqual(url + '/testing', OneLogin_Saml2_Utils.get_self_url(request_data))
def testGetSelfURLNoQuery(self):
    """
    Tests the get_self_url_no_query method of the OneLogin_Saml2_Utils
    """
    request_data = {
        'http_host': 'example.com',
        'script_name': '/index.html'
    }
    base = OneLogin_Saml2_Utils.get_self_url_host(request_data)
    expected = base + request_data['script_name']
    self.assertEqual(expected, OneLogin_Saml2_Utils.get_self_url_no_query(request_data))
    # path_info is appended when present:
    request_data['path_info'] = '/test'
    self.assertEqual(expected + '/test', OneLogin_Saml2_Utils.get_self_url_no_query(request_data))
def testGetSelfRoutedURLNoQuery(self):
    """
    Tests the get_self_routed_url_no_query method of the OneLogin_Saml2_Utils
    """
    # Query string present in request_uri: it must be stripped.
    request_data = {
        'http_host': 'example.com',
        'request_uri': '/example1/route?x=test',
        'query_string': '?x=test'
    }
    url = OneLogin_Saml2_Utils.get_self_url_host(request_data) + '/example1/route'
    self.assertEqual(url, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data))
    # Empty request_uri: only the host part is returned.
    request_data_2 = {
        'http_host': 'example.com',
        'request_uri': '',
    }
    url_2 = OneLogin_Saml2_Utils.get_self_url_host(request_data_2)
    self.assertEqual(url_2, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_2))
    # Missing request_uri: behaves the same as empty.
    request_data_3 = {
        'http_host': 'example.com',
    }
    url_3 = OneLogin_Saml2_Utils.get_self_url_host(request_data_3)
    self.assertEqual(url_3, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_3))
    # query_string not actually contained in request_uri: URI kept as-is.
    request_data_4 = {
        'http_host': 'example.com',
        'request_uri': '/example1/route/test/',
        'query_string': '?invalid=1'
    }
    url_4 = OneLogin_Saml2_Utils.get_self_url_host(request_data_4) + '/example1/route/test/'
    self.assertEqual(url_4, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_4))
    # Empty query_string: URI kept as-is.
    request_data_5 = {
        'http_host': 'example.com',
        'request_uri': '/example1/route/test/',
        'query_string': ''
    }
    url_5 = OneLogin_Saml2_Utils.get_self_url_host(request_data_5) + '/example1/route/test/'
    self.assertEqual(url_5, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_5))
    # No query_string key at all: URI kept as-is.
    request_data_6 = {
        'http_host': 'example.com',
        'request_uri': '/example1/route/test/',
    }
    url_6 = OneLogin_Saml2_Utils.get_self_url_host(request_data_6) + '/example1/route/test/'
    self.assertEqual(url_6, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_6))
def testGetStatus(self):
    """
    Gets the status of a message
    """
    # Valid response: Success status code.
    xml = self.file_contents(join(self.data_path, 'responses', 'response1.xml.base64'))
    xml = b64decode(xml)
    dom = etree.fromstring(xml)
    status = OneLogin_Saml2_Utils.get_status(dom)
    self.assertEqual(OneLogin_Saml2_Constants.STATUS_SUCCESS, status['code'])
    # Responder status code without a status message:
    xml2 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'status_code_responder.xml.base64'))
    xml2 = b64decode(xml2)
    dom2 = etree.fromstring(xml2)
    status2 = OneLogin_Saml2_Utils.get_status(dom2)
    self.assertEqual(OneLogin_Saml2_Constants.STATUS_RESPONDER, status2['code'])
    self.assertEqual('', status2['msg'])
    # Responder status code with a status message:
    xml3 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'status_code_responer_and_msg.xml.base64'))
    xml3 = b64decode(xml3)
    dom3 = etree.fromstring(xml3)
    status3 = OneLogin_Saml2_Utils.get_status(dom3)
    self.assertEqual(OneLogin_Saml2_Constants.STATUS_RESPONDER, status3['code'])
    self.assertEqual('something_is_wrong', status3['msg'])
    # Response with no Status element must raise:
    xml_inv = self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_status.xml.base64'))
    xml_inv = b64decode(xml_inv)
    dom_inv = etree.fromstring(xml_inv)
    self.assertRaisesRegexp(Exception, 'Missing Status on response',
                            OneLogin_Saml2_Utils.get_status, dom_inv)
    # Response with Status but no StatusCode must raise:
    xml_inv2 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_status_code.xml.base64'))
    xml_inv2 = b64decode(xml_inv2)
    dom_inv2 = etree.fromstring(xml_inv2)
    self.assertRaisesRegexp(Exception, 'Missing Status Code on response',
                            OneLogin_Saml2_Utils.get_status, dom_inv2)
def testParseDuration(self):
    """
    Tests the parse_duration method of the OneLogin_Saml2_Utils
    """
    duration = 'PT1393462294S'
    timestamp = 1393876825
    # Duration added to an explicit timestamp:
    parsed_duration = OneLogin_Saml2_Utils.parse_duration(duration, timestamp)
    self.assertEqual(2787339119, parsed_duration)
    # Without a timestamp the current time is used, so the result is later:
    parsed_duration_2 = OneLogin_Saml2_Utils.parse_duration(duration)
    self.assertTrue(parsed_duration_2 > parsed_duration)
    # 'PT1Y' is invalid ISO 8601 (years cannot follow the time designator):
    invalid_duration = 'PT1Y'
    self.assertRaisesRegexp(Exception, 'Unrecognised ISO 8601 date format',
                            OneLogin_Saml2_Utils.parse_duration, invalid_duration)
    # Calendar durations (years and months) are supported:
    new_duration = 'P1Y1M'
    parsed_duration_4 = OneLogin_Saml2_Utils.parse_duration(new_duration, timestamp)
    self.assertEqual(1428091225, parsed_duration_4)
    # Negative durations subtract from the timestamp:
    neg_duration = '-P14M'
    parsed_duration_5 = OneLogin_Saml2_Utils.parse_duration(neg_duration, timestamp)
    self.assertEqual(1357243225, parsed_duration_5)
def testParseSAML2Time(self):
    """
    Tests the parse_SAML_to_time method of the OneLogin_Saml2_Utils
    """
    expected_timestamp = 1386650371
    self.assertEqual(expected_timestamp,
                     OneLogin_Saml2_Utils.parse_SAML_to_time('2013-12-10T04:39:31Z'))
    self.assertRaisesRegexp(Exception, 'does not match format',
                            OneLogin_Saml2_Utils.parse_SAML_to_time, 'invalidSAMLTime')
    # Now test if toolkit supports miliseconds
    self.assertEqual(expected_timestamp,
                     OneLogin_Saml2_Utils.parse_SAML_to_time('2013-12-10T04:39:31.120Z'))
def testParseTime2SAML(self):
    """
    Tests the parse_time_to_SAML method of the OneLogin_Saml2_Utils
    """
    timestamp = 1386650371
    expected = '2013-12-10T04:39:31Z'
    self.assertEqual(expected, OneLogin_Saml2_Utils.parse_time_to_SAML(timestamp))
    # Non-numeric input must raise:
    self.assertRaisesRegexp(Exception, 'could not convert string to float',
                            OneLogin_Saml2_Utils.parse_time_to_SAML, 'invalidtime')
def testGetExpireTime(self):
    """
    Tests the get_expire_time method of the OneLogin_Saml2_Utils
    """
    get_expire_time = OneLogin_Saml2_Utils.get_expire_time
    # No arguments: no expiration.
    self.assertEqual(None, get_expire_time())
    self.assertNotEqual(None, get_expire_time('PT360000S'))
    # Past validity: the cache duration wins.
    self.assertEqual('1291955971', get_expire_time('PT360000S', '2010-12-10T04:39:31Z'))
    self.assertEqual('1291955971', get_expire_time('PT360000S', 1291955971))
    # Far-future validity: the (sooner) cache duration wins instead.
    self.assertNotEqual('3311642371', get_expire_time('PT360000S', '2074-12-10T04:39:31Z'))
    self.assertNotEqual('3311642371', get_expire_time('PT360000S', 1418186371))
def testGenerateNameIdWithSPNameQualifier(self):
"""
Tests the generateNameId method of the OneLogin_Saml2_Utils
"""
name_id_value = 'ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde'
entity_id = 'http://stuff.com/endpoints/metadata.php'
name_id_format = 'urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified'
name_id = OneLogin_Saml2_Utils.generate_name_id(name_id_value, entity_id, name_id_format)
expected_name_id = '<saml:NameID SPNameQualifier="http://stuff.com/endpoints/metadata.php" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified">ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde</saml:NameID>'
self. |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's messages with the proto-plus runtime.
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.enums",
    marshal="google.ads.googleads.v10",
    manifest={"PaymentModeEnum",},
)
class PaymentModeEnum(proto.Message):
    r"""Container for enum describing possible payment modes."""

    class PaymentMode(proto.Enum):
        r"""Enum describing possible payment modes."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        # Note: values 2-3 are absent in this API version.
        CLICKS = 4
        CONVERSION_VALUE = 5
        CONVERSIONS = 6
        GUEST_STAY = 7
# The public API is exactly the names declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
# -*- Mode: Python; test-case-name: -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import time
from twisted.internet import reactor
from flumotion.common import log
# Minimum size to take in account when calculating mean file read
MIN_REQUEST_SIZE = 64 * 1024 + 1
# Statistics update period
STATS_UPDATE_PERIOD = 10
class RequestStatistics(object):
    """Per-request byte counter that reports back to a ServerStatistics."""

    def __init__(self, serverStats):
        # Registers itself with the aggregate stats on creation.
        self._stats = serverStats
        self.bytesSent = 0L  # long literal: this module is Python 2 code
        self._stats._onRequestStart(self)

    def onDataSent(self, size):
        """Record *size* bytes sent for this request."""
        self.bytesSent += size
        self._stats._onRequestDataSent(self, size)

    def onCompleted(self, size):
        """Mark the request finished; *size* is the full resource size
        (used by ServerStatistics for the read-ratio average)."""
        self._stats._onRequestComplete(self, size)
class ServerStatistics(object):
    """Aggregate HTTP-server statistics: request counts, rates and bitrates.

    _update() runs every STATS_UPDATE_PERIOD seconds on the twisted reactor
    and pushes derived values to an optional updater via _set(key, value).
    The per-request hooks (_onRequestStart, _onRequestDataSent,
    _onRequestComplete) are called by RequestStatistics instances.
    """

    _updater = None  # object with an update(key, value) method, or None
    _callId = None   # pending reactor.callLater handle, or None

    def __init__(self):
        now = time.time()
        self.startTime = now
        self.currentRequestCount = 0
        self.totalRequestCount = 0
        self.requestCountPeak = 0
        self.requestCountPeakTime = now
        self.finishedRequestCount = 0
        self.totalBytesSent = 0L  # long literals: this module is Python 2
        # Updated by a call to the update method:
        self.meanRequestCount = 0
        self.currentRequestRate = 0
        self.requestRatePeak = 0
        self.requestRatePeakTime = now
        self.meanRequestRate = 0.0
        self.currentBitrate = 0
        self.meanBitrate = 0
        self.bitratePeak = 0
        self.bitratePeakTime = now
        self._fileReadRatios = 0.0
        # Snapshots from the previous _update() call, used for deltas:
        self._lastUpdateTime = now
        self._lastRequestCount = 0
        self._lastBytesSent = 0L

    def startUpdates(self, updater):
        """Begin periodic updates, pushing values to *updater*."""
        self._updater = updater
        self._set("bitrate-peak-time", self.bitratePeakTime)
        self._set("request-rate-peak-time", self.requestRatePeakTime)
        self._set("request-count-peak-time", self.requestCountPeakTime)
        if self._callId is None:
            self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)

    def stopUpdates(self):
        """Detach the updater and cancel the scheduled recomputation."""
        self._updater = None
        if self._callId is not None:
            self._callId.cancel()
            self._callId = None

    def getMeanFileReadRatio(self):
        # Average of per-request bytesSent/size ratios over finished requests.
        if self.finishedRequestCount > 0:
            return self._fileReadRatios / self.finishedRequestCount
        return 0.0

    meanFileReadRatio = property(getMeanFileReadRatio)

    def _update(self):
        """Recompute rates, averages and peaks, then reschedule itself."""
        now = time.time()
        updateDelta = now - self._lastUpdateTime
        # Update average concurrent request count:
        meanReqCount = self._updateAverage(self._lastUpdateTime, now,
                                           self.meanRequestCount,
                                           self.currentRequestCount)
        # Calculate request rate over the elapsed interval:
        countDiff = self.totalRequestCount - self._lastRequestCount
        newReqRate = float(countDiff) / updateDelta
        # Calculate average request rate.
        # NOTE(review): seeded with currentRequestRate (previous rate), unlike
        # meanReqCount which is seeded with the stored mean — confirm this
        # asymmetry is intentional.
        meanReqRate = self._updateAverage(self._lastUpdateTime, now,
                                          self.currentRequestRate, newReqRate)
        # Calculate current bitrate (bits, hence * 8):
        bytesDiff = (self.totalBytesSent - self._lastBytesSent) * 8
        newBitrate = bytesDiff / updateDelta
        # Calculate average bitrate:
        meanBitrate = self._updateAverage(self._lastUpdateTime, now,
                                          self.currentBitrate, newBitrate)
        # Update values:
        self.meanRequestCount = meanReqCount
        self.currentRequestRate = newReqRate
        self.meanRequestRate = meanReqRate
        self.currentBitrate = newBitrate
        self.meanBitrate = meanBitrate
        # Update the statistics keys with the new values:
        self._set("mean-request-count", meanReqCount)
        self._set("current-request-rate", newReqRate)
        self._set("mean-request-rate", meanReqRate)
        self._set("current-bitrate", newBitrate)
        self._set("mean-bitrate", meanBitrate)
        # Update request rate peak:
        if newReqRate > self.requestRatePeak:
            self.requestRatePeak = newReqRate
            self.requestRatePeakTime = now
            # update statistic keys
            self._set("request-rate-peak", newReqRate)
            self._set("request-rate-peak-time", now)
        # Update bitrate peak:
        if newBitrate > self.bitratePeak:
            self.bitratePeak = newBitrate
            self.bitratePeakTime = now
            # update statistic keys
            self._set("bitrate-peak", newBitrate)
            self._set("bitrate-peak-time", now)
        # Update bytes read statistic key too:
        self._set("total-bytes-sent", self.totalBytesSent)
        self._lastRequestCount = self.totalRequestCount
        self._lastBytesSent = self.totalBytesSent
        self._lastUpdateTime = now
        # Log the stats and reschedule:
        self._logStatsLine()
        self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)

    def _set(self, key, value):
        # Forward to the updater when one is attached.
        if self._updater is not None:
            self._updater.update(key, value)

    def _onRequestStart(self, stats):
        # Update counters:
        self.currentRequestCount += 1
        self.totalRequestCount += 1
        self._set("current-request-count", self.currentRequestCount)
        self._set("total-request-count", self.totalRequestCount)
        # Update concurrent request peak:
        if self.currentRequestCount > self.requestCountPeak:
            now = time.time()
            self.requestCountPeak = self.currentRequestCount
            self.requestCountPeakTime = now
            self._set("request-count-peak", self.currentRequestCount)
            self._set("request-count-peak-time", now)

    def _onRequestDataSent(self, stats, size):
        self.totalBytesSent += size

    def _onRequestComplete(self, stats, size):
        # 'size' is the full resource size; used for the read-ratio average.
        self.currentRequestCount -= 1
        self.finishedRequestCount += 1
        self._set("current-request-count", self.currentRequestCount)
        # Small transfers (<= MIN_REQUEST_SIZE) are excluded from the ratio:
        if (size > 0) and (stats.bytesSent > MIN_REQUEST_SIZE):
            self._fileReadRatios += float(stats.bytesSent) / size
            self._set("mean-file-read-ratio", self.meanFileReadRatio)

    def _updateAverage(self, lastTime, newTime, lastValue, newValue):
        # Time-weighted running average over the lifetime since startTime.
        lastDelta = lastTime - self.startTime
        newDelta = newTime - lastTime
        if lastDelta > 0:
            delta = lastDelta + newDelta
            before = (lastValue * lastDelta) / delta
            after = (newValue * newDelta) / delta
            return before + after
        return lastValue

    def _logStatsLine(self):
        """
        Statistic fields names:
        TRC: Total Request Count
        CRC: Current Request Count
        CRR: Current Request Rate
        MRR: Mean Request Rate
        FRR: File Read Ratio
        MBR: Mean Bitrate
        CBR: Current Bitrate
        """
        log.debug("stats-http-server",
                  "TRC: %s; CRC: %d; CRR: %.2f; MRR: %.2f; "
                  "FRR: %.4f; MBR: %d; CBR: %d",
                  self.totalRequestCount, self.currentRequestCount,
                  self.currentRequestRate, self.meanRequestRate,
                  self.meanFileReadRatio, self.meanBitrate,
                  self.currentBitrate)
|
# -*- coding: utf-8 -*-
__all__ = ["inet_aton", "record_by_ip", "record_by_request", "get_ip",
"record_by_ip_as_dict", "record_by_request_as_dict"]
import struct
import socket
from geoip.defaults import BACKEND, REDIS_TYPE
from geoip.redis_wrapper import RedisClient
from geoip.models import Range
_RECORDS_KEYS = ('country', 'area', 'city', 'isp', 'provider')
def _from_redis(ip):
    """Look up the geo record for integer IP *ip* in Redis.

    Returns the record as a list of field strings, or None when no
    range covers the IP.

    NOTE(review): members of the 'geoip' sorted set appear to encode
    '<geo_id>:<junk>:<prefix>' with the range boundary as score, and the
    record fields live under 'geoip:<junk>' — confirm against the loader.
    """
    r = RedisClient()
    # First member whose score is >= ip (limit 1):
    data = r.zrangebyscore("geoip", ip, 'inf', 0, 1, withscores=True)
    if not data:
        return
    res, score = data[0]
    geo_id, junk, prefix = res.decode().split(":", 2)
    # An 's' prefix with a score beyond ip means ip falls between ranges:
    if prefix == "s" and score > ip:
        return
    info = r.get("geoip:%s" % junk)
    if info is not None:
        return info.decode('utf-8', 'ignore').split(':')
def | _from_db(ip):
obj = Range.objects.select_related().filter(
start_ip__lte=ip, end_ip__gte=ip
).order_by('end_ip', '-start_ip')[:1][0]
if REDIS_TYPE == 'pk':
return map(lambda k: str(getattr(obj, k).pk), _REC | ORDS_KEYS)
return map(lambda k: str(getattr(obj, k)), _RECORDS_KEYS)
def inet_aton(ip):
    """Convert a dotted-quad IPv4 string to its unsigned integer form."""
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
def get_ip(request):
    """Return the client IP for *request*, honouring X-Forwarded-For."""
    meta = request.META
    client_ip = meta['REMOTE_ADDR']
    # The first entry of X-Forwarded-For is the originating client:
    if 'HTTP_X_FORWARDED_FOR' in meta:
        client_ip = meta['HTTP_X_FORWARDED_FOR'].split(',')[0]
    return client_ip
def record_by_ip(ip):
    """Look up the geo record for dotted-quad *ip* via the configured backend."""
    numeric = inet_aton(ip)
    if BACKEND == 'redis':
        return _from_redis(numeric)
    return _from_db(numeric)
def record_by_request(request):
    """Return the geo record for the client IP of *request*."""
    return record_by_ip(get_ip(request))
def record_by_ip_as_dict(ip):
    """Return the geo record for *ip* as a dict keyed by _RECORDS_KEYS.

    NOTE(review): if the lookup misses (record_by_ip returns None),
    zip(...) raises TypeError here — confirm callers expect that.
    """
    return dict(zip(_RECORDS_KEYS, record_by_ip(ip)))
def record_by_request_as_dict(request):
    """Return the geo record for the request's client IP as a dict.

    CONSISTENCY FIX: delegates to record_by_request instead of
    re-implementing record_by_ip(get_ip(request)) inline, so the two
    request-based entry points cannot drift apart.
    """
    return dict(zip(_RECORDS_KEYS, record_by_request(request)))
|
(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def test_fitted(self):
    """Check that fittedvalues, resid and predict() are mutually consistent."""
    # ignore wrapper for isinstance check
    from statsmodels.genmod.generalized_linear_model import GLMResults
    from statsmodels.discrete.discrete_model import DiscreteResults
    # FIXME: work around GEE has no wrapper
    if hasattr(self.results, '_results'):
        results = self.results._results
    else:
        results = self.results
    # GLM and discrete models are skipped: the endog - fitted == resid
    # identity is not (yet) guaranteed for them.
    if (isinstance(results, GLMResults) or
            isinstance(results, DiscreteResults)):
        raise SkipTest
    res = self.results
    fitted = res.fittedvalues
    assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
    assert_allclose(fitted, res.predict(), rtol=1e-12)
def test_predict_types(self):
    """Check predict() accepts ndarrays, nested lists and single rows."""
    res = self.results
    # squeeze to make 1d for single regressor test case
    p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
    # ignore wrapper for isinstance check
    from statsmodels.genmod.generalized_linear_model import GLMResults
    from statsmodels.discrete.discrete_model import DiscreteResults
    # FIXME: work around GEE has no wrapper
    if hasattr(self.results, '_results'):
        results = self.results._results
    else:
        results = self.results
    if (isinstance(results, GLMResults) or
            isinstance(results, DiscreteResults)):
        # SMOKE test only TODO
        res.predict(p_exog)
        res.predict(p_exog.tolist())
        res.predict(p_exog[0].tolist())
    else:
        # For other models predict() must reproduce the fitted values:
        fitted = res.fittedvalues[:2]
        assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
        # this needs reshape to column-vector:
        assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
                        rtol=1e-12)
        # only one prediction:
        assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
                        rtol=1e-12)
        assert_allclose(fitted[:1], res.predict(p_exog[0]),
                        rtol=1e-12)
        # predict doesn't preserve DataFrame, e.g. dot converts to ndarray
        # import pandas
        # predicted = res.predict(pandas.DataFrame(p_exog))
        # assert_(isinstance(predicted, pandas.DataFrame))
        # assert_allclose(predicted, fitted, rtol=1e-12)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
class TestGenericOLS(CheckGenericMixin):

    def setup(self):
        """Fit a fresh OLS model; the generic tests mutate self.results."""
        design = self.exog
        np.random.seed(987689)
        response = design.sum(1) + np.random.randn(design.shape[0])
        self.results = sm.OLS(response, self.exog).fit()
class TestGenericOLSOneExog(CheckGenericMixin):
    # Exercises the single-regressor (no constant) code path.

    def setup(self):
        """Fit a fresh one-regressor OLS; the generic tests mutate self.results."""
        regressor = self.exog[:, 1]
        np.random.seed(987689)
        response = regressor + np.random.randn(regressor.shape[0])
        self.results = sm.OLS(response, regressor).fit()
class TestGenericWLS(CheckGenericMixin):

    def setup(self):
        """Fit a fresh WLS model (unit weights); tests mutate self.results."""
        design = self.exog
        np.random.seed(987689)
        response = design.sum(1) + np.random.randn(design.shape[0])
        unit_weights = np.ones(len(response))
        self.results = sm.WLS(response, self.exog, weights=unit_weights).fit()
class TestGenericPoisson(CheckGenericMixin):

    def setup(self):
        """Fit a fresh Poisson model; the generic tests mutate self.results."""
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        model = sm.Poisson(y_count, x)  # , exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
        # use start_params to converge faster
        start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
        self.results = model.fit(start_params=start_params, method='bfgs',
                                 disp=0)
        # TODO: temporary, fixed in master
        self.predict_kwds = dict(exposure=1, offset=0)
class TestGenericNegativeBinomial(CheckGenericMixin):

    def setup(self):
        """Fit a fresh NegativeBinomial on randhie; tests mutate self.results.

        CLEANUP: removed the unused `exog = sm.add_constant(...)` local —
        the model is deliberately fit on the raw data.exog (no constant);
        start_params matches its 9 coefficients plus the dispersion alpha.
        """
        np.random.seed(987689)
        data = sm.datasets.randhie.load()
        mod = sm.NegativeBinomial(data.endog, data.exog)
        # start_params speeds up convergence of the test fit:
        start_params = np.array([-0.0565406 , -0.21213599, 0.08783076,
                                 -0.02991835, 0.22901974, 0.0621026,
                                 0.06799283, 0.08406688, 0.18530969,
                                 1.36645452])
        self.results = mod.fit(start_params=start_params, disp=0)
class TestGenericLogit(CheckGenericMixin):

    def setup(self):
        """Fit a fresh Logit model; the generic tests mutate self.results."""
        # fit for each test, because results will be changed by test
        x = self.exog
        nobs = x.shape[0]
        np.random.seed(987689)
        y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
        model = sm.Logit(y_bin, x)  # , exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
        # use start_params to converge faster
        start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
        self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestGenericRLM(CheckGenericMixin):

    def setup(self):
        """Fit a fresh RLM model; the generic tests mutate self.results."""
        design = self.exog
        np.random.seed(987689)
        response = design.sum(1) + np.random.randn(design.shape[0])
        self.results = sm.RLM(response, self.exog).fit()
class TestGenericGLM(CheckGenericMixin):

    def setup(self):
        """Fit a fresh GLM (default family); tests mutate self.results."""
        design = self.exog
        np.random.seed(987689)
        response = design.sum(1) + np.random.randn(design.shape[0])
        self.results = sm.GLM(response, self.exog).fit()
class TestGenericGEEPoisson(CheckGenericMixin):

    def setup(self):
        """Fit a fresh Poisson GEE with independence working correlation."""
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        groups = np.random.randint(0, 4, size=x.shape[0])
        # use start_params to speed up test, difficult convergence not tested
        start_params = np.array([0., 1., 1., 1.])
        # Independence is not exposed through the sm namespace; import directly.
        from statsmodels.genmod.dependence_structures import Independence
        vi = Independence()
        family = sm.families.Poisson()
        self.results = sm.GEE(y_count, self.exog, groups, family=family,
                              cov_struct=vi).fit(start_params=start_params)
class TestGenericGEEPoissonNaive(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# no sm. import
# vi = sm.dependence_structures.Independence()
from statsmodels.genmod.dependence_structures import Independence
|
e)
else:
cache_misses = cachemissarchive.CacheMissArchive(
options.cache_miss_file)
if options.server:
AddDnsForward(server_manager, options.server)
else:
host = platformsettings.get_server_ip_address(options.server_mode)
real_dns_lookup = dnsproxy.RealDnsLookup(
name_servers=[platformsettings.get_original_primary_nameserver()])
if options.record:
httparchive.HttpArchive.AssertWritable(replay_filename)
if options.append and os.path.exists(replay_filename):
http_archive = httparchive.HttpArchive.Load(replay_filename)
logging.info('Appending to %s (loaded %d existing responses)',
replay_filename, len(http_archive))
else:
http_archive = httparchive.HttpArchive()
else:
http_archive = httparchive.HttpArchive.Load(replay_filename)
logging.info('Loaded %d responses from %s',
len(http_archive), replay_filename)
server_manager.AppendRecordCallback(real_dns_lookup.ClearCache)
server_manager.AppendRecordCallback(http_archive.clear)
if options.dns_forwarding:
if not options.server_mode:
AddDnsForward(server_manager, host)
| AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive)
if options.ssl and options.certfile is None:
options.certfile = os.path.join(os.path.dirname(__file__), 'wpr_cert.pem')
AddWebProxy(server_manager, options, host, real_dns_lookup,
http_archive, cache_misses)
AddTrafficShaper(server_manager, options, host)
exit_status = 0
try:
server_manager.Run()
exce | pt KeyboardInterrupt:
logging.info('Shutting down.')
except (dnsproxy.DnsProxyException,
trafficshaper.TrafficShaperException,
platformsettings.NotAdministratorError,
platformsettings.DnsUpdateError) as e:
logging.critical('%s: %s', e.__class__.__name__, e)
exit_status = 1
except:
logging.critical(traceback.format_exc())
exit_status = 2
if options.record:
http_archive.Persist(replay_filename)
logging.info('Saved %d responses to %s', len(http_archive), replay_filename)
if cache_misses:
cache_misses.Persist()
logging.info('Saved %d cache misses and %d requests to %s',
cache_misses.get_total_cache_misses(),
len(cache_misses.request_counts.keys()),
options.cache_miss_file)
return exit_status
def GetOptionParser():
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that prints the description verbatim (no re-wrapping)."""

    def format_description(self, description):
        # optparse would re-wrap the text; return it as-is plus a newline.
        if description:
            return description + '\n'
        else:
            return ''
option_parser = optparse.OptionParser(
usage='%prog [options] replay_file',
formatter=PlainHelpFormatter(),
description=__doc__,
epilog='http://code.google.com/p/web-page-replay/')
option_parser.add_option('--spdy', default=False,
action='store_true',
help='Replay via SPDY. (Can be combined with --no-ssl).')
option_parser.add_option('-r', '--record', default=False,
action='store_true',
help='Download real responses and record them to replay_file')
option_parser.add_option('--append', default=False,
action='store_true',
help='Append responses to replay_file.')
option_parser.add_option('-l', '--log_level', default='debug',
action='store',
type='choice',
choices=('debug', 'info', 'warning', 'error', 'critical'),
help='Minimum verbosity level to log')
option_parser.add_option('-f', '--log_file', default=None,
action='store',
type='string',
help='Log file to use in addition to writting logs to stderr.')
option_parser.add_option('-e', '--cache_miss_file', default=None,
action='store',
dest='cache_miss_file',
type='string',
help='Archive file to record cache misses as pickled objects.'
'Cache misses occur when a request cannot be served in replay mode.')
network_group = optparse.OptionGroup(option_parser,
'Network Simulation Options',
'These options configure the network simulation in replay mode')
network_group.add_option('-u', '--up', default='0',
action='store',
type='string',
help='Upload Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
network_group.add_option('-d', '--down', default='0',
action='store',
type='string',
help='Download Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
network_group.add_option('-m', '--delay_ms', default='0',
action='store',
type='string',
help='Propagation delay (latency) in milliseconds. Zero means no delay.')
network_group.add_option('-p', '--packet_loss_rate', default='0',
action='store',
type='string',
help='Packet loss rate in range [0..1]. Zero means no loss.')
network_group.add_option('-w', '--init_cwnd', default='0',
action='store',
type='string',
help='Set initial cwnd (linux only, requires kernel patch)')
network_group.add_option('--net', default=None,
action='store',
type='choice',
choices=OptionsWrapper.NET_CHOICES,
help='Select a set of network options: %s.' % ', '.join(
OptionsWrapper.NET_CHOICES))
network_group.add_option('--shaping_type', default='dummynet',
action='store',
choices=('dummynet', 'proxy'),
help='When shaping is configured (i.e. --up, --down, etc.) decides '
'whether to use |dummynet| (default), or |proxy| servers.')
option_parser.add_option_group(network_group)
harness_group = optparse.OptionGroup(option_parser,
'Replay Harness Options',
'These advanced options configure various aspects of the replay harness')
harness_group.add_option('-S', '--server', default=None,
action='store',
type='string',
help='IP address of host running "replay.py --server_mode". '
'This only changes the primary DNS nameserver to use the given IP.')
harness_group.add_option('-M', '--server_mode', default=False,
action='store_true',
help='Run replay DNS & http proxies, and trafficshaping on --port '
'without changing the primary DNS nameserver. '
'Other hosts may connect to this using "replay.py --server" '
'or by pointing their DNS to this server.')
harness_group.add_option('-i', '--inject_scripts', default='deterministic.js',
action='store',
dest='inject_scripts',
help='A comma separated list of JavaScript sources to inject in all '
'pages. By default a script is injected that eliminates sources '
'of entropy such as Date() and Math.random() deterministic. '
'CAUTION: Without deterministic.js, many pages will not replay.')
harness_group.add_option('-D', '--no-diff_unknown_requests', default=True,
action='store_false',
dest='diff_unknown_requests',
help='During replay, do not show a diff of unknown requests against '
'their nearest match in the archive.')
harness_group.add_option('-C', '--use_closest_match', default=False,
action='store_true',
dest='use_closest_match',
help='During replay, if a request is not found, serve the closest match'
'in the archive instead of giving a 404.')
harness_group.add_option('-U', '--use_server_delay', default=False,
action='store_true',
dest='use_server_delay',
help='During replay, simulate server delay by delaying response time to'
'requests.')
harness_group.add_option('-I', '--screenshot_dir', default=None,
action='store',
type='string',
help='Save PNG images of the loaded page in the given directory.')
harness_group.add_option('-P', '--no-dns_private_passthrough', default=True,
action='store_false',
dest='dns_private_passthrough',
help='Don\'t forward DNS requests that resolve to private network '
'addresses. CAUTION: With this option important services like '
'Kerberos will resolve to the HTTP proxy address.')
harness_group.add_option('-x', '--no-dns_forwarding', default=True,
action='s |
Rtn.append((Prepend + FlInf[0], FlInf[1]))
for Dir in self.Dirs.values():
Rtn.extend(Dir.GfeInfCache(LstExts, BegDots, Prepend + Dir.Name + "/"))
return Rtn
def GfeCheckCache(self, Func, Cmd, Path, LstExts, BegDots = False):
TheDir = None
Rtn = None
try:
TheDir = self.GetDir(Path)
Rtn = TheDir.GfeInfCache(LstExts, BegDots)
except:
Rtn = Func(Cmd, Path, LstExts, BegDots)
TmpRtn = [0] * len(Rtn)
if len(Rtn) > 0 and Rtn[0][0][0] != '/':
Path += "/"
for c in xrange(len(Rtn)):
TmpRtn[c] = (Path + Rtn[c][0], Rtn[c][1])
self.SetFlInfLst(TmpRtn, True)
return Rtn
DRV_TYPE_REAL = 0  # drive backed by the local OS filesystem (FileDrv)
DRV_TYPE_RFS = 1   # drive backed by a remote filesystem session (RfsDrv)
class FileDrv:  # InfoGetters: Reader takes filename
    """A drive backed by the local OS filesystem.

    Every instance registers itself in the class-level ``AllDrv`` registry
    under its drive name.  ``Name`` is the prefix prepended to every path
    before it is handed to the OS (e.g. "C:" on Windows, "" on POSIX).
    """
    AllDrv = dict()       # drive name -> FileDrv instance
    CurDir = os.getcwd()  # working directory captured at import time

    def __init__(self, DrvName, PrependName):
        FileDrv.AllDrv[DrvName] = self
        self.Name = PrependName
        self.InfoGetters = dict()  # Cmd -> bulk reader descriptor (Reader + Params)
        self.InfCache = dict()     # Cmd -> per-command info cache
        self.SnglReaders = dict()  # Cmd -> single-file reader callable
        self.Type = DRV_TYPE_REAL

    def Open(self, fName, Mode):
        """Open a file on this drive with the built-in open()."""
        return open(self.Name + fName, Mode)

    def NativeOpen(self, fName, Mode):
        """Open a file with the project's native File type."""
        return File(self.Name + fName, Mode)

    def ListDir(self, Path):
        return os.listdir(self.Name + Path)

    def IsDir(self, Path):
        return os.path.isdir(self.Name + Path)

    def Exists(self, Path):
        return os.path.exists(self.Name + Path)

    def IsFile(self, Path):
        return os.path.isfile(self.Name + Path)

    def GetFilesExts(self, Path, LstExts, Invert = False, RtnBegDots = False):
        """Recursively list files under Path filtered by extension.

        Returns paths relative to Path (using '/' separators).  ``Invert``
        flips the extension filter; ``RtnBegDots`` includes dot-files.
        BUG FIX: the caller's LstExts list is no longer lower-cased in place.
        """
        LstExts = [Ext.lower() for Ext in LstExts]
        CurPath = self.Name + Path + "/"
        Next = [""]
        Rtn = list()
        # breadth-first walk: Next holds the subdirectories of the current level
        while len(Next) > 0:
            Cur = Next
            Next = list()
            for TestPath in Cur:
                try:
                    LstPaths = os.listdir(CurPath + TestPath)
                except OSError:
                    # BUG FIX: was `except WindowsError`, which is an
                    # undefined name on POSIX; OSError covers it everywhere.
                    continue
                for PathName in LstPaths:
                    Add = TestPath + PathName
                    if os.path.isdir(CurPath + Add):
                        Next.append(Add + "/")
                    elif not RtnBegDots and PathName[0] == '.':
                        continue
                    else:
                        Pos = PathName.rfind('.')
                        # "" in LstExts means "match files with no extension"
                        if Pos < 0 and (Invert ^ ("" in LstExts)):
                            Rtn.append(Add)
                        elif Pos >= 0 and (Invert ^ (PathName[Pos + 1:].lower() in LstExts)):
                            Rtn.append(Add)
        return Rtn

    def UseGfeReader(self, Cmd, Path, LstExts = None, BegDots = False):
        """Run Cmd's bulk reader over every matching file under Path.

        Returns a list of (relative file name, reader result) pairs.
        """
        CurReader = self.InfoGetters[Cmd]
        if LstExts is None:
            LstExts = CurReader.Params[0]
        LstFls = self.GetFilesExts(Path, list(LstExts), CurReader.Params[1], BegDots)
        Prepend = self.Name + Path + "/"
        return [(Fl, CurReader.Reader(Prepend + Fl)) for Fl in LstFls]

    def GetInfSingle(self, Cmd, fName):
        """Read info for one file and record it in the per-command cache."""
        # BUG FIX: the attribute set in __init__ is SnglReaders; the original
        # `self.SnglReader[Cmd]` always raised AttributeError.
        Rtn = self.SnglReaders[Cmd](fName)
        self.InfCache[Cmd].SetFlInf(fName, Rtn)
        return Rtn
if os.name == "nt":
    # Register one drive per existing Windows drive letter (A: through Z:).
    for c in xrange(26):
        CurDrv = chr(ord('A') + c)
        if os.path.isdir(CurDrv + ":"):
            FileDrv(CurDrv, CurDrv + ":")
elif os.name == "posix":
    # POSIX has a single rooted tree; expose it as pseudo-drive "C" with no
    # path prefix so "C:/..." maps straight onto "/...".
    FileDrv("C", "")
def EvalPath(Path):
    """Split a path of the form "<drive>:<path>" into (drive name, path).

    A path with no ":" is first anchored at the working directory captured
    on FileDrv, then re-split.
    """
    Pos = Path.find(":")
    if Pos == -1:
        # BUG FIX: bare `CurDir` is not a module-level name; the captured
        # working directory lives on the FileDrv class.
        # NOTE(review): on POSIX CurDir contains no ":" either, so relative
        # paths still mis-split there — pre-existing behavior, not changed.
        Path = FileDrv.CurDir + "/" + Path
        Pos = Path.find(":")
    return Path[0:Pos], Path[Pos + 1:]
def OpenFile(fName, Mode):
    """Open "<drive>:<path>" through the drive registered for it."""
    DrvName, RelPath = EvalPath(fName)
    Drv = FileDrv.AllDrv[DrvName]
    return Drv.Open(RelPath, Mode)
def NativeOpenFile(fName, Mode):
    """Open "<drive>:<path>" as a native project File via its drive."""
    DrvName, RelPath = EvalPath(fName)
    Drv = FileDrv.AllDrv[DrvName]
    return Drv.NativeOpen(RelPath, Mode)
def ListDir(Path):
    """List directory entries; the empty path lists the drive names."""
    if Path == "":
        return [DrvName + ":" for DrvName in FileDrv.AllDrv.keys()]
    DrvName, RelPath = EvalPath(Path)
    return FileDrv.AllDrv[DrvName].ListDir(RelPath)
def Exists(Path):
    """True if Path exists on a registered drive; False for unknown drives."""
    DrvName, RelPath = EvalPath(Path)
    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
    if DrvName not in FileDrv.AllDrv:
        return False
    return FileDrv.AllDrv[DrvName].Exists(RelPath)
def IsDir(Path):
    """True if Path is a directory on a registered drive; False otherwise."""
    DrvName, RelPath = EvalPath(Path)
    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
    if DrvName not in FileDrv.AllDrv:
        return False
    return FileDrv.AllDrv[DrvName].IsDir(RelPath)
def IsFile(Path):
    """True if Path is a regular file on a registered drive; False otherwise."""
    DrvName, RelPath = EvalPath(Path)
    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
    if DrvName not in FileDrv.AllDrv:
        return False
    return FileDrv.AllDrv[DrvName].IsFile(RelPath)
def GetFilesExts(Path, LstExt, Invert = False, RtnBegDots = False):
    """Delegate a recursive extension-filtered listing to Path's drive."""
    DrvName, RelPath = EvalPath(Path)
    Drv = FileDrv.AllDrv[DrvName]
    return Drv.GetFilesExts(RelPath, LstExt, Invert, RtnBegDots)
def GetInfGfe(Cmd, Path, LstExts = None, BegDots = False):#Warning BegDots functionality is in question
print Path
DrvName, Path = EvalPath(Path)
Drv = FileDrv.AllDrv[DrvName]
return Drv.InfCache[Cmd].GfeCheckCache(Drv.UseGfeReader, Cmd, Path, LstExts, BegDots)
def GetInfFile(Cmd, Path):
    """Return cached info for a single file, computing and caching on a miss."""
    # TODO (original): execute a specified callback to retrieve info when it
    # is not cached, like with the gfe readers.
    DrvName, RelPath = EvalPath(Path)
    Drv = FileDrv.AllDrv[DrvName]
    try:
        return Drv.InfCache[Cmd].GetFlInf(RelPath)
    except Exception:
        # was a bare `except:`; Exception still catches any cache-miss error
        # without swallowing KeyboardInterrupt/SystemExit
        return Drv.GetInfSingle(Cmd, RelPath)
class TxtUsrIface:
    """Line-oriented user interface bound to output, input and error streams."""

    def __init__(self, Ostream, Istream, Err):
        self.Os = Ostream
        self.Is = Istream
        self.Err = Err

    def getpass(self, Msg):
        """Prompt for a password; returns one input line, newline stripped."""
        self.Os.write(Msg)
        answer = self.Is.readline()
        return answer.replace("\n", "")

    def get(self, Msg):
        """Prompt and return one input line, newline stripped."""
        self.Os.write(Msg)
        answer = self.Is.readline()
        return answer.replace("\n", "")

    def out(self, Msg):
        """Write an ordinary message line to the output stream."""
        self.Os.write(Msg + "\n")

    def success(self, Msg):
        """Write a success message line (same channel as out())."""
        self.Os.write(Msg + "\n")

    def error(self, Msg):
        """Write an error message line to the error stream."""
        self.Err.write(Msg + "\n")
class RfsDrv(FileDrv):#InfoGetters have AddInfo = (Formating Sender, Is Data received encrypted), Reader takes bytes
def __init__(self, ServIface):
self.Name = ServIface.Username
self.Iface = ServIface
try:
self.FsLock = SingleMutex()
except:
self.FsLock = threading.Lock()
self.InfoGetters = dict()
self.SnglReaders = dict()
self.InfCache = dict()
self.Type = DRV_TYPE_RFS
FileDrv.AllDrv[self.Name] = self
def Open(self, fName, Mode):
self.FsLock.acquire()
Rtn = CltFile(self.Iface.Prot, fName, Mode)
self.FsLock.release()
Rtn.FsLock = self.FsLock
return Rtn
def NativeOpen(self, fName, Mode):
if not (isinstance(self.FsLock, KyleObj) or isinstance(self.Iface.Prot.Prot, KyleObj)):
raise Exception("This Drive does not support native KyleUtils Files in the current state")
return RfsFile(self.Iface.Prot.Prot, fName, Mode, self.FsLock)
def ListDir(self, Path):
with self.FsLock: return self.Iface.ListDir(Path)
def IsDir(self, Path):
with self.FsLock: return self.Iface.PathIsDir(Path)
def Exists(self, Path):
with self.FsLock: return self.Iface.PathExists(Path)
def IsFile(self, Path):
with self.FsLock: return self.Iface.PathIsFile(Path)
def GetFilesExts(self, Path, LstExts, Invert = False, RtnBegDots = False):
with self.FsLock: return self.Iface.GetFilesExts(Path, LstExts, Invert, RtnBegDots)
def UseGfeReader(self, Cmd, Path, LstExts = None, BegDots = False):
Cur = self.InfoGetters[Cmd]
if LstExts == None: LstExts = Cur.Params[0]
Cur.AddInfo[0](Iface.Prot, Path, LstExts, Cur.Params[1], BegDots)
| Rtn, | NoEncDat, EncDat = Iface.Prot.Recv()
if NoEncDat[0] != '\0': raise Exception(NoEncDat[1:])
CurStr = NoEncDat[1:]
if Cur.AddInfo[1]:
CurStr = EncDat
NumElem = GetLongStrBytes(CurStr[0:2])
CurStr = Cu |
)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed | to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, | minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
    """Resolve every SPARK-XXXX ticket referenced in a PR title.

    Falls back to an interactive prompt (no explicit id) when the title
    names no ticket.
    """
    jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
    if not jira_ids:
        resolve_jira_issue(merge_branches, comment)
    for jira_id in jira_ids:
        resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
    """
    Standardize a PR title into the '[SPARK-XXXXX] [MODULE] Issue' shape.
    Handles forms like "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue"
    or "SPARK XXX [MLLIB]: Issue"; already-canonical titles pass through.
    >>> standardize_jira_ref("[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
    '[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful'
    >>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
    '[SPARK-5954] [MLLIB] Top by key'
    >>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
    '[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
    >>> standardize_jira_ref("SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
    '[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
    >>> standardize_jira_ref("Additional information for users building from source code")
    'Additional information for users building from source code'
    """
    # Already canonical: leave the title untouched.
    if re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text):
        return text

    # Pull every JIRA reference out of the title, normalized to [SPARK-NNNN].
    jira_refs = []
    for ref in re.findall(r'(SPARK[-\s]*[0-9]{3,6})+', text, re.IGNORECASE):
        jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
        text = text.replace(ref, '')

    # Pull out bracketed component tags (letters, digits, spaces, dashes,
    # periods, commas), upper-cased.
    components = []
    for component in re.findall(r'(\[[\w\s,-\.]+\])', text, re.IGNORECASE):
        components.append(component.upper())
        text = text.replace(component, '')

    # Strip any leading punctuation/whitespace the removals left behind.
    leftover = re.search(r'^\W+(.*)', text, re.IGNORECASE)
    if leftover is not None:
        text = leftover.groups()[0]

    # Reassemble: refs, then components, then the remaining title; collapse
    # repeated spaces (covers the no-refs / no-components cases).
    clean_text = ' '.join(jira_refs).strip() + " " + ' '.join(components).strip() + " " + text.strip()
    return re.sub(r'\s+', ' ', clean_text.strip())
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = run_cmd("git rev-parse HEAD")[:8]
branches = get_json("%s/branches" % GITHUB_API_BASE)
#branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = "master"
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % pr["title"]
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print "Using modified title:"
else:
title = pr["title"]
print "Using original title:"
print title
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): |
import unittest
import traceback
from time import perf_counter
class CodewarsTestRunner(object):
    """Minimal unittest runner that emits Codewars protocol markers."""

    def __init__(self):
        pass

    def run(self, test):
        """Run *test* against a CodewarsTestResult and return the result."""
        result = CodewarsTestResult()
        started = perf_counter()
        print("\n<DESCRIBE::>Tests")
        test(result)
        elapsed_ms = 1000 * (perf_counter() - started)
        print("\n<COMPLETEDIN::>{:.4f}".format(elapsed_ms))
        return result
# Consulted by the traceback-pruning helpers below (_is_relevant_tb_level):
# frames whose globals contain __unittest are hidden from reported tracebacks.
__unittest = True
class CodewarsTestResult(unittest.TestResult):
    """unittest result adapter that prints Codewars protocol markers."""

    def __init__(self):
        super().__init__()
        self.start = 0.0  # perf_counter() timestamp of the currently running test

    def startTest(self, test):
        print("\n<IT::>" + test._testMethodName)
        super().startTest(test)
        self.start = perf_counter()

    def stopTest(self, test):
        elapsed_ms = 1000 * (perf_counter() - self.start)
        print("\n<COMPLETEDIN::>{:.4f}".format(elapsed_ms))
        super().stopTest(test)

    def addSuccess(self, test):
        print("\n<PASSED::>Test Passed")
        super().addSuccess(test)

    def addError(self, test, err):
        print("\n<ERROR::>Unhandled Exception")
        summary = ''.join(traceback.format_exception_only(err[0], err[1]))
        print("\n<LOG:ESC:Error>" + esc(summary))
        print("\n<LOG:ESC:Traceback>" + esc(self._exc_info_to_string(err, test)))
        super().addError(test, err)

    def addFailure(self, test, err):
        print("\n<FAILED::>Test Failed")
        summary = ''.join(traceback.format_exception_only(err[0], err[1]))
        print("\n<LOG:ESC:Failure>" + esc(summary))
        super().addFailure(test, err)

    # Adapted from unittest/result.py: format only the user-relevant part
    # of a traceback.
    def _exc_info_to_string(self, err, test):
        exctype, value, tb = err
        # Skip test-runner frames at the top of the stack.
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Also hide assert*() helper frames at the bottom.
            length = self._count_relevant_tb_levels(tb)
        else:
            length = None
        return ''.join(traceback.format_tb(tb, limit=length))

    def _is_relevant_tb_level(self, tb):
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        count = 0
        while tb and not self._is_relevant_tb_level(tb):
            count += 1
            tb = tb.tb_next
        return count
def esc(s):
    """Escape newlines for the Codewars single-line log protocol."""
    return "<:LF:>".join(s.split("\n"))
|
# Copyright (c) 2011 Nick Hurley <hurley at todesschaf dot org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Helpers for git extensions written in python
"""
import inspect
import os
import subprocess
import sys
import traceback
config = {}
def __extract_name_email(info, type_):
    """Extract a name and email from a string in the form:
    User Name <user@example.com> tstamp offset
    Stick that into our config dict for either git committer or git author.
    """
    # Drop the trailing "tstamp offset" words.
    val = ' '.join(info.split(' ')[:-2])
    angle = val.find('<')
    if angle > -1:
        # BUG FIX: val[:angle - 1] wraps to val[:-1] when the name is empty
        # (ident starts with '<'); slice to the bracket and strip instead.
        config['GIT_%s_NAME' % type_] = val[:angle].rstrip()
        config['GIT_%s_EMAIL' % type_] = val[angle + 1:-1]
    else:
        # No bracketed email present; store everything as the name.
        config['GIT_%s_NAME' % type_] = val
def __create_config():
    """Create our configuration dict from git and the env variables we're given.

    Fills the module-level ``config`` dict with: every `git var -l` value
    (booleans and ints coerced), committer/author name+email pairs,
    GIT_LIBEXEC, GIT_DIR, GIT_TOPLEVEL (when inside a repo), and any
    PY_GIT_* / PGL_OK overrides found in the environment.
    """
    devnull = file('/dev/null', 'w')  # Python 2 file(); used to silence git's stderr
    # Stick all our git variables in our dict, just in case anyone needs them
    gitvar = subprocess.Popen(['git', 'var', '-l'], stdout=subprocess.PIPE,
                              stderr=devnull)
    for line in gitvar.stdout:
        # Each line has the form "KEY=value".
        k, v = line.split('=', 1)
        if k == 'GIT_COMMITTER_IDENT':
            __extract_name_email(v, 'COMMITTER')
        elif k == 'GIT_AUTHOR_IDENT':
            __extract_name_email(v, 'AUTHOR')
        elif v == 'true':
            v = True
        elif v == 'false':
            v = False
        else:
            try:
                v = int(v)
            except:
                pass  # non-bool, non-int values stay as strings
        config[k] = v
    gitvar.wait()
    # Find out where git's sub-exes live
    gitexec = subprocess.Popen(['git', '--exec-path'], stdout=subprocess.PIPE,
                               stderr=devnull)
    config['GIT_LIBEXEC'] = gitexec.stdout.readlines()[0].strip()
    gitexec.wait()
    # Figure out the git dir in our repo, if applicable
    gitdir = subprocess.Popen(['git', 'rev-parse', '--git-dir'],
                              stdout=subprocess.PIPE, stderr=devnull)
    lines = gitdir.stdout.readlines()
    if gitdir.wait() == 0:
        config['GIT_DIR'] = lines[0].strip()
    # Figure out the top level of our repo, if applicable
    gittoplevel = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
                                   stdout=subprocess.PIPE, stderr=devnull)
    lines = gittoplevel.stdout.readlines()
    if gittoplevel.wait() == 0:
        config['GIT_TOPLEVEL'] = lines[0].strip()
    # We may have been called by a wrapper that passes us some info through the
    # environment. Use it if it's there
    for k, v in os.environ.iteritems():
        if k.startswith('PY_GIT_'):
            # Strip the "PY_" prefix so the key matches the GIT_* convention.
            config[k[3:]] = v
        elif k == 'PGL_OK':
            config['PGL_OK'] = True
    # Make sure our git dir and toplevel are fully-qualified
    if 'GIT_DIR' in config and not os.path.isabs(config['GIT_DIR']):
        git_dir = os.path.join(config['GIT_TOPLEVEL'], config['GIT_DIR'])
        config['GIT_DIR'] = os.path.abspath(git_dir)
def warn(msg):
    """Write *msg* to stderr as a warning; execution continues."""
    line = '%s\n' % (msg,)
    sys.stderr.write(line)
def die(msg):
    """Write *msg* to stderr and terminate the program with exit status 1."""
    line = '%s\n' % (msg,)
    sys.stderr.write(line)
    sys.exit(1)
def do_checks():
    """Check to ensure we've got everything we expect.

    Dies (exit status 1) when the running Python lacks the argparse module.
    """
    try:
        import argparse
    except ImportError:
        # was a bare `except:`, which would also hide KeyboardInterrupt and
        # unrelated errors; only a failed import means argparse is missing
        die('Your python must support the argparse module')
def main(_main):
    """Mark a function as the main function for our git subprogram. Based
    very heavily on automain by Gerald Kaszuba, but with modifications to make
    it work better for our purposes.

    Used as a decorator: runs _main() (after building config and running
    environment checks) only when the decorated function's module is being
    executed directly, then exits with its return value as the status code.
    """
    # Inspect the caller's frame to see whether its module is __main__.
    parent = inspect.stack()[1][0]
    name = parent.f_locals.get('__name__', None)
    if name == '__main__':
        __create_config()
        # A wrapper that already validated the environment sets PGL_OK.
        if 'PGL_OK' not in config:
            do_checks()
        rval = 1
        try:
            rval = _main()
        except Exception, e:
            # Show the message to the user; dump the full traceback to a
            # file for debugging.
            sys.stdout.write('%s\n' % str(e))
            f = file('pygit.tb', 'w')
            traceback.print_tb(sys.exc_info()[2], None, f)
            f.close()
        sys.exit(rval)
    return _main
if __name__ == '__main__':
    """If we get run as a script, check to make sure it's all ok and exit with
    an appropriate error code
    """
    # NOTE: the string above is a no-op expression kept for its explanation;
    # do_checks() exits non-zero (via die) when requirements are missing.
    do_checks()
    sys.exit(0)
|
#!/usr/bin/env python
"""
fla.gr user model
Given a userID or a username or a email, return the users couchc.database ORM
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
joshuaashby@joshashby.com
"""
from couchdb.mapping import Document, TextField, DateTimeField, \
BooleanField, IntegerField
import bcrypt
from datetime import datetime
import config.config as c
import utils.markdownUtils as mdu
from models.modelExceptions.userModelExceptions import \
multipleUsersError, passwordError, userError
from models.couch.baseCouchModel import baseCouchModel
class userORM(Document, baseCouchModel):
    """
    Base ORM for users in fla.gr; this one currently uses couchdb to store
    the data.
    TODO: Flesh this doc out a lot more
    """
    _name = "users"
    username = TextField()
    email = TextField()
    about = TextField(default="")  # markdown source of the profile blurb
    disable = BooleanField(default=False)
    emailVisibility = BooleanField(default=False)
    level = IntegerField(default=1)  # hasAdmin requires level > 50
    password = TextField()  # bcrypt hash, never the plain-text password
    joined = DateTimeField(default=datetime.now)
    docType = TextField(default="user")
    # Rendered version of `about`; filled in by format()/_search(), not persisted.
    formatedAbout = ""
    _view = 'typeViews/user'
    @classmethod
    def new(cls, username, password):
        """
        Make a new user, checking for username conflicts. If no conflicts are
        found the password is encrypted with bcrypt and the resulting `userORM` returned.
        :param username: The username that should be used for the new user
        :param password: The plain text password that should be used for the password.
        :return: `userORM` if the username is available
        :raises passwordError: if the password is empty
        :raises userError: if the username is already taken
        """
        if password == "":
            raise passwordError("Password cannot be null")
        elif not cls.find(username):
            passwd = bcrypt.hashpw(password, bcrypt.gensalt())
            user = cls(username=username, password=passwd)
            return user
        else:
            raise userError("That username is taken, please choose again.",
                    username)
    def setPassword(self, password):
        """
        Sets the users password to `password` (bcrypt-hashed) and stores the
        document immediately.
        :param password: plain text password to hash
        """
        self.password = bcrypt.hashpw(password, bcrypt.gensalt())
        self.store(c.database.couchServer)
    @staticmethod
    def _search(items, value):
        """
        Searches the list `items` for the given value.
        :param items: A list of ORM objects to search
        :param value: The value to search for; may be a username, an email,
            or an id
        :return: the single matching user (with formatedAbout rendered),
            or None when nothing matches
        :raises multipleUsersError: if more than one user matches
        """
        foundUser = []
        for user in items:
            if user.email == value \
                or user.username == value \
                or user.id == value:
                foundUser.append(user)
        if not foundUser:
            return None
        if len(foundUser)>1:
            raise multipleUsersError("Multiple Users", value)
        else:
            user = foundUser[0]
            user.formatedAbout = mdu.markClean(user.about)
            return user
    @property
    def hasAdmin(self):
        # Admin rights are granted to any user above level 50.
        return self.level > 50
    def format(self):
        """
        Formats markdown and dates into the right stuff:
        fills formatedAbout (rendered markdown) and formatedJoined
        (human-readable join timestamp).
        """
        self.formatedAbout = mdu.markClean(self.about)
        self.formatedJoined = datetime.strftime(self.joined, "%a %b %d, %Y @ %H:%I%p")
|
from rsk_mind.datasource import *
from rsk_mind.classifier import *
from transformer import CustomTransformer
PROJECT_NAME = 'test'
# Input/output data sources; `params` are positional args for `class`.
DATASOURCE= {
    'IN' : {
        'class' : CSVDataSource,
        'params' : ('in.csv', )
    },
    'OUT' : {
        'class' : CSVDataSource,
        'params' : ('out.csv', )
    }
}
# Persist analysis output to the named JSON file.
ANALYSIS = {
    'persist': True,
    'out': 'info.json'
}
# Feature transformer applied to the dataset.
TRANSFORMER = CustomTransformer
# Training setup: one XGBoost classifier over the IN datasource.
TRAINING = {
    'algorithms' : [
        {
            'classifier': XGBoostClassifier,
            # XGBoost booster ("bst:") and learning parameters.
            'parameters' : {
                'bst:max_depth': 7,
                'bst:eta': 0.3,
                'bst:subsample': 0.5,
                'silent': 0,
                'objective': 'binary:logistic',
                'nthread': 4,
                'eval_metric': 'auc'
            },
            'dataset': DATASOURCE['IN']
        }
    ],
    'ensemble': 'max',
    'dataset': DATASOURCE['IN']
}
# Engine-specific overrides (none configured).
ENGINE = {
}
np.arange(10), transform=my_trans + ax.transData)
plt.draw()
# enable the transform to raise an exception if it's non-affine transform
# method is triggered again.
my_trans.raise_on_transform = True
ax.transAxes.invalidate()
plt.draw()
def test_external_transform_api():
    """A user object exposing _as_mpl_transform must be usable as a transform."""
    class ScaledBy(object):
        def __init__(self, scale_factor):
            self._scale_factor = scale_factor

        def _as_mpl_transform(self, axes):
            scaling = mtransforms.Affine2D().scale(self._scale_factor)
            return scaling + axes.transData

    ax = plt.axes()
    line, = plt.plot(np.arange(10), transform=ScaledBy(10))
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 100)
    # the top transform of the line must be the scale transform
    expected = mtransforms.Affine2D().scale(10).get_matrix()
    assert_allclose(line.get_transform()._a.get_matrix(), expected)
@image_comparison(baseline_images=['pre_transform_data'],
                  tol=0.08)
def test_pre_transform_plotting():
    # a catch-all for as many as possible plot layouts which handle
    # pre-transforming the data NOTE: The axis range is important in this
    # plot. It should be x10 what the data suggests it should be
    ax = plt.axes()
    # every artist below supplies data in un-scaled units and relies on this
    # transform to place it
    times10 = mtransforms.Affine2D().scale(10)
    ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData)
    ax.pcolormesh(np.linspace(0, 4, 7),
                  np.linspace(5.5, 8, 9),
                  np.arange(48).reshape(8, 6),
                  transform=times10 + ax.transData)
    ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
               transform=times10 + ax.transData)
    x = np.linspace(8, 10, 20)
    y = np.linspace(1, 5, 20)
    u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
    v = np.sin(x) - np.cos(y[:, np.newaxis])
    df = 25. / 30.  # Compatibility factor for old test image
    ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
                  density=(df, df), linewidth=u**2 + v**2)
    # reduce the vector data down a bit for barb and quiver plotting
    x, y = x[::3], y[::3]
    u, v = u[::3, ::3], v[::3, ::3]
    ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)
    ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)
def test_contour_pre_transform_limits():
    """dataLim must reflect the pre-scaled (0.1x) contour data."""
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    pre_scale = mtransforms.Affine2D().scale(0.1)
    ax.contourf(xs, ys, np.log(xs * ys), transform=pre_scale + ax.transData)
    expected = np.array([[1.5, 1.24], [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolor_pre_transform_limits():
    # Based on test_contour_pre_transform_limits()
    """dataLim must reflect the pre-scaled (0.1x) pcolor data."""
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    pre_scale = mtransforms.Affine2D().scale(0.1)
    ax.pcolor(xs, ys, np.log(xs * ys), transform=pre_scale + ax.transData)
    expected = np.array([[1.5, 1.24], [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolormesh_pre_transform_limits():
    # Based on test_contour_pre_transform_limits()
    """dataLim must reflect the pre-scaled (0.1x) pcolormesh data."""
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    pre_scale = mtransforms.Affine2D().scale(0.1)
    ax.pcolormesh(xs, ys, np.log(xs * ys), transform=pre_scale + ax.transData)
    expected = np.array([[1.5, 1.24], [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())
def test_Affine2D_from_values():
    """Each of the six affine coefficients must land in its own matrix slot."""
    points = np.array([[0, 0],
                       [10, 20],
                       [-1, 0],
                       ])
    # (from_values args, expected transform of `points`) — one non-zero
    # coefficient at a time.
    cases = [
        ((1, 0, 0, 0, 0, 0), [[0, 0], [10, 0], [-1, 0]]),
        ((0, 2, 0, 0, 0, 0), [[0, 0], [0, 20], [0, -2]]),
        ((0, 0, 3, 0, 0, 0), [[0, 0], [60, 0], [0, 0]]),
        ((0, 0, 0, 4, 0, 0), [[0, 0], [0, 80], [0, 0]]),
        ((0, 0, 0, 0, 5, 0), [[5, 0], [5, 0], [5, 0]]),
        ((0, 0, 0, 0, 0, 6), [[0, 6], [0, 6], [0, 6]]),
    ]
    for coeffs, expected in cases:
        t = mtransforms.Affine2D.from_values(*coeffs)
        actual = t.transform(points)
        assert_almost_equal(actual, np.array(expected))
def test_clipping_of_log():
    # issue 804
    """Non-affine log transform of a closed path must preserve its codes."""
    M, L, C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
    vertices = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)]
    path = Path(vertices, [M, L, L, L, C])
    # something like this happens when plotting logarithmic histograms
    trans = mtransforms.BlendedGenericTransform(mtransforms.Affine2D(),
                                                LogScale.Log10Transform('clip'))
    tpath = trans.transform_path_non_affine(path)
    segments = tpath.iter_segments(trans.get_affine(),
                                   clip=(0, 0, 100, 100),
                                   simplify=False)
    tpoints, tcodes = list(zip(*segments))
    assert_allclose(tcodes, [M, L, L, L, C])
class NonAffineForTest(mtransforms.Transform):
    """
    Wraps an arbitrary transform but advertises itself as non-affine, so
    the non-affine code paths can be exercised with a simple affine
    transform doing the actual work.
    """
    is_affine = False
    input_dims = 2
    output_dims = 2

    def __init__(self, real_trans, *args, **kwargs):
        # the transform that actually does the work
        self.real_trans = real_trans
        mtransforms.Transform.__init__(self, *args, **kwargs)

    def transform_non_affine(self, values):
        return self.real_trans.transform(values)

    def transform_path_non_affine(self, path):
        return self.real_trans.transform_path(path)
class BasicTransformTests(unittest.TestCase):
    def setUp(self):
        """Build three affine (ta*) and three pseudo-non-affine (tn*)
        transforms plus the composed stacks used by the test methods."""
        self.ta1 = mtransforms.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
        self.ta2 = mtransforms.Affine2D(shorthand_name='ta2').translate(10, 0)
        self.ta3 = mtransforms.Affine2D(shorthand_name='ta3').scale(1, 2)
        self.tn1 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
                                    shorthand_name='tn1')
        self.tn2 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
                                    shorthand_name='tn2')
        self.tn3 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
                                    shorthand_name='tn3')
        # creates a transform stack which looks like ((A, (N, A)), A)
        self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
        # creates a transform stack which looks like (((A, N), A), A)
        self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
        # creates a transform stack which is a subset of stack2
        self.stack2_subset = self.tn1 + self.ta2 + self.ta3
        # when in debug, the transform stacks can produce dot images:
        # self.stack1.write_graphviz(file('stack1.dot', 'w'))
        # self.stack2.write_graphviz(file('stack2.dot', 'w'))
        # self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))
    def test_transform_depth(self):
        """stack1/stack2 compose four transforms; the subset composes three."""
        assert self.stack1.depth == 4
        assert self.stack2.depth == 4
        assert self.stack2_subset.depth == 3
def test_left_to_right_iteration(self):
stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
# stack3.write_graphviz(file('stack3.dot', 'w'))
target_transforms = [stack3,
(self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
(self.ta2 + self.tn2) + self.ta3,
self.tn2 |
"""
This inline scripts makes it possible to use mitmproxy in scenarios where IP spoofing has been used to redirect
connections to mitmproxy. The way this works is that we rely on either the TLS Server Name Indication (SNI) or the
Host header of the HTTP request.
Of course, this is not foolproof - if an HTTPS connection comes without SNI, we don't
know the actual target and cannot construct a certificate that looks valid.
Similarly, if there's no Host header or a spoofed Host header, we're out of luck as well.
Using transparent mode is the better option most of the time.
Usage:
mitmproxy
-p 80
-R http://example.com/ // Used as the target location if no Host header is present
mitmproxy
-p 443
-R https://example.com/ // Used as the target locaction if neither SNI nor host header are present.
mitmproxy will always connect to the default location first, so it must be reachable.
As a workaround, you can spawn an arbitrary HTTP server and use that for both endpoints, e.g.
mitmproxy -p 80 -R http://localhost:8000
mitmproxy -p 443 -R https2http://localhost:8000
"""
def request(context, flow):
    """Rewrite the request destination from SNI or the Host header.

    For TLS connections, prefer the SNI the client sent; fall back to the Host
    header. For plain HTTP, only the Host header is available.
    """
    if flow.client_conn.ssl_established:
        # TLS SNI or Host header
        sni = flow.client_conn.connection.get_servername()
        flow.request.host = sni or flow.request.pretty_host(hostheader=True)
        # If you use a https2http location as default destination, these
        # attributes need to be corrected as well:
        flow.request.port = 443
        flow.request.scheme = "https"
    else:
        # Host header
        flow.request.host = flow.request.pretty_host(hostheader=True)
|
ew_models_to_pull = build_pullable_view_models_from_data_models(
self.domain, upstream_link, downstream_apps, downstream_fixtures, downstream_reports,
downstream_keywords, timezone, is_superuser=is_superuser
)
view_models_to_push = build_view_models_from_data_models(
self.domain, upstream_apps, upstream_fixtures, upstream_reports, upstream_keywords,
is_superuser=is_superuser
)
account = BillingAccount.get_account_by_domain(self.request.domain)
available_domains_to_link = get_available_domains_to_link(self.request.domain,
self.request.couch_user,
billing_account=account)
upstream_domain_urls = []
upstream_domains = get_available_upstream_domains(self.request.domain,
self.request.couch_user,
billing_account=account)
for domain in upstream_domains:
upstream_domain_urls.append({'name': domain, 'url': reverse('domain_links', args=[domain])})
if upstream_link and upstream_link.is_remote:
remote_linkable_ucr = get_remote_linkable_ucr(upstream_link)
else:
remote_linkable_ucr = None
return {
'domain': self.domain,
'timezone': timezone.localize(datetime.utcnow()).tzname(),
'has_release_management_privilege': domain_has_privilege(self.domain, RELEASE_MANAGEMENT),
'is_superuser': is_superuser,
'view_data': {
'is_downstream_domain': bool(upstream_link),
'upstream_domains': upstream_domain_urls,
'available_domains': available_domains_to_link,
'upstream_link': build_domain_link_view_model(upstream_link, timezone) if upstream_link else None,
'view_models_to_pull': sorted(view_models_to_pull, key=lambda m: m['name']),
'view_models_to_push': sorted(view_models_to_push, key=lambda m: m['name']),
'linked_domains': sorted(linked_domains, key=lambda d: d['downstream_domain']),
'linkable_ucr': remote_linkable_ucr,
},
}
@method_decorator(domain_admin_required, name='dispatch')
class DomainLinkRMIView(JSONResponseMixin, View, DomainViewMixin):
    """Remote-invocation endpoints backing the linked-domain settings page.

    Every method takes the deserialized request payload as ``in_data`` and
    returns a JSON-serializable dict with at least a ``success`` flag.
    """
    urlname = "domain_link_rmi"

    @allow_remote_invocation
    def update_linked_model(self, in_data):
        """Pull one data model from this domain's upstream into this domain.

        ``in_data['model']`` carries ``type`` and an optional ``detail`` blob
        identifying the specific object (e.g. a report) to pull.
        """
        model = in_data['model']
        type_ = model['type']
        detail = model['detail']
        detail_obj = wrap_detail(type_, detail) if detail else None

        upstream_link = get_upstream_domain_link(self.domain)
        error = ""
        try:
            update_model_type(upstream_link, type_, detail_obj)
            model_detail = detail_obj.to_json() if detail_obj else None
            upstream_link.update_last_pull(type_, self.request.couch_user._id, model_detail=model_detail)
        except (DomainLinkError, UnsupportedActionError) as e:
            error = str(e)

        track_workflow(
            self.request.couch_user.username,
            "Linked domain: pulled data model",
            {"data_model": type_}
        )

        timezone = get_timezone_for_request()
        # NOTE(review): when the pull failed, last_pull may be stale (or None if
        # nothing was ever pulled) — confirm server_to_user_time tolerates that.
        return {
            'success': not error,
            'error': error,
            'last_update': server_to_user_time(upstream_link.last_pull, timezone)
        }

    @allow_remote_invocation
    def delete_domain_link(self, in_data):
        """Soft-delete the link from this (upstream) domain to a downstream domain."""
        linked_domain = in_data['linked_domain']
        link = DomainLink.objects.filter(linked_domain=linked_domain, master_domain=self.domain).first()
        # Bug fix: .first() returns None when no matching link exists; the
        # previous code dereferenced it unconditionally and raised
        # AttributeError (a 500) instead of reporting failure.
        if link is None:
            return {
                'success': False,
            }
        link.deleted = True
        link.save()

        track_workflow(self.request.couch_user.username, "Linked domain: domain link deleted")

        return {
            'success': True,
        }

    @allow_remote_invocation
    def create_release(self, in_data):
        """Kick off an async push of the selected models to the selected downstream domains."""
        push_models.delay(self.domain, in_data['models'], in_data['linked_domains'],
                          in_data['build_apps'], self.request.couch_user.username)
        track_workflow(
            self.request.couch_user.username,
            "Linked domain: pushed data models",
            {"data_models": in_data['models']}
        )
        return {
            'success': True,
            'message': ugettext('''
                Your release has begun. You will receive an email when it is complete.
                Until then, to avoid linked domains receiving inconsistent content, please
                avoid editing any of the data contained in the release.
            '''),
        }

    @allow_remote_invocation
    def create_domain_link(self, in_data):
        """Create a link making ``in_data['downstream_domain']`` a downstream of this domain."""
        domain_to_link = in_data['downstream_domain']
        try:
            domain_link = link_domains(self.request.couch_user, self.domain, domain_to_link)
        except (DomainDoesNotExist, DomainLinkAlreadyExists, DomainLinkNotAllowed, DomainLinkError) as e:
            return {'success': False, 'message': str(e)}

        track_workflow(self.request.couch_user.username, "Linked domain: domain link created")
        domain_link_view_model = build_domain_link_view_model(domain_link, get_timezone_for_request())
        return {'success': True, 'domain_link': domain_link_view_model}

    @allow_remote_invocation
    def create_remote_report_link(self, in_data):
        """Link a UCR report across a remote domain link identified by URL + domain names."""
        linked_domain = in_data['linked_domain']
        # The upstream domain arrives as a URL; its last path segment is the domain name.
        master_domain = in_data['master_domain'].strip('/').split('/')[-1]
        report_id = in_data['report_id']
        link = DomainLink.objects.filter(
            remote_base_url__isnull=False,
            linked_domain=linked_domain,
            master_domain=master_domain,
        ).first()
        if link:
            create_linked_ucr(link, report_id)
            return {'success': True}
        else:
            return {'success': False}
def link_domains(couch_user, upstream_domain, downstream_domain):
    """Validate and create a domain link; returns the new DomainLink.

    Raises DomainDoesNotExist, DomainLinkAlreadyExists, or DomainLinkNotAllowed
    when a precondition fails.
    """
    if not domain_exists(downstream_domain):
        raise DomainDoesNotExist(
            ugettext("The project space {} does not exist. Verify that the name is correct, and that the "
                     "domain has not been deleted.").format(downstream_domain)
        )

    if get_active_domain_link(upstream_domain, downstream_domain):
        raise DomainLinkAlreadyExists(
            ugettext(
                "The project space {} is already a downstream project space of {}."
            ).format(downstream_domain, upstream_domain)
        )

    if not user_has_admin_access_in_all_domains(couch_user, [upstream_domain, downstream_domain]):
        raise DomainLinkNotAllowed(
            ugettext("You must be an admin in both project spaces to create a link.")
        )

    return DomainLink.link_domains(downstream_domain, upstream_domain)
class DomainLinkHistoryReport(GenericTabularReport):
name = 'Linked Project Space History'
base_template = "reports/base_template.html"
section_name = 'Project Settings'
slug = 'project_link_report'
dispatcher = ReleaseManagementReportDispatcher
ajax_pagination = True
asynchronous = False
sortable = False
@property
def fields(self):
if self.upstream_link:
fields = []
else:
fields = ['corehq.apps.linked_domain.filters.DomainLinkFilter']
fields.append('corehq.apps.linked_domain.filters.DomainLinkModelFilter')
return fields
@property
def link_model(self):
return self.request.GET.get('domain_link_model')
    @property
    @memoized
    def domain_link(self):
        # Resolve the ?domain_link=<pk> query parameter to a DomainLink where
        # this domain is the upstream (master) side. Falls through (returning
        # None implicitly) when the parameter is absent or no such link exists.
        if self.request.GET.get('domain_link'):
            try:
                return DomainLink.all_objects.get(
                    pk=self.request.GET.get('domain_link'),
                    master_domain=self.domain
                )
            except DomainLink.DoesNotExist:
                pass
    @property
    @memoized
    def upstream_link(self):
        # Link pointing at this domain's upstream, or None when this domain
        # is not a downstream domain.
        return get_upstream_domain_link(self.domain)
@property
@memoized
def select |
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
from __future__ import absolute_import
from os import path
class Visualizable(object):
    """Mixin that gives subclasses an optional visualizer sink for metrics."""

    def __init__(self, visualizer=None):
        if visualizer is not None:
            assert isinstance(visualizer, BaseVisualizer), "visualizer should derive from BaseVisualizer"
        self._visualizer = visualizer

    def visualize(self, index, tag, value, **kwargs):
        """Forward one (index, tag, value, kwargs) observation, if a sink is attached."""
        sink = self._visualizer
        if sink is not None:
            sink << (index, tag, value, kwargs)

    @property
    def can_visualize(self):
        """True when a visualizer sink is attached."""
        return self._visualizer is not None
class BaseVisualizer(object):
    """ Provide a unified interface for observing the training progress """

    def add_entry(self, index, key, result, **kwargs):
        """Record one observation; subclasses must override."""
        raise NotImplementedError()

    def __lshift__(self, other):
        """Stream-style entry point: ``visualizer << (index, key, value[, kwargs])``."""
        if isinstance(other, tuple):
            if len(other) >= 3:
                # Bug fix: Visualizable.visualize sends a 4-tuple whose last
                # element is a kwargs dict; it was previously discarded here.
                extra = other[3] if len(other) > 3 and isinstance(other[3], dict) else {}
                self.add_entry(other[0], str(other[1]), other[2], **extra)
            else:
                raise ValueError("Provided tuple should be of the form (key, value)")
        else:
            raise ValueError("Trying to use stream operator without a tuple (key, value)")
class EmptyVisualizer(BaseVisualizer):
    """ A boilerplate visualizer that does nothing """

    def add_entry(self, index, key, result, **kwargs):
        # Intentionally a no-op sink; useful as a default when no
        # visualization backend is configured.
        return None
class ConsoleVisualizer(BaseVisualizer):
    """Write each observation to stdout, one formatted line per entry."""

    # %-style template: prefix, index, key, value.
    CONSOLE_DEFAULT_FORMAT = "[%s] %d : %s -> %.3f"

    def __init__(self, format=None, prefix=None):
        # Fall back to the class defaults when either option is omitted/empty.
        self._format = format or ConsoleVisualizer.CONSOLE_DEFAULT_FORMAT
        self._prefix = prefix or '-'

    def add_entry(self, index, key, result, **kwargs):
        line = self._format % (self._prefix, index, key, result)
        print(line)
class CsvVisualizer(BaseVisualizer):
    """ Write data to file. The following formats are supported: CSV, JSON, Excel. """

    def __init__(self, output_file, override=False):
        """Buffer observations in memory, to be written to output_file on close().

        Raises if output_file already exists and override is False.
        """
        if path.exists(output_file) and not override:
            raise Exception('%s already exists and override is False' % output_file)

        super(CsvVisualizer, self).__init__()
        self._file = output_file
        self._data = {}  # maps index -> {key: result}

    def add_entry(self, index, key, result, **kwargs):
        """Record one (index, key) -> result observation."""
        # Bug fix: self._data[index] raised KeyError for the first entry of a
        # new index; create the per-index row on demand instead.
        row = self._data.setdefault(index, {})
        if key in row:
            print('Warning: Found previous value for %s in visualizer' % key)
        row.update({key: result})

    def close(self, format='csv'):
        """Flush the buffered data to self._file in the requested format."""
        import pandas as pd

        frame = pd.DataFrame.from_dict(self._data, orient='index')
        if format == 'csv':
            frame.to_csv(self._file)
        elif format == 'json':
            frame.to_json(self._file)
        else:
            writer = pd.ExcelWriter(self._file)
            frame.to_excel(writer)
            writer.save()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Bug fix: returning self (truthy) from __exit__ silently suppressed
        # any exception raised inside the with-block; return False so
        # exceptions propagate normally.
        return False
|
dient_tape.gradient(microbatch_loss, var_list)
sample_state = self._dp_sum_query.accumulate_record(
sample_params, sample_state, grads)
return sample_state
for idx in range(self._num_microbatches):
sample_state = process_microbatch(idx, sample_state)
grad_sums, self._global_state, _ = (
self._dp_sum_query.get_noised_result(sample_state,
self._global_state))
def normalize(v):
return v / tf.cast(self._num_microbatches, tf.float32)
final_grads = tf.nest.map_structure(normalize, grad_sums)
grads_and_vars = list(zip(final_grads, var_list))
return grads_and_vars
else:
# Note: it would be closer to the correct i.i.d. sampling of records | if
# we sampled each microbatch from the appropriate binomial distribution,
# although that still wouldn't be quite correct because it would be
# sampling from the dataset without | replacement.
if self._num_microbatches is None:
self._num_microbatches = tf.shape(input=loss)[0]
microbatches_losses = tf.reshape(loss, [self._num_microbatches, -1])
sample_params = (
self._dp_sum_query.derive_sample_params(self._global_state))
def process_microbatch(i, sample_state):
"""Process one microbatch (record) with privacy helper."""
self_super = super(DPOptimizerClass, self)
mean_loss = tf.reduce_mean(
input_tensor=tf.gather(microbatches_losses, [i]))
if hasattr(self_super, 'compute_gradients'):
# This case covers optimizers in tf.train.
compute_gradients_fn = self_super.compute_gradients
else:
# This case covers Keras optimizers from optimizers_v2.
compute_gradients_fn = self_super._compute_gradients # pylint: disable=protected-access
if gradient_tape:
# This is intended to work for TF2 and may not work for TF1.
with gradient_tape.stop_recording():
grads_list = list(gradient_tape.gradient(mean_loss, var_list))
else:
grads, _ = zip(*compute_gradients_fn(
mean_loss, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, grad_loss))
grads_list = list(grads)
sample_state = self._dp_sum_query.accumulate_record(
sample_params, sample_state, grads_list)
return sample_state
if var_list is None:
var_list = (
tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
sample_state = self._dp_sum_query.initial_sample_state(var_list)
if self._unroll_microbatches:
for idx in range(self._num_microbatches):
sample_state = process_microbatch(idx, sample_state)
else:
# Use of while_loop here requires that sample_state be a nested
# structure of tensors. In general, we would prefer to allow it to be
# an arbitrary opaque type.
cond_fn = lambda i, _: tf.less(i, self._num_microbatches)
body_fn = lambda i, state: [tf.add(i, 1), process_microbatch(i, state)] # pylint: disable=line-too-long
idx = tf.constant(0)
_, sample_state = tf.while_loop(
cond=cond_fn,
body=body_fn,
loop_vars=[idx, sample_state],
parallel_iterations=self._while_loop_parallel_iterations)
grad_sums, self._global_state, _ = (
self._dp_sum_query.get_noised_result(sample_state,
self._global_state))
def normalize(v):
try:
return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32))
except TypeError:
return None
final_grads = tf.nest.map_structure(normalize, grad_sums)
return list(zip(final_grads, var_list))
    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
      # pylint: disable=g-doc-args, g-doc-return-or-yield
      """DP-SGD version of base class method."""
      # Refuse to apply gradients that were not produced by this optimizer's
      # compute_gradients(): gradients computed elsewhere carry no DP
      # guarantee (this happens e.g. in Keras training loops on TF 2.0+).
      assert self._was_compute_gradients_called, (
          'compute_gradients() on the differentially private optimizer was not'
          ' called. Which means that the training is not differentially '
          'private. It happens for example in Keras training in TensorFlow '
          '2.0+.')
      return super(DPOptimizerClass, self).apply_gradients(
          grads_and_vars=grads_and_vars, global_step=global_step, name=name)
return DPOptimizerClass
def make_gaussian_optimizer_class(cls):
"""Given a subclass of `tf.compat.v1.train.Optimizer`, returns a subclass using DP-SGD with Gaussian averaging.
Args:
cls: Class from which to derive a DP subclass. Should be a subclass of
`tf.compat.v1.train.Optimizer`.
Returns:
A subclass of `cls` using DP-SGD with Gaussian averaging.
"""
class DPGaussianOptimizerClass(make_optimizer_class(cls)): # pylint: disable=empty-docstring
__doc__ = ("""DP subclass of `{}`.
You can use this as a differentially private replacement for
`tf.compat.v1.train.{}`. This optimizer implements DP-SGD using
the standard Gaussian mechanism.
When instantiating this optimizer, you need to supply several
DP-related arguments followed by the standard arguments for
`{}`.
Examples:
```python
# Create optimizer.
opt = {}(l2_norm_clip=1.0, noise_multiplier=0.5, num_microbatches=1,
<standard arguments>)
```
When using the optimizer, be sure to pass in the loss as a
rank-one tensor with one entry for each example.
```python
# Compute loss as a tensor. Do not call tf.reduce_mean as you
# would with a standard optimizer.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
train_op = opt.minimize(loss, global_step=global_step)
```
""").format(
'tf.compat.v1.train.' + cls.__name__, cls.__name__, cls.__name__,
'DP' + cls.__name__.replace('Optimizer', 'GaussianOptimizer'))
    def __init__(
        self,
        l2_norm_clip,
        noise_multiplier,
        num_microbatches=None,
        unroll_microbatches=False,
        *args,  # pylint: disable=keyword-arg-before-vararg
        **kwargs):
      """Initializes the `DPGaussianOptimizerClass`.
      Args:
        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
        noise_multiplier: Ratio of the standard deviation to the clipping norm.
        num_microbatches: Number of microbatches into which each minibatch is
          split. If `None`, will default to the size of the minibatch, and
          per-example gradients will be computed.
        unroll_microbatches: If true, processes microbatches within a Python
          loop instead of a `tf.while_loop`. Can be used if using a
          `tf.while_loop` raises an exception.
        *args: These will be passed on to the base class `__init__` method.
        **kwargs: These will be passed on to the base class `__init__` method.
      """
      # The hyperparameters are kept on the instance so get_config() can
      # serialize them for Keras checkpointing/deserialization.
      self._l2_norm_clip = l2_norm_clip
      self._noise_multiplier = noise_multiplier
      self._num_microbatches = num_microbatches
      self._base_optimizer_class = cls

      # Gaussian mechanism: per-microbatch gradients clipped to l2_norm_clip,
      # with noise stddev = l2_norm_clip * noise_multiplier added to the sum.
      dp_sum_query = gaussian_query.GaussianSumQuery(
          l2_norm_clip, l2_norm_clip * noise_multiplier)

      super(DPGaussianOptimizerClass,
            self).__init__(dp_sum_query, num_microbatches, unroll_microbatches,
                           *args, **kwargs)
def get_config(self):
"""Creates configuration for Keras serialization.
This method will be called when Keras creates model checkpoints
and is necessary so that deserialization can be performed.
Returns:
A dict object storing arguments to be passed to the __init__ method
upon deserialization.
"""
|
import wx
from gui.controller.CustomListCtrl import CustomListCtrl
from gui.controller.PlotCtrl import PlotCtrl
class WofSitesView(wx.Frame):
    # Frame presenting a WOF site's time series: a plot on top, date-range and
    # action controls in the middle, and a variable table at the bottom.

    def __init__(self, parent, title, table_columns):
        """Build the fixed-size frame and lay out its three stacked panels.

        table_columns: column headers for the variable list at the bottom.
        """
        wx.Frame.__init__(self, parent=parent, id=-1, title=title, pos=wx.DefaultPosition, size=(680, 700),
                          style=wx.FRAME_FLOAT_ON_PARENT | wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MAXIMIZE_BOX)

        # Default date range: the last 7 days.
        self.start_date = wx.DateTime_Now() - 7 * wx.DateSpan_Day()
        self.end_date = wx.DateTime_Now()
        self.parent = parent
        self._data = None

        panel = wx.Panel(self)
        top_panel = wx.Panel(panel)
        middle_panel = wx.Panel(panel, size=(-1, 30))
        lower_panel = wx.Panel(panel)

        # Top panel: the plot canvas.
        hboxTopPanel = wx.BoxSizer(wx.HORIZONTAL)
        self.plot = PlotCtrl(top_panel)
        hboxTopPanel.Add(self.plot.canvas, 1, wx.EXPAND | wx.ALL, 2)
        top_panel.SetSizer(hboxTopPanel)

        # Middle panel: date pickers, action buttons, and line-style selector.
        hboxMidPanel = wx.BoxSizer(wx.HORIZONTAL)
        self.startDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="Start")
        self.startDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.start_date)
        self.endDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="End")
        self.endDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.end_date)
        self.exportBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Export")
        self.addToCanvasBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Add to Canvas")
        self.PlotBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Preview")
        self.line_style_combo = wx.ComboBox(middle_panel, value="Line style")
        self.line_style_options = ["Line", "Scatter"]
        self.line_style_combo.AppendItems(self.line_style_options)
        hboxMidPanel.Add(self.startDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
        hboxMidPanel.Add(self.startDatePicker, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.endDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
        hboxMidPanel.Add(self.endDatePicker, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.PlotBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.exportBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.addToCanvasBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.line_style_combo, 1, wx.EXPAND | wx.ALL, 2)
        middle_panel.SetSizer(hboxMidPanel)

        hboxLowPanel = wx.BoxSizer(wx.HORIZONTAL)
        # Build time series table
        self.variableList = CustomListCtrl(lower_panel)
        self.variableList.set_columns(table_columns)
        hboxLowPanel.Add(self.variableList, 1, wx.EXPAND | wx.ALL, 2)
        lower_panel.SetSizer(hboxLowPanel)

        # Stack the three panels vertically; the plot and table stretch,
        # the control strip keeps its fixed height.
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(top_panel, 1, wx.EXPAND | wx.ALL, 2)
        vbox.Add(middle_panel, 0, wx.EXPAND | wx.ALL, 2)
        vbox.Add(lower_panel, 1, wx.EXPAND | wx.ALL, 2)
        panel.SetSizer(vbox)

        self.status_bar = self.CreateStatusBar()
        self.status_bar.SetStatusText("Ready")
        self.Show()
|
#-*- coding: utf-8 -*-
from openerp.osv import fields, osv
class finance_contract_rachat(osv.osv_memory):
    """Wizard recording a buy-back (rachat) / termination request on a contract."""
    _name = "finance.contract.rachat"
    _columns = {
        'date': fields.date('Date de rachat'),
        'date_dem': fields.date('Date de la demande'),
        'motif': fields.text('Motif'),
        'memo': fields.text('Memo'),
        'act_rachat': fields.boolean('Rachat'),
        'act_res': fields.boolean('Resiliation'),
        'contract_id': fields.many2one('finance.contract', 'Contrat')
    }

    def set_rachat(self, cr, uid, ids, context=None):
        """Copy the wizard values onto the linked contract via save_rachat."""
        wizard = self.browse(cr, uid, ids[0], context=context)
        # `x or False` normalizes empty/None values to False, as expected
        # by the ORM write layer.
        values = {
            'res_reason': wizard.motif or False,
            'res_memo': wizard.memo or False,
            'res_date': wizard.date or False,
            'res_dated': wizard.date_dem or False,
            'res': wizard.act_res,
            'rachat': wizard.act_rachat,
        }
        contract_pool = self.pool.get('finance.contract')
        return contract_pool.save_rachat(cr, uid, wizard.contract_id.id, values)
|
# flake8: noqa
# -*- coding: utf | -8 -*-
###############################################
# Geosite local settings
###############################################
import os
# Outside URL
SITEURL = 'http://$DOMAIN'
OGC_SERVER['default']['LOCATION'] = os.path.join(GEOSERVER_URL, 'geoserver/')
OGC_SERVER['de | fault']['PUBLIC_LOCATION'] = os.path.join(SITEURL, 'geoserver/')
# databases unique to site if not defined in site settings
"""
SITE_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, '../development.db'),
},
}
"""
|
from core.serializers import | ProjectSerializer
from rest_framework import generics
from core.models import Project
class ProjectList(generics.ListCreateAPIView):
    """List all projects (GET) or create a new one (POST)."""
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
class ProjectDetai | l(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
|
# Copyright 2014-2015 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
"""A collection of utilities for fades."""
import os
import sys
import json
import logging
import subprocess
from urllib import request
from urllib.error import HTTPError
import pkg_resources
logger = logging.getLogger(__name__)
SHOW_VERSION_CMD = """
import sys, json
d = dict(path=sys.executable)
d.update(zip('major minor micro releaselevel serial'.split(), sys.version_info))
print(json.dumps(d))
"""
BASE_PYPI_URL = 'https://pypi.python.org/pypi/{name}/json'
STDOUT_LOG_PREFIX = ":: "
class ExecutionError(Exception):
    """Execution of subprocess ended not in 0."""

    def __init__(self, retcode, cmd, collected_stdout):
        """Keep the command, its exit code, and its captured output for later logging."""
        super().__init__()
        self._retcode = retcode
        self._cmd = cmd
        self._collected_stdout = collected_stdout

    def dump_to_log(self, logger):
        """Send the cmd info and collected stdout to logger."""
        logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd)
        for captured_line in self._collected_stdout:
            logger.error("%s%s" % (STDOUT_LOG_PREFIX, captured_line))
def logged_exec(cmd):
    """Execute a command, redirecting the output to the log.

    Returns the captured stdout lines; raises ExecutionError on a
    non-zero exit code.
    """
    logger = logging.getLogger('fades.exec')
    logger.debug("Executing external command: %r", cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured = []
    for raw_line in proc.stdout:
        decoded = raw_line[:-1].decode("utf8")
        captured.append(decoded)
        logger.debug(STDOUT_LOG_PREFIX + decoded)
    retcode = proc.wait()
    if retcode:
        raise ExecutionError(retcode, cmd, captured)
    return captured
def get_basedir():
    """Get the base fades directory, from xdg or kinda hardcoded."""
    try:
        from xdg import BaseDirectory  # NOQA
    except ImportError:
        # No xdg available: fall back to a dot-directory in the user's home.
        logger.debug("Package xdg not installed; using ~/.fades folder")
        from os.path import expanduser
        return expanduser("~/.fades")
    return os.path.join(BaseDirectory.xdg_data_home, 'fades')
def get_confdir():
    """Get the config fades directory, from xdg or kinda hardcoded."""
    try:
        from xdg import BaseDirectory  # NOQA
    except ImportError:
        # No xdg available: fall back to a dot-directory in the user's home.
        logger.debug("Package xdg not installed; using ~/.fades folder")
        from os.path import expanduser
        return expanduser("~/.fades")
    return os.path.join(BaseDirectory.xdg_config_home, 'fades')
def _get_interpreter_info(interpreter=None):
"""Return the interpreter's full path using pythonX.Y format."""
if interpreter is None:
# If interpreter is None by default returns the current interpreter data.
major, minor = sys.version_info[:2]
executable = sys.executable
else:
args = [interpreter, '-c', SHOW_VERSION_CMD]
try:
requested_interpreter_info = logged_exec(args)
except Exception as error:
logger.error("Error getting requested interpreter version: %s", error)
exit()
requested_interpreter_info = json.loads(requested_interpreter_info[0])
executable = requested_interpreter_info['path']
major = requested_interpreter_info['major']
minor = requested_interpreter_info['minor']
if executable[-1].isdigit():
executable = executable.split(".")[0][:-1]
interpreter = "{}{}.{}".format(executable, major, minor)
return interpreter
def get_interpreter_version(requested_interpreter):
    """Return a 'sanitized' interpreter and indicates if it is the current one."""
    logger.debug('Getting interpreter version for: %s', requested_interpreter)
    current_interpreter = _get_interpreter_info()
    logger.debug('Current interpreter is %s', current_interpreter)
    if requested_interpreter is None:
        # Nothing requested: the current interpreter is, by definition, current.
        return (current_interpreter, True)
    sanitized = _get_interpreter_info(requested_interpreter)
    is_current = sanitized == current_interpreter
    logger.debug('Interpreter=%s. It is the same as fades?=%s',
                 sanitized, is_current)
    return (sanitized, is_current)
def get_latest_version_number(project_name):
    """Return latest version of a package."""
    url = BASE_PYPI_URL.format(name=project_name)
    try:
        raw = request.urlopen(url).read()
    except HTTPError as error:
        logger.warning("Network error. Error: %s", error)
        raise error
    try:
        pypi_info = json.loads(raw.decode("utf8"))
        return pypi_info["info"]["version"]
    except (KeyError, ValueError) as error:  # malformed json or empty string
        logger.error("Could not get the version of the package. Error: %s", error)
        raise error
def check_pypi_updates(dependencies):
    """Return a list of dependencies to upgrade.

    For each pinned pypi dependency, compares its required version against the
    latest on PyPI and logs whether an upgrade is available; unpinned
    dependencies are pinned to the latest version. On any lookup error the
    original `dependencies` dict is returned unchanged.
    """
    dependencies_up_to_date = []
    for dependency in dependencies.get('pypi', []):
        # get latest version from PyPI api
        try:
            latest_version = get_latest_version_number(dependency.project_name)
        except Exception as error:
            logger.warning("--check-updates command will be aborted. Error: %s", error)
            return dependencies
        # get required version
        required_version = None
        if dependency.specs:
            _, required_version = dependency.specs[0]

        if required_version:
            dependencies_up_to_date.append(dependency)
            # Bug fix: versions were compared as plain strings, which is
            # lexicographic and wrong (e.g. '9.0' > '10.0'); parse them so
            # comparison follows real version ordering.
            latest = pkg_resources.parse_version(latest_version)
            required = pkg_resources.parse_version(required_version)
            if latest > required:
                logger.info("There is a new version of %s: %s",
                            dependency.project_name, latest_version)
            elif latest < required:
                logger.warning("The requested version for %s is greater "
                               "than latest found in PyPI: %s",
                               dependency.project_name, latest_version)
            else:
                logger.info("The requested version for %s is the latest one in PyPI: %s",
                            dependency.project_name, latest_version)
        else:
            # Unpinned: pin it to the latest version found on PyPI.
            project_name_plus = "{}=={}".format(dependency.project_name, latest_version)
            dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus))
            logger.info("There is a new version of %s: %s and will use it.",
                        dependency.project_name, latest_version)

    dependencies["pypi"] = dependencies_up_to_date
    return dependencies
|
s) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'destination_format') and \
| config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.ge | t('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
|
raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
try:
# GDALOpen will auto-detect the data source type.
self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
except GDALException as err:
raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
elif isinstance(ds_input, dict):
# A new raster needs to be created in write mode
self._write = 1
# Create driver (in memory by default)
driver = Driver(ds_input.get('driver', 'MEM'))
# For out of memory drivers, check filename argument
if driver.name != 'MEM' and 'name' not in ds_input:
raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))
# Check if width and height where specified
if 'width' not in ds_input or 'height' not in ds_input:
raise GDALException('Specify width and height attributes for JSON or dict input.')
# Check if srid was specified
if 'srid' not in ds_input:
raise GDALException('Specify srid for JSON or dict input.')
# Create GDAL Raster
self._ptr = capi.create_ds(
driver._ptr,
force_bytes(ds_input.get('name', '')),
ds_input['width'],
ds_input['height'],
ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
ds_input.get('datatype', 6),
None
)
# Set band data if provided
for i, band_input in enumerate(ds_input.get('bands', [])):
band = self.bands[i]
band.data(band_input['data'])
if 'nodata_value' in band_input:
band.nodata_value = band_input['nodata_value']
# Set SRID
self.srs = ds_input.get('srid')
# Set additional properties if provided
if 'origin' in ds_input:
self.origin.x, self.origin.y = ds_input['origin']
if 'scale' in ds_input:
self.scale.x, self.scale.y = ds_input['scale']
if 'skew' in ds_input:
self.skew.x, self.skew.y = ds_input['skew']
elif isinstance(ds_input, c_void_p):
# Instantiate the object using an existing pointer to a gdal raster.
self._ptr = ds_input
else:
raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))
def __del__(self):
if self._ptr and capi:
capi.close_ds(self._ptr)
def __str__(self):
return self.name
def __repr__(self):
"""
Short-hand representation because WKB may be very large.
"""
return '<Raster object at %s>' % hex(addressof(self._ptr))
def _flush(self):
"""
Flush all data from memory into the source file if it exists.
The data that needs flushing are geotransforms, coordinate systems,
nodata_values and pixel values. This function will be called
automatically wherever it is needed.
"""
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException('Raster needs to be opened in write mode to change values.')
capi.flush_ds(self._ptr)
@property
def name(self):
"""
Returns the name of this raster. Corresponds to filename
for file-based rasters.
"""
return force_text(capi.get_ds_description(self._ptr))
@cached_property
def driver(self):
"""
Returns the GDAL Driver used for this raster.
"""
ds_driver = capi.get_ds_driver(self._ptr)
return Driver(ds_driver)
@property
def width(self):
"""
Width (X axis) in pixels.
"""
return capi.get_ds_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels.
"""
return capi.get_ds_ysize(self._ptr)
@property
def srs(self):
"""
Returns the SpatialReference used in this GDALRaster.
"""
try:
wkt = capi.get_ds_projection_ref(self._ptr)
if not wkt:
return None
return SpatialReference(wkt, srs_type='wkt')
except SRSException:
return None
@srs.setter
def srs(self, value):
"""
Sets the spatial reference used in this GDALRaster. The input can be
a SpatialReference or any parameter accepted by the SpatialReference
constructor.
"""
if isinstance(value, SpatialReference):
srs = value
elif isinstance(value, six.integer_types + six.string_types):
srs = SpatialReference(value)
else:
raise ValueError('Could not create a SpatialReference from input.')
capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
self._flush()
@property
def geotransform(self):
"""
Returns the geotransform of the data source.
Returns the default geotransform if it does not exist or has not been
set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
"""
# Create empty ctypes double array for data
gtf = (c_double * 6)()
capi.get_ds_geotransform(self._ptr, byref(gtf))
return list(gtf)
@geotransform.setter
def geotransform(self, values):
"Sets the geotransform for the data source."
if sum([isinstance(x, (int, float)) for x in values]) != 6:
raise ValueError('Geotransform must consist of 6 numeric values.')
# Create ctypes double array with input and write data
values = (c_double * 6)(*values)
capi.set_ds_geotransform(self._ptr, byref(values))
self._flush()
@property
def origin(self):
"""
Coordinates of the raster origin.
"""
return TransformPoint(self, 'origin')
@property
def scale(self):
"""
Pixel scale in units of the raster projection.
"""
return TransformPoint(self, 'scale')
@pro | perty
def skew(self):
"""
Skew of pixels (rotation pa | rameters).
"""
return TransformPoint(self, 'skew')
@property
def extent(self):
"""
Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
"""
# Calculate boundary values based on scale and size
xval = self.origin.x + self.scale.x * self.width
yval = self.origin.y + self.scale.y * self.height
# Calculate min and max values
xmin = min(xval, self.origin.x)
xmax = max(xval, self.origin.x)
ymin = min(yval, self.origin.y)
ymax = max(yval, self.origin.y)
return xmin, ymin, xmax, ymax
@property
def bands(self):
return BandList(self)
def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
"""
Returns a warped GDALRaster with the given input characteristics.
The input is expected to be a dictionary containing the parameters
of the target raster. Allowed values are width, height, SRID, origin,
scale, skew, datatype, driver, and name (filename).
By default, the warp functions keeps all parameters equal to the values
of the original source raster. For the name of the target raster, the
name of the source raster will be used and appended with
_copy. + source_driver_name.
In addition, the resampling algorithm can be specified with the "resampling"
input parameter. The default is NearestNeighbor. For a list of all options
consult the GDAL_RESAMPLE_ALGORITHMS constant.
"""
# Get the parameters defining the geotransform, srid, and size of the raster
if 'width' not in ds_input:
ds_input['width'] = self.width
if 'height' not in ds_inpu |
icenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import boto.s3
import boto.s3.connection
import datetime
import logging
import re
import sys
import zlib
import six
# Default S3 key prefix under which scribe logs live.
# THIS MUST END IN A /
S3PREFIX = "logs/"
# Parses S3 key names of the form .../<stream>/<YYYY>/<MM>/<DD>/<file>.
# NOTE(review): the optional (?P<gz>\.gz)? group follows a greedy '.+', so it
# will in practice never capture — confirm before relying on the 'gz' group.
S3_KEY_RE = re.compile(r'.*/(?P<stream_name>[\w-]+)/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/.+(?P<gz>\.gz)?$')
#----------------------- SCRIBE LOG CHUNK OBJECTS -----------------------#
class BadKeyError(Exception):
    """Raised when a key name does not match the expected scribe log layout."""
    def __init__(self, key, keytype=""):
        self.key = key
        self.keytype = keytype

    def __str__(self):
        return "BadKeyError: %s key %s did not match the expected format" % (self.keytype, self.key)

    def __repr__(self):
        return "<BadKeyError %s:%s>" % (self.keytype, self.key)
class ScribeFile(object):
    """Base class for a single scribe log chunk that can be read or listed.

    The original docstring stated that chunks compare equal when their
    date, stream name, and aggregator coincide (so an S3 listing and a
    local cache can be merged into one set without reading a chunk twice).
    NOTE(review): no __eq__/__hash__ is defined here or in the visible
    subclass, so instances currently compare by identity — confirm whether
    equality is implemented elsewhere.

    Important methods:
        read: write this chunk's contents to *ostream*, transparently
              handling gzip'd data (subclass responsibility)
    Properties:
        size: length of the record in bytes (subclass responsibility)
    """
    def __init__(self, stream_name, year, month, day):
        self.stream_name = stream_name
        self.year = year
        self.month = month
        self.day = day
        self.date = datetime.date(year, month, day)

    @property
    def size(self):
        """Length of the record in bytes; must be provided by subclasses."""
        raise NotImplementedError

    def read(self, ostream=sys.stdout):
        """Write the decoded chunk contents to *ostream*; subclass responsibility."""
        raise NotImplementedError

    def read_orig(self, ostream=sys.stdout):
        """Write the original (possibly compressed) bytes to *ostream*; subclass responsibility."""
        raise NotImplementedError
class ScribeS3File(ScribeFile):
    """A scribe log chunk stored in S3."""
    def __init__(self, key):
        self.key = key
        match = S3_KEY_RE.match(key.name)
        if match is None:
            raise BadKeyError(key, "S3")
        super(ScribeS3File, self).__init__(
            match.group('stream_name'),
            int(match.group('year')),
            int(match.group('month')),
            int(match.group('day')),
        )

    def read(self, ostream=sys.stdout):
        """Stream self into *ostream*, gunzipping on the fly for .gz keys."""
        # wbits=31 selects gzip-format decoding.
        decompressor = zlib.decompressobj(31)
        # Python 2 streams str, Python 3 streams bytes.
        pending = "" if six.PY2 else b""
        if self.key.name.endswith(".gz"):
            for chunk in self.key:
                pending += chunk
                try:
                    ostream.write(decompressor.decompress(pending))
                    pending = decompressor.unconsumed_tail
                except zlib.error:
                    # Maybe not enough buffered data to decompress anything
                    # yet; keep accumulating and retry on the next chunk.
                    pass
        else:
            for chunk in self.key:
                ostream.write(chunk)
        if len(pending) > 0:
            logging.error("Encountered %d extra bits in zlib output", len(pending))

    def read_orig(self, ostream=sys.stdout):
        """Write the original bytes (compressed if applicable) to *ostream*."""
        self.key.get_contents_to_file(ostream)

    @property
    def size(self):
        # Size as reported by the S3 key metadata.
        return self.key.size
#----------------------- SCRIBE CONNECTION MANAGERS -----------------------#
class ScribeS3(object):
    """Wraps an S3 connection and abstracts scribe log interactions."""
    LOGS_BASE_PATH = "{prefix}{stream}/{year:=04d}/{month:=02d}/{day:=02d}"
    LOG_FILE_PATH = LOGS_BASE_PATH + "/{aggregator}-{part:=05d}.gz"
    COMPLETE_FILE_PATH = LOGS_BASE_PATH + "/COMPLETE"

    def __init__(
        self,
        s3_host,
        aws_access_key_id,
        aws_secret_access_key,
        s3_bucket,
        s3_key_prefix=None,
    ):
        self.s3_key_prefix = s3_key_prefix
        # Normalize the prefix so the path templates can concatenate it.
        if self.s3_key_prefix and not self.s3_key_prefix.endswith('/'):
            self.s3_key_prefix += '/'
        self.s3_connection = boto.s3.connection.S3Connection(
            host=s3_host,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        self.s3_bucket = self.s3_connection.get_bucket(s3_bucket)
        logging.debug('connected to s3 with %s', self.s3_connection)

    def _path_kwargs(self, stream_name, date):
        """Common format arguments shared by the *_PATH templates."""
        return {
            'prefix': self.s3_key_prefix,
            'stream': stream_name,
            'year': date.year,
            'month': date.month,
            'day': date.day,
        }

    @property
    def streams(self):
        """Set of stream names found under the configured key prefix."""
        return {
            entry.name.replace(self.s3_key_prefix or S3PREFIX, "", 1).rstrip('/')
            for entry in self.s3_bucket.list(prefix=self.s3_key_prefix, delimiter="/")
        }

    def complete_for(self, stream_name, date):
        """Are the S3 uploads for the given stream_name on the given date marked as complete?"""
        complete_key_name = self.COMPLETE_FILE_PATH.format(**self._path_kwargs(stream_name, date))
        return bool(self.s3_bucket.get_key(complete_key_name))

    def get_logs(self, stream_name, date):
        """Return the set of ScribeS3File chunks for *stream_name* on *date*."""
        prefix = self.LOGS_BASE_PATH.format(**self._path_kwargs(stream_name, date))
        logs = set()
        for key in self.s3_bucket.list(prefix=prefix):
            # Skip marker/sentinel keys and known-bad chunks.
            if key.name.endswith(("COMPLETE", "_SUCCESS", ".bad")):
                continue
            logs.add(ScribeS3File(key))
        return logs

    def get_log(self, stream_name, date, aggregator, part):
        """Get a specific log
        .. warning:: This function is deprecated and should not be used.
        """
        kwargs = self._path_kwargs(stream_name, date)
        kwargs.update(aggregator=aggregator, part=part)
        key = self.s3_bucket.get_key(self.LOG_FILE_PATH.format(**kwargs))
        return ScribeS3File(key) if key else None
#----------------------- COMMAND OBJECTS -----------------------#
class ScribeReader(object):
"""
ScribeReader provides an interface for interacting with individual log elements
(ScribeFile objects) in Scribe
"""
def __init__(self, stream_name, s3_connections=None, fs_connection=None, ostream=sys.stdout, not_in_s3=False):
"""Initialize the ScribeReader
Args:
stream_name: The stream to read from
s3_connections: Optionally, an iterable of ScribeS3 objects
fs_connection: Optionally, a ScribeFS object
not_in_s3: Remove only keys unique to the fs_connection
Will read from s3_connection and/or fs_connection, depending on which are provided
"""
self.stream_name = stream_name
self.s3_connections = s3_connections
self.fs_connection = fs_connection
self.ostream = ostream
self.not_in_s3 = not_in_s3
def logs_for_date(self, date):
"""Write to the initial ostream for the given date"""
keys = set()
if self.fs_connection:
keys |= self.fs_connection.get_logs(self.stream_name, date)
if self.s3_connections:
for connection in self.s3_connections:
if connection is None:
continue
s3_keys = connection.get_logs(self.stream_name, date)
if self.not_in_s3:
keys -= s3_keys
else:
keys |= s3_keys
re |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# v | im:fenc=utf-8
#
# Copyright © 2017-06-27 michael_yin
#
"""
"""
from django.conf import settings
from django.conf.urls import include, url
from django.core.urlresolvers i | mport reverse
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
"""Pelican configuration for Jayson Stemmler's blog.

Settings are grouped by purpose: site identity, content layout, feeds,
URL schemes, and theme (Flex) options.
"""
from __future__ import unicode_literals
import os
import datetime

# --- Site identity -------------------------------------------------------
AUTHOR = u'Jayson Stemmler'
# Note: a dead duplicate assignment (SITENAME = u'Jayson Stemmler') was
# removed; this is the value the site actually used.
SITENAME = "Jayson Stemmler's Blog"
SITEURL = ''
SITETITLE = 'Jayson Stemmler'
SITESUBTITLE = 'Research / Data Scientist'
SITEDESCRIPTION = ''
COPYRIGHT_NAME = "Jayson Stemmler"
# Always show the current year in the copyright notice.
COPYRIGHT_YEAR = datetime.datetime.today().strftime('%Y')

# Theme is vendored in the repository (Flex theme).
THEME = 'themes/Flex'

# --- Content layout ------------------------------------------------------
USE_FOLDER_AS_CATEGORY = True
PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
TIMEZONE = 'America/Los_Angeles'
DEFAULT_LANG = u'en'

# Feed generation is usually not desired when developing.
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

# Copy the CNAME file through for GitHub Pages custom-domain support.
STATIC_PATHS = ['images', 'extra/CNAME']
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'}}

# --- URL schemes ---------------------------------------------------------
ARTICLE_URL = 'posts/{date:%Y}/{date:%b}/{slug}'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/{slug}.html'
PAGE_URL = 'pages/{slug}'
PAGE_SAVE_AS = 'pages/{slug}.html'
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/index.html'

# --- Theme (Flex) options ------------------------------------------------
DISQUS_SITENAME = "jdstemmlerblog"
GOOGLE_ANALYTICS = "UA-99010895-1"
MAIN_MENU = True
SITELOGO = 'https://storage.googleapis.com/jdstemmler-blog-images/profile.png'
LINKS = (('Resume', 'https://represent.io/jdstemmler'),)
SOCIAL = (('linkedin', 'https://linkedin.com/in/jdstemmler/en'),
          ('github', 'https://github.com/jdstemmler'))
MENUITEMS = (('Archives', '/archives.html'),
             ('Categories', '/categories.html'),
             ('Tags', '/tags.html'),)
BROWSER_COLOR = '#333333'
ROBOTS = 'index, follow'
|
.show3(col=col,Id=[Id,k])
# filebox = b.show3(col=col[k],Id=[Id,k])
chaine = "{<"+filebox+"}\n"
fd.write(chaine)
#chaine = "{<cloud.list}\n"
#fd.write(chaine)
fd.close()
if self.parmsh['display']:
chaine = "geomview -nopanel -b 1 1 1 " + filename2 + " 2>/dev/null &"
os.system(chaine)
return(filename)
class BoxN(PyLayers):
"""BoxN Class
A box is determined by its boundary interval along each dimension
Attributes
----------
bd : numpy array 2 x ndim
box boundary
ndim : int
dimension of the box (2D, 3D,...)
self.parmsh : dictionnary
display dictionnary for show 3 method TO BE CHANGE !!!
keys :['display']=True
['interactive']=False
OBTAIN FROM mesure()
self.mes : array 1 x ndim
size of intervals of each dimension
self.ctr : array 1 x ndim
center of intervals of each dimension
self.vol : float
Volume of box
Methods
-------
info() : info about class
def mesure(self):
measure intervals of box
volume() : evaluate volume
inbox(p) : is p in box ?
intersect(box) : intersection of two boxes
show3() : geomview vizualization
cut() : cut a box along given direction
TODO
----
Remove parmsh and replace it by a ini file
"""
# __slots__=('bd','ndim','mes','ctr','vol','parmsh')
def __init__(self,bd=None,ndim=3):
# if bd==None:
# self.bd = np.array([]).astype('float')
# else:
# for i in range(np.shape(bd)[1]):
# assert bd[1,i]>=bd[0,i] , pdb.set_trace()
# self.bd = bd.astype('float')
self.bd=bd
self.ndim = ndim#np.shape(bd)[1]
self.mesure()
self.parmsh={}
self.parmsh['display']=True
self.parmsh['interactive']=False
# print "%s from %s" % (inspect.stack()[1][3],inspect.stack()[1][1])
def mesure(self):
""" Measure BoxN
Obtain measure of :
- size of each interval from each dimension
- center of each interval from each dimension
- Volume of the BoxN
"""
self.mes = self.bd[1,:]-self.bd[0,:]
self.ctr = (self.bd[1,:]+self.bd[0,:])/2.0
self.vol = np.prod(self.mes)
def setbd(self,vmin,vmax,axis=0):
"""
setbd : set boundary value on axis
"""
assert vmin<=vmax, "Incorrect bound"
self.bd[0,axis]= vmin.astype('float')
self.bd[1,axis]= vmax.astype('float')
self.mesure()
def void(self):
""" return True if box is void
"""
b = False
if self.meas==[]:
b = True
else:
pmes = np.prod(self.meas)
if pmes==0:
b = True
return(b)
def info(self):
""" Information on BoxN
"""
print( "Volume (.vol) :",self.vol)
print( "Center (.ctr) :",self.ctr)
def bd2coord(self):
"""Boundary to coordinates
Return an array containing of vertex from a box
3D case :
in :
[xmin ymin zmin]
[xmax ymax zmax]
out :
[xmin ymin zmin]
[xmin ymax zmin]
[xmax ymin zmin]
[xmax ymax zmin]
[xmin ymin zmax]
[xmin ymax zmax]
[xmax ymin zmax]
[xmax ymax zmax]
Returns
-------
P : array 2^ndim x ndim
coordinates of box vertex
"""
if self.ndim == 3:
P=np.array(([self.bd[0,0],self.bd[0,1],self.bd[0,2]],
[self.bd[0,0],self.bd[1,1],self.bd[0,2]],
[self.bd[1,0] | ,self.bd[1,1],self.bd[0,2]],
[self.bd[1,0],self.bd[0,1],self.bd[0,2]],
[self.bd[0,0],self.bd[0,1],self.bd[1,2]],
[self.bd[0,0],self.bd[1,1],self.bd[1,2]],
[self.bd[1,0],self.bd[1,1],self.bd[1,2]],
[self.bd[1,0],self.bd[0,1],self.bd[1,2]]))
return(P) |
if self.ndim == 2:
P=np.array(([self.bd[0,0],self.bd[0,1]],
[self.bd[0,0],self.bd[1,1]],
[self.bd[1,0],self.bd[1,1]],
[self.bd[1,0],self.bd[0,1]]))
return(P)
def coord2bd(self,coord):
"""
Coordinates to boundary
update boundary array from numpy array of coordinates
Parameters
----------
coord : array 2^ndim x ndim
vertexes coordinates of a boxN
Returns
-------
Nothing but fills self.bd
"""
self.bd[0,:]=np.min(coord,axis=0)
self.bd[1,:]=np.max(coord,axis=0)
# def octant(self):
# tlb = []
# lbox = LBoxN([])
# BD = np.array((self.bd[0],(self.bd[0]+self.bd[1])/2,self.bd[1] ))
## Rx = BD[(range(0,2)+range(1,3))*4,0]
## Ry = BD[(range(0,2)*2+range(1,3)*2)*2,1]
## Rz = BD[range(0,2)*4+range(1,3)*4,2]
## O = np.array((Rx,Ry,Rz )).T
## LB = LBoxN([BoxN(O[0:2]),BoxN(O[2:4]),BoxN(O[4:6]),BoxN(O[6:8]),BoxN(O[8:10]),BoxN(O[10:12]),BoxN(O[12:14]),BoxN(O[14:16])])
## # LB.bnum = range(00,010,1)
## return(LB)
##
def octant(self):
""" quadtree on boxes OBSOLETE
Divide boxes into 2^ndim equal parts
aka Split each interval from each dimension into 2 equal part
"""
tlb = []
lbox = LBoxN([])
for k in range(self.ndim):
tlb.append(self.cut(self.ctr[k],axis=k))
lbm = tlb[0]
for l in range(len(tlb)-1):
lbp=lbm.intersect(tlb[l+1])
lbm=lbp
return(lbm)
def intersect(self,box):
""" Find intersection between current box and a given one
Parameters
----------
box : BoxN
a BoxN object
Returns
-------
new_box : BoxN
a BoxN object
"""
new_box = BoxN(np.zeros((2,self.ndim)),ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0,k],box.bd[0,k])
newmax = min(self.bd[1,k],box.bd[1,k])
if (newmax>newmin):
new_box.bd[0,k]= newmin
new_box.bd[1,k]= newmax
new_box.mesure()
return(new_box)
def bdiff(self,box):
""" OBSOLETE
USE self.intersect instead !!!
"""
new_box = BoxN(np.zeros((2,self.ndim)),ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0,k],box.bd[0,k])
newmax = min(self.bd[1,k],box.bd[1,k])
if (newmax>newmin):
new_box.bd[0,k]= newmin
new_box.bd[1,k]= newmax
new_box.mesure()
return(new_box)
def _show3(self,col='r',Id=[0],H_Id=0,alpha=0.2):
mapp=np.array([[0,0,0],[0,1,0],[1,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,1],[1,0,1]],dtype='int')
b= np.array([self.bd[mapp[:,0],0],self.bd[mapp[:,1],1],self.bd[mapp[:,2],2]])
edges=[[0,1,2],
[0,2,3],
[0,1,5],
[0,4,5],
[1,5,2],
[5,6,2],
[2,6,7],
[2,7,3],
[0,3,4],
[3,7,4],
[4,5,6],
[4,6,7]]
# trick for correcting color assignement
mesh=tvtk.PolyData(points=b.T,polys=edges)
if col =='r':
color=(1,0,0)
elif col =='b':
color=(0,0,1)
mlab.pipeline.surface(mesh,opacity=alpha,color=color)
def show3(self,dim=(0,1,2),col='r',Id=[0],H_Id=0,alpha=0.2):
"""Show box into geomview
generate a geomview file which allow to represent a box.
Parameters
----------
dim : tuple
chose dimension to display. default : (0,1,2)
col : string
choose box color. compliant with matplotlib colorConverter. default 'r'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
from django.conf import settings
def settings_context(request):
    """
    Context processor exposing pagination settings to templates.

    Returns ITEMS_PER_PAGE and TAGS_PER_PAGE from settings.py, falling
    back to 20 and 200 respectively when they are not configured.
    """
    # getattr with a default replaces the try/except AttributeError dance and
    # drops a stray py2-only debug statement (print "oooo") left in the
    # original ITEMS_PER_PAGE fallback branch.
    items_per_page = getattr(settings, "ITEMS_PER_PAGE", 20)
    tags_per_page = getattr(settings, "TAGS_PER_PAGE", 200)
    return {"ITEMS_PER_PAGE": items_per_page, "TAGS_PER_PAGE": tags_per_page}
from d | jango.db import migrations
class Migration(migrations.Migration):
    """Move the bookmark table from the old 'core' app into 'bookmarks'.

    Raw SQL is used because the table is renamed across apps: the existing
    bookmarks_bookmark table is dropped, core_bookmark takes over its name,
    and content-type rows are repointed at the 'bookmarks' app label.
    """

    dependencies = [
        ('bookmarks', '0004_auto_20160901_2322'),
    ]

    operations = [
        # Order matters: drop the placeholder table before renaming into it.
        migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
        migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
        migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
    ]
'''
cloudminingstatus.py
@summary: Show selected API data from cloudhasher and miningpool.
@author: Andreas Krueger
@since: 12 Feb 2017
@contact: https://github.com/drandreaskrueger
@copyright: @author @since @license
@license: Donationware, see README.md. Plus see LICENSE.
@version: v0.1.0
@status: It is working well.
@todo: Make it into webservice?
'''
from __future__ import print_function
import time
import sys
import pprint
import requests # pip install requests
# Seconds to wait between polling rounds.
SLEEP_SECONDS = 5 * 60
SHOW_COMPOSITE_RESULTS = True

# Prefer personal credentials (credentials_ME.py), falling back to the
# checked-in template (credentials.py).
try:
    from credentials_ME import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY
except ImportError:  # was a bare except; only a missing module should trigger the fallback
    from credentials import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY

POOL_API_URL = "http://soil.miners-zone.net/apisoil/accounts/%s"
HASHER_ORDERS_API_URL = "https://www.nicehash.com/api?method=orders.get&my&algo=20&location=0&id=%s&key=%s"
HASHER_BALANCE_API_URL = "https://www.nicehash.com/api?method=balance&id=%s&key=%s"  # unused
def humanTime(epoch):
    """Format a POSIX timestamp as a human-readable GMT string."""
    gmt = time.gmtime(epoch)
    return time.strftime("GMT %H:%M:%S %a %d %b %Y", gmt)
# Fields to display from the pool's account JSON, as (key, formatter) pairs.
# A key may appear several times to render different sub-fields; raw SOIL
# amounts are divided by 1e9 and hashrates by 1e6 before display.
POOL_JSON=[('currentHashrate', (lambda x: "%6.2f MHash/s 30m average" % (x/1000000.0))),
           ('hashrate' , (lambda x: "%6.2f MHash/s 3h average" % (x/1000000.0))),
           ('paymentsTotal' , (lambda x:x)),
           ('stats' , (lambda x: "%10.4f SOIL paid" % (float(x['paid'])/1000000000))),
           ('stats' , (lambda x: "%10.4f SOIL balance" % (float(x['balance'])/1000000000))),
           ('24hreward',(lambda x: "%10.4f SOIL" % (float(x)/1000000000))),
           ('stats' , (lambda x: "%d blocksFound" % (x['blocksFound']))),
           ('stats' , (lambda x: "%s lastShare" % (humanTime(x['lastShare'])))),
           ('workers' , (lambda x: "%s last beat" % (humanTime(x['0']['lastBeat'])))),
           ('workers' , (lambda x: "%s Online" % (not bool(x['0']['offline'])))),
           ('workersTotal', (lambda x:x)),
           ]
# Path (nested keys / index) from the cloudhasher API response down to the
# single order whose fields are displayed.
HASHER_JSON_PATH=('result', 'orders', 0)
# Fields to display from that order, as (key, formatter) pairs; speeds are
# converted from GH/s to MH/s, 'end' from milliseconds to days.
HASHER_JSON=[
             ('alive', (lambda x: x)),
             ('workers', (lambda x: x)),
             ('id', (lambda x: x)),
             ('pool_host', (lambda x: x)),
             ('pool_user', (lambda x: x)),
             ('limit_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
             ('accepted_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
             ('btc_paid', (lambda x: x)),
             ('btc_avail', (lambda x: x)),
             ('price', (lambda x: "%s BTC/GH/Day" % x)),
             ('end', (lambda x: "%4.2f days order lifetime" % (x/1000.0/60/60/24))),
             ]
def getJsonData(url):
    """
    GET *url* and return the decoded JSON payload, or False on any failure
    (connection error, non-200 status, or an unparseable body).
    """
    try:
        response = requests.get(url)
    except Exception as exc:
        print ("no connection: ", exc)
        return False
    if response.status_code != 200:
        print ("not answered OK==200, but ", response.status_code)
        return False
    try:
        return response.json()
    except Exception:
        print ("no json, text:")
        print (response.text)
        return False
def showPoolData(url):
    """
    Fetch the pool account JSON and print the POOL_JSON selection.
    Returns the parsed JSON, or False when fetching failed.
    """
    print ("Pool:")
    data = getJsonData(url)
    if not data:
        return False
    for key, formatter in POOL_JSON:
        print (formatter(data[key]), "(%s)" % key)
    return data
def showHasherData(url):
    """
    Fetch the cloudhasher orders JSON, print the HASHER_JSON selection plus
    an estimated remaining order lifetime, and return the order dict
    (or False when fetching failed).
    """
    print ("CloudHasher:")
    data = getJsonData(url)
    if not data:
        return False
    # Climb down into the one branch with all the interesting data.
    for step in HASHER_JSON_PATH:
        data = data[step]
    for key, formatter in HASHER_JSON:
        print (formatter(data[key]), "(%s)" % key)
    # remaining btc / (price * hashrate) ~ days the order can still run
    estimate = float(data['btc_avail']) / (float(data['price']) * float(data['accepted_speed']))
    print ("%.2f days" % estimate, end='')
    print ("(remaining btc / order price / hashrate)")
    return data
def showCompositeResults(pooldata, hasherdata):
    """
    Estimate a coin price as hashing cost (BTC) over coins mined, printed
    in Satoshi per SOIL and returned.
    N.B.: probably only roughly correct during the first buy order.
    """
    paid = float(pooldata['stats']['paid'])
    balance = float(pooldata['stats']['balance'])
    coinsMined = (paid + balance) / 1000000000
    hashingCostsBtc = float(hasherdata['btc_paid'])
    satoshiPrice = hashingCostsBtc / coinsMined * 100000000
    print ("%.1f Satoshi/SOIL (mining price approx)" % satoshiPrice)
    return satoshiPrice
def loop(sleepseconds):
    """
    Repeatedly show pool and hasher data, then sleep *sleepseconds*.
    Never returns; interrupt with Ctrl-C.
    """
    while True:
        print ()
        pooldata = showPoolData(url=POOL_API_URL % POOL_API_USERNAME)
        print ()
        hasherdata = showHasherData(url=HASHER_ORDERS_API_URL % (HASHER_API_ID, HASHER_API_KEY))
        print ()
        if SHOW_COMPOSITE_RESULTS and pooldata and hasherdata:
            showCompositeResults(pooldata, hasherdata)
        print ()
        print (humanTime(time.time()), end='')
        print ("... sleep %s seconds ..." % sleepseconds)
        time.sleep(sleepseconds)
def checkCredentials():
    """Verify that all credentials in credentials.py are filled in.

    Returns True when complete; otherwise prints a hint plus the current
    (partially empty) credentials tuple and returns False.
    """
    yourCredentials = (POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY)
    if "" not in yourCredentials:
        return True
    print("You must fill in credentials.py first.")
    print(yourCredentials)
    return False
if __name__ == '__main__':
    # Abort early when credentials.py has not been filled in.
    if not checkCredentials():
        sys.exit()
    try:
        loop(sleepseconds=SLEEP_SECONDS)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to leave the endless loop.
        print ("Bye.")
        sys.exit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
    """Run one episode with a mostly-greedy policy; return the mean reward.

    ``func`` maps a batched state ``[[s]]`` to predictions; the argmax of
    the first output is the greedy action.  With probability 0.001 a random
    action sampled from the player's action space is taken instead.
    """
    def choose_action(state):
        space = player.get_action_space()
        action = func([[state]])[0][0].argmax()
        if random.random() < 0.001:
            action = space.sample()
        if verbose:
            print(action)
        return action
    return np.mean(player.play_one_episode(choose_action))
def play_model(cfg, player):
    """Play episodes forever with an offline predictor, printing each score."""
    predictor = OfflinePredictor(cfg)
    while True:
        print("Total:", play_one_episode(player, predictor))
def eval_with_funcs(predictors, nr_eval, get_player_fn):
    """Evaluate a model using a pool of predictor-driven worker threads.

    Each element of *predictors* drives one worker thread that plays full
    episodes (via ``play_one_episode``) until *nr_eval* scores have been
    collected from the shared queue.

    Returns:
        (average_score, max_score); (0, 0) when nothing was collected.
    """
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            # Abort the in-flight episode once the thread was asked to stop.
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn(train=False)
                while not self.stopped():
                    try:
                        score = play_one_episode(player, self.func)
                    except RuntimeError:
                        # raised by self.func after stop() -- normal exit
                        return
                    self.queue_put_stoppable(self.q, score)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]
    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()
    try:
        for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
            stat.feed(q.get())
        logger.info("Waiting for all the workers to finish the last run...")
        for k in threads:
            k.stop()
        for k in threads:
            k.join()
        # drain scores produced while waiting for the workers to stop
        while q.qsize():
            stat.feed(q.get())
    except Exception:
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt silently; only log genuine errors here
        logger.exception("Eval")
    finally:
        # NOTE(review): returning from ``finally`` still suppresses any
        # propagating exception (incl. KeyboardInterrupt) and yields the
        # partial statistics -- kept for backward compatibility.
        if stat.count > 0:
            return (stat.average, stat.max)
        return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
    """Evaluate *nr_eval* episodes with half the CPUs (at most 8 threads)."""
    func = OfflinePredictor(cfg)
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    # renamed from ``mean, max`` -- don't shadow the builtins
    mean_score, max_score = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
    logger.info("Average Score: {}; Max Score: {}".format(mean_score, max_score))
class Evaluator(Triggerable):
    """Callback that periodically evaluates the current model.

    Runs ``eval_with_funcs`` over a pool of predictors, reports mean/max
    scores to the monitors, and shrinks the number of evaluation episodes
    when one round takes longer than ten minutes.
    """

    def __init__(self, nr_eval, input_names, output_names, get_player_fn):
        self.eval_episode = nr_eval
        self.input_names = input_names
        self.output_names = output_names
        self.get_player_fn = get_player_fn

    def _setup_graph(self):
        NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
        self.pred_funcs = [self.trainer.get_predictor(
            self.input_names, self.output_names)] * NR_PROC

    def _trigger(self):
        t = time.time()
        # renamed from ``mean, max`` -- don't shadow the builtins
        mean_score, max_score = eval_with_funcs(
            self.pred_funcs, self.eval_episode, self.get_player_fn)
        t = time.time() - t
        if t > 10 * 60:  # eval takes too long
            self.eval_episode = int(self.eval_episode * 0.94)
        self.trainer.monitors.put_scalar('mean_score', mean_score)
        self.trainer.monitors.put_scalar('max_score', max_score)
def play_n_episodes(player, predfunc, nr):
    """Play *nr* episodes with *predfunc*, printing the score of each."""
    logger.info("Start evaluation: ")
    for episode in range(nr):
        # the player starts ready for the first episode; restart afterwards
        if episode != 0:
            player.restart_episode()
        score = play_one_episode(player, predfunc)
        print("{}/{}, score={}".format(episode, nr, score))
|
# coding=utf-8
from kombu import Connection, Exchange, Queue, Consumer
from kombu.async | import Hub
# Direct exchange and queue used by the demo consumer below.
web_exchange = Exchange('web_develop', 'direct', durable=True)
standard_queue = Queue('standard', exchange=web_exchange,
                       routing_key='web.develop')
# Broker URI using the librabbitmq transport; dev-only credentials.
URI = 'librabbitmq://dongwm:123456@localhost:5672/web_develop'
# kombu's asynchronous event-loop hub, driven by run_forever() below.
hub = Hub()
def on_message(body, message):
    """Print a received message's body, encoding and payload, then ack it."""
    details = (body, message.content_encoding, message.payload)
    print("Body:'%s', Headers:'%s', Payload:'%s'" % details)
    message.ack()
# Blocking consume loop: register the connection with the async hub and
# process messages through on_message until interrupted with Ctrl-C.
with Connection(URI) as connection:
    connection.register_with_event_loop(hub)
    with Consumer(connection, standard_queue, callbacks=[on_message]):
        try:
            hub.run_forever()
        except KeyboardInterrupt:
            exit(1)
|
#!/usr/bin/env python
import os, sys
from polib import pofile
from config import CONFIGURATION
from extract import SOURCE_WARN
from execute import execute
# Header line inserted into pulled .po files in place of the
# "this is an English source file" warning; %s is the attribution target.
TRANSIFEX_HEADER = 'Translations in this file have been downloaded from %s'
TRANSIFEX_URL = 'https://www.transifex.com/projects/p/edx-studio/'
def push():
    """Push the source-language strings to Transifex (``tx push -s``)."""
    execute('tx push -s')
def pull():
    """Pull translations from Transifex, then strip source-file warnings.

    ``tx pull --all`` already fetches every locale in one invocation, so it
    only needs to run once -- the original looped and re-ran the identical
    command for each non-source locale.
    """
    execute('tx pull --all')
    clean_translated_locales()
def clean_translated_locales():
    """
    Strips out the warning from all translated po files
    about being an English source file.
    """
    translated = (l for l in CONFIGURATION.locales
                  if l != CONFIGURATION.source_locale)
    for locale in translated:
        clean_locale(locale)
def clean_locale(locale):
    """
    Strips the English-source warning from each of a locale's
    machine-generated po files.
    """
    messages_dir = CONFIGURATION.get_messages_dir(locale)
    machine_generated = ('django-partial.po', 'djangojs.po', 'mako.po')
    for name in machine_generated:
        clean_file(messages_dir.joinpath(name))
def clean_file(file):
    """
    Replaces the English-source warning in a translated po file with a
    note that the translations came from Transifex.  No-op when the
    warning is absent.
    """
    po = pofile(file)
    if SOURCE_WARN in po.header:
        po.header = po.header.replace(SOURCE_WARN, get_new_header(po))
        po.save()
def get_new_header(po):
    """Return the Transifex attribution line for *po*: points at its
    Language-Team when present, otherwise at the project URL."""
    team = po.metadata.get('Language-Team')
    return TRANSIFEX_HEADER % (team if team else TRANSIFEX_URL)
if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise Exception("missing argument: push or pull")
    arg = sys.argv[1]
    # dispatch table instead of an if/elif chain
    commands = {'push': push, 'pull': pull}
    if arg not in commands:
        raise Exception("unknown argument: (%s)" % arg)
    commands[arg]()
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependen | cies = [
('organizations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name | ='organization',
name='logo',
field=models.ImageField(help_text='Please add only .PNG files for logo images. This logo will be used on certificates.', max_length=255, null=True, upload_to='organization_logos', blank=True),
),
]
|
: "22"},
)
def test_eintr_retry(self):
self.assertEqual("foo", paramiko.util.retry_on_signal(lambda: "foo"))
# Variables that are set by raises_intr
intr_errors_remaining = [3]
call_count = [0]
def raises_intr():
call_count[0] += 1
if intr_errors_remaining[0] > 0:
intr_errors_remaining[0] -= 1
raise IOError(errno.EINTR, "file", "interrupted system call")
self.assertTrue(paramiko.util.retry_on_signal(raises_intr) is None)
self.assertEqual(0, intr_errors_remaining[0])
self.assertEqual(4, call_count[0])
def raises_ioerror_not_eintr():
raise IOError(errno.ENOENT, "file", "file not found")
self.assertRaises(
IOError,
lambda: paramiko.util.retry_on_signal(raises_ioerror_not_eintr),
)
def raises_other_exception():
raise AssertionError("foo")
self.assertRaises(
AssertionError,
lambda: paramiko.util.retry_on_signal(raises_other_exception),
)
    def test_proxycommand_config_equals_parsing(self):
        """
        ProxyCommand should not split on equals signs within the value.
        """
        # Both key/value separators ('key value' and 'key=value') must
        # yield the identical, un-split command string.
        conf = """
Host space-delimited
    ProxyCommand foo bar=biz baz
Host equals-delimited
    ProxyCommand=foo bar=biz baz
"""
        f = StringIO(conf)
        config = paramiko.util.parse_ssh_config(f)
        for host in ("space-delimited", "equals-delimited"):
            self.assertEqual(
                host_config(host, config)["proxycommand"], "foo bar=biz baz"
            )
    def test_proxycommand_interpolation(self):
        """
        ProxyCommand should perform interpolation on the value
        """
        # %h expands to the hostname, %p to the (possibly host-specific,
        # possibly wildcard-default) port.
        config = paramiko.util.parse_ssh_config(
            StringIO(
                """
Host specific
    Port 37
    ProxyCommand host %h port %p lol
Host portonly
    Port 155
Host *
    Port 25
    ProxyCommand host %h port %p
"""
            )
        )
        for host, val in (
            ("foo.com", "host foo.com port 25"),
            ("specific", "host specific port 37 lol"),
            ("portonly", "host portonly port 155"),
        ):
            self.assertEqual(host_config(host, config)["proxycommand"], val)
    def test_proxycommand_tilde_expansion(self):
        """
        Tilde (~) should be expanded inside ProxyCommand
        """
        config = paramiko.util.parse_ssh_config(
            StringIO(
                """
Host test
    ProxyCommand ssh -F ~/.ssh/test_config bastion nc %h %p
"""
            )
        )
        # '~' must become the user's home dir; %h/%p interpolate as usual.
        self.assertEqual(
            "ssh -F %s/.ssh/test_config bastion nc test 22"
            % os.path.expanduser("~"),
            host_config("test", config)["proxycommand"],
        )
    def test_host_config_test_negation(self):
        # Negated patterns ("!...") must exclude a host from a block even
        # when a positive pattern in the same block matches; the first
        # non-negated matching block wins for each option.
        test_config_file = """
Host www13.* !*.example.com
    Port 22
Host *.example.com !www13.*
    Port 2222
Host www13.*
    Port 8080
Host *
    Port 3333
"""
        f = StringIO(test_config_file)
        config = paramiko.util.parse_ssh_config(f)
        host = "www13.example.com"
        self.assertEqual(
            paramiko.util.lookup_ssh_host_config(host, config),
            {"hostname": host, "port": "8080"},
        )
    def test_host_config_test_proxycommand(self):
        # ProxyCommand values may themselves contain '='; only the first
        # key/value divider (space or '=') separates option from value.
        test_config_file = """
Host proxy-with-equal-divisor-and-space
    ProxyCommand = foo=bar
Host proxy-with-equal-divisor-and-no-space
    ProxyCommand=foo=bar
Host proxy-without-equal-divisor
    ProxyCommand foo=bar:%h-%p
"""
        for host, values in {
            "proxy-with-equal-divisor-and-space": {
                "hostname": "proxy-with-equal-divisor-and-space",
                "proxycommand": "foo=bar",
            },
            "proxy-with-equal-divisor-and-no-space": {
                "hostname": "proxy-with-equal-divisor-and-no-space",
                "proxycommand": "foo=bar",
            },
            "proxy-without-equal-divisor": {
                "hostname": "proxy-without-equal-divisor",
                "proxycommand": "foo=bar:proxy-without-equal-divisor-22",
            },
        }.items():
            # re-parse for each host: lookups may mutate parser state
            f = StringIO(test_config_file)
            config = paramiko.util.parse_ssh_config(f)
            self.assertEqual(
                paramiko.util.lookup_ssh_host_config(host, config), values
            )
    def test_host_config_test_identityfile(self):
        # IdentityFile accumulates across every matching block (top-level,
        # wildcard and specific), in file order, instead of first-match-wins.
        test_config_file = """
IdentityFile id_dsa0
Host *
    IdentityFile id_dsa1
Host dsa2
    IdentityFile id_dsa2
Host dsa2*
    IdentityFile id_dsa22
"""
        for host, values in {
            "foo": {"hostname": "foo", "identityfile": ["id_dsa0", "id_dsa1"]},
            "dsa2": {
                "hostname": "dsa2",
                "identityfile": ["id_dsa0", "id_dsa1", "id_dsa2", "id_dsa22"],
            },
            "dsa22": {
                "hostname": "dsa22",
                "identityfile": ["id_dsa0", "id_dsa1", "id_dsa22"],
            },
        }.items():
            # re-parse per host so accumulated lists start fresh
            f = StringIO(test_config_file)
            config = paramiko.util.parse_ssh_config(f)
            self.assertEqual(
                paramiko.util.lookup_ssh_host_config(host, config), values
            )
    def test_config_addressfamily_and_lazy_fqdn(self):
        """
        Ensure the code path honoring non-'all' AddressFamily doesn't asplode
        """
        # %l forces lazy FQDN resolution, which interacts with the
        # AddressFamily-restricted lookup path.
        test_config = """
AddressFamily inet
IdentityFile something_%l_using_fqdn
"""
        config = paramiko.util.parse_ssh_config(StringIO(test_config))
        assert config.lookup(
            "meh"
        )  # will die during lookup() if bug regresses
def test_clamp_value(self):
self.assertEqual(32768, paramiko.util.clamp_value(32767, 32768, 32769))
self.assertEqual(32767, paramiko.util.clamp_value(32767, 32765, 32769))
self.assertEqual(32769, | paramiko.util.clamp_value(32767, 32770, 32769))
def test_config_dos_crlf_succeeds(self):
config_file = StringIO("host abcqwerty\r\nHostName 127.0.0.1\r\n")
config = paramiko.SSHConfig()
config.parse(config_file)
self.assertEqual(config.lookup("abcqwerty")["hostname"], "127.0.0.1")
def test_get_hostnames(self):
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
config.get_hostnames(), {"*", "*.example.com", "spoo.example.com"}
)
def test_quoted_host_names(self):
test_config_file = """\
Host "param pam" param "pam"
Port 1111
Host "param2"
Port 2222
Host param3 parara
Port 3333
Host param4 "p a r" "p" "par" para
Port 4444
"""
res = {
"param pam": {"hostname": "param pam", "port": "1111"},
"param": {"hostname": "param", "port": "1111"},
"pam": {"hostname": "pam", "port": "1111"},
"param2": {"hostname": "param2", "port": "2222"},
"param3": {"hostname": "param3", "port": "3333"},
"parara": {"hostname": "parara", "port": "3333"},
"param4": {"hostname": "param4", "port": "4444"},
"p a r": {"hostname": "p a r", "port": "4444"},
"p": {"hostname": "p", "port": "4444"},
"par": {"hostname": "par", "port": "4444"},
"para": {"hostname": "para", "port": "4444"},
}
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
for host, values in res.items():
self.assertEquals(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_quoted_params_in_config(self):
test_config_file = """\
Host "param pam" param "pam"
IdentityFile id_rsa
Host "param2"
IdentityFile "test rsa key"
Host param3 parara
IdentityFile id_rsa
IdentityFile "test rsa key"
"""
res = {
"param pam": {"hostname": "param pam", "identityfile": ["id_rsa"]},
"param": {"hostname": "param", "identityfile": ["id_rsa"]},
"pam": {"hostname": "pam", "identityfile": ["id_rsa"]},
"param2": {"hostname": "param2", "i |
Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if not isinstance(thing, IDLInterface):
continue
| iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# | Completely skip consequential interfaces with no descriptor
# if they have no interface object because chances are we
# don't need to do anything interesting with them.
if iface.isConsequential() and not iface.hasInterfaceObject():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
self.descriptors.extend(
[Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
intefaceName = descriptor.interface.identifier.name
otherDescriptors = [d for d in self.descriptors
if d.interface.identifier.name == intefaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
self.callbacks = [c for c in parseData if
c.isCallback() and not c.isInterface()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x, y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: x.interface.hasInterfaceObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isJSImplemented':
getter = lambda x: x.interface.isJSImplemented()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
@staticmethod
def _filterForFile(items, webIDLFile=""):
"""Gets the items that match the given filters."""
if not webIDLFile:
return items
return filter(lambda x: x.filename() == webIDLFile, items)
    def getDictionaries(self, webIDLFile=""):
        # Dictionaries declared in the given WebIDL file ("" -> all).
        return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile)
    def getCallbacks(self, webIDLFile=""):
        # Callbacks declared in the given WebIDL file ("" -> all).
        return self._filterForFile(self.callbacks, webIDLFile=webIDLFile)
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# We should have exactly one result.
if len(descriptors) != 1:
raise NoSuchDescriptorError("For " + interfaceName + " found " +
str(len(descriptors)) + " matches")
return descriptors[0]
    def getDescriptorProvider(self):
        """
        Gets a descriptor provider that can provide descriptors as needed.
        """
        # The provider simply delegates back to this configuration.
        return DescriptorProvider(self)
class NoSuchDescriptorError(TypeError):
    """Raised when an interface name does not resolve to exactly one
    descriptor."""
    def __init__(self, message):
        # parameter was named ``str`` -- don't shadow the builtin
        TypeError.__init__(self, message)
class DescriptorProvider:
    """
    A way of getting descriptors for interface names
    """
    def __init__(self, config):
        self.config = config

    def getDescriptor(self, interfaceName):
        """
        Gets the appropriate descriptor for the given interface name given the
        context of the current descriptor.  Delegates to the configuration.
        """
        return self.config.getDescriptor(interfaceName)
def MemberIsUnforgeable(member, descriptor):
    """True for non-static attributes/methods that are unforgeable, either
    directly or via the interface-level [Unforgeable] extended attribute."""
    if not (member.isAttr() or member.isMethod()):
        return False
    if member.isStatic():
        return False
    return (member.isUnforgeable() or
            bool(descriptor.interface.getExtendedAttribute("Unforgeable")))
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config)
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
# Callback types do not use JS smart pointers, so we should not use the
# built-in rooting mechanisms for them.
if self.interface.isCallback():
self.needsRooting = False
ty = "%sBinding::%s" % (ifaceName, ifaceName)
self.returnType = "Rc<%s>" % ty
self.argumentType = "???"
self.nativeType = ty
else:
self.needsRooting = True
self.returnType = "Root<%s>" % ifaceName
self.argumentType = "&%s" % ifaceName
self.nativeType = "*const %s" % ifaceName
self.concreteType = ifaceName
self.register = desc.get('register', True)
self.outerObjectHook = desc.get('outerObjectHook', 'None')
self.proxy = False
self.weakReferenceable = desc.get('weakReferenceable', False)
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = (not self.interface.isCallback() and
not self.interface.getExtendedAttribute("Abstract"))
self.hasUnforgeableMembers = (self.concrete and
any(MemberIsUnforgeable(m, self) for m in
self.interface.members))
self.operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedDeleter': None,
'Stringifier': None,
}
def addOperation(operation, m):
if not self.operations[operation]:
self.operations[operation] = m
# Since stringifiers go on the prototype, we only need to worry
# about our own stringifier, not those of our ancestor interfaces.
for m in self.interface.members:
if m.isMethod() and m.isStringifier():
addOperation('Stringifier', m)
if self.concrete:
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
iface = iface.parent
if iface:
iface. |
imgPatch = img[row:row+patchSize, col:col+patchSize]
annPatch = ann[row:row+patchSize, col:col+patchSize]
print 'sample#:', counter
print 'original'
print np.unique(annPatch)
print annPatch.flatten()
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
annPatch = np.fliplr(annPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
annPatch = np.rot90(annPatch, rotateInt)
imgPatch, annPatch = deform_images( imgPatch, annPatch )
annPatch = np.round( annPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
#annPatch = annPatch / np.double(np.max(annPatch))
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = np.int32(annPatch.flatten())
#whole_set_labels[counter] = np.int32(annPatch.flatten() > 0)
print 'modified:'
print 'row:', row, 'col:', col
print 'patch'
print whole_set_patches[counter,:]
print np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] )
print 'labels'
print whole_set_labels[counter]
print np.unique( whole_set_labels[counter] )
counter += 1
# advance to next coordinate
i_coord += 2
#n_img_samples -= n_label_samples
#n_samples_remaining -= n_label_samples
n_samples_per_label[ i_label ] -= n_samples
if True:
continue
for i in range(n_img_samples):
randmem = random.choice(xrange(len(indices[0])))
(row,col) = (indices[0][randmem], indices[1][randmem])
print 'sampling from...', row, col
print 'img:', files_gray[img_index]
imgPatch = img[row:row+patchSize, col:col+patchSize]
annPatch = ann[row:row+patchSize, col:col+patchSize]
print 'original'
print np.unique(annPatch)
print annPatch.flatten()
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
annPatch = np.fliplr(annPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
annPatch = np.rot90(annPatch, rotateInt)
imgPatch, annPatch = deform_images( imgPatch, annPatch )
annPatch = np.round( annPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
#annPatch = annPatch / np.double(np.max(annPatch))
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = np.int32(annPatch.flatten())
#whole_set_labels[counter] = np.int32(annPatch.flatten() > 0)
print 'modified:'
print 'row:', row, 'col:', col
print 'patch'
print whole_set_patches[counter,:]
print np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] )
print 'labels'
print whole_set_membranes[counter]
print np.unique( whole_set_membranes[counter] )
counter += 1
print counter
print '-----'
n_samples_remaining -= n_img_samples
print 'counter:', counter
print 'n_samples_per_label:', n_samples_per_label
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
print np.max(whole_data), np.min(whole_data)
print np.max(whole_set_labels), np.min(whole_set_labels)
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
# changed the patch sampling to use upper left corner instead of middle pixel
# for patch labels it doesn't matter and it makes sampling even and odd patches easier
def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
print 'generate_experiment_data_patch_prediction'
exit(1)
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
pathPrefix = '/media/vkaynig/Data2/Cmor_paper_data/not_normalized/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/'
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cortex-ECS/'
if not os.path.exists(pathPrefix):
pathPrefix = '/n/pfister_lab/vkaynig/'
# if purpose=='train':
# if np.random.random()>0.5:
# pathPrefix = pathPrefix + 'dense40_train/'
# else:
# pathPrefix = pathPrefix + 'dense49_train/'
# else:
# pathPrefix = pathPrefix + 'dense40_train/'
print "#################################"
print purpose
print pathPrefix
img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
#<felix-addition>
pathPrefix = '/n/home00/fgonda/icon/data/reference/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
| img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
#</felix-addition>
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( i | mg_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
print len(img_files_gray)
print len(img_files_membrane)
print len(img_files_labels)
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
read_order = read |
"""
sum(2 * 2**i for i in range(i)) == 2 * (2**i - 1) == n
i == log_2(n // 2 + 1)
"""
from math import ceil, log
import time
def count_ways(n, current_power=None, memo=None):
    """Count representations of n as a sum of powers of two where each
    power may be used at most twice (hyperbinary representations).

    current_power is the largest exponent worth considering (derived from
    n on the first call); memo caches (n, current_power) -> count across
    the recursion.
    """
    if memo is None:
        memo = {}
    if current_power is None:
        current_power = ceil(log(n // 2 + 1, 2))
    key = (n, current_power)
    if key not in memo:
        term = 2 ** current_power
        # Largest value representable with exponents < current_power:
        # sum(2 * 2**i for i < current_power) == 2 * (term - 1)
        smaller_max = 2 * (term - 1)
        assert n <= 2 * (2 ** (current_power + 1) - 1)
        total = 0
        # use the current power twice
        if n > 2 * term:
            total += count_ways(n - 2 * term, current_power - 1, memo)
        elif n == 2 * term:
            total += 1
        # use the current power once
        if n > term:
            if n - term <= smaller_max:
                total += count_ways(n - term, current_power - 1, memo)
        elif n == term:
            total += 1
        # skip the current power entirely
        if n <= smaller_max:
            total += count_ways(n, current_power - 1, memo)
        memo[key] = total
    return memo[key]
# Quick benchmark: count the hyperbinary representations of 10**25 and
# report the wall-clock time in milliseconds.
t0 = time.time()
print(count_ways(10 ** 25))
t1 = time.time()
print('Total time:', (t1 - t0) * 1000, 'ms')
|
import sys
import bpy
from bpy.props import StringProperty
class ExportLog(object):
    """ Class which tracks warnings and errors during export """

    WARNING = "Warning"
    ERROR = "Error"
    MESSAGE_SEPERATOR = "\n"
    SEVERITY_DIVIDER = "|#|"
    # Messages of the last reported log, read by OperatorExportStatus.draw
    EXPORTED_MESSAGE_QUEUE = []

    def __init__(self):
        self._message_queue = []

    def info(self, *args):
        """ Adds a new info, this will not be logged but just printed to stdout """
        print("Info:", *args)

    def warning(self, *args):
        """ Adds a new warning to the log """
        self._add_entry(self.WARNING, *args)

    def error(self, *args):
        """ Adds a new error to the log """
        self._add_entry(self.ERROR, *args)

    def _add_entry(self, severity, *args):
        """ Internal method to append a new entry to the message queue """
        content = " ".join(str(part) for part in args)
        self._message_queue.append((severity, content))
        print(severity + ":", content, file=sys.stderr)

    def report(self):
        """ Shows a dialog with all warnings and errors, but only in case
        there were some """
        if not self._message_queue:
            return
        ExportLog.EXPORTED_MESSAGE_QUEUE = self._message_queue
        bpy.ops.pbe_export.status()
class OperatorExportStatus(bpy.types.Operator):
    """Popup operator listing the messages collected by ExportLog."""
    bl_idname = "pbe_export.status"
    bl_label = "Export Status"

    def execute(self, context):
        wm = context.window_manager
        return wm.invoke_popup(self, width=800, height=400)

    def draw(self, context):
        self.layout.row().label("Export status:")
        self.layout.row()
        for severity, message in ExportLog.EXPORTED_MESSAGE_QUEUE:
            row = self.layout.row()
            # errors get the CANCEL icon, warnings the ERROR (triangle) icon
            icon = "CANCEL" if severity == ExportLog.ERROR else "ERROR"
            row.label(message.replace("\n", ""), icon=icon)
        self.layout.row()
def register():
    """Register the export-status operator with Blender."""
    bpy.utils.register_class(OperatorExportStatus)
    #bpy.utils.register_class(OperatorExportStatusOk)
def unregister():
    """Unregister the export-status operator from Blender."""
    bpy.utils.unregister_class(OperatorExportStatus)
    #bpy.utils.unregister_class(OperatorExportStatusOk)
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.transaction import atomic
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template import RequestContext
from django.utils import timezone
from ultimate.leagues.models import Game, League
from ultimate.user.models import Player, PlayerRatings
from ultimate.forms import EditPlayerForm, EditPlayerRatingsForm, EditProfileForm, SignupForm
@login_required
def index(request):
    """User dashboard: visible leagues, the user's next two games with
    displayable teams, and their registrations."""
    leagues = League.objects.filter(state__in=['closed', 'open', 'preview']).order_by('league_start_date')
    leagues = [l for l in leagues if l.is_visible(request.user)]

    future_games = Game.objects.filter(
        Q(league__in=leagues) &
        Q(date__gte=timezone.now().date()) &
        Q(teams__teammember__user=request.user)
    ).order_by('date')
    future_games = [game for game in future_games if game.get_display_teams().exists()]

    # first two upcoming games, when they exist
    next_game = future_games[0] if len(future_games) >= 1 else None
    following_game = future_games[1] if len(future_games) >= 2 else None

    registrations = [registration
                     for league in leagues
                     for registration in league.get_registrations_for_user(request.user)]

    return render(request, 'user/index.html',
        {
            'current_leagues': leagues,
            'following_game': following_game,
            'next_game': next_game,
            'registrations': registrations
        })
@atomic
def signup(request):
    """Create a user account plus its Player record; the view is atomic."""
    form = None
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            user = form.save()
            Player.objects.get_or_create(
                user=user,
                defaults={'date_of_birth': form.cleaned_data.get('date_of_birth'),
                          'gender': form.cleaned_data.get('gender')})
            messages.success(request, 'Your account was created. You may now log in.')
            return HttpResponseRedirect(reverse('user'))
        messages.error(request, 'There was an error on the form you submitted.')
    if not form:
        form = SignupForm()
    return render(request, 'user/signup.html',
        {'form': form})
@login_required
def editprofile(request):
    """Edit the user's account fields and linked Player profile together.

    Persists changes only when both forms validate; the user form is
    first saved with commit=False so nothing is written when the player
    form turns out to be invalid.
    """
    try:
        player = Player.objects.get(user=request.user)
    except Player.DoesNotExist:
        # no profile yet -- build an unsaved one bound to this user
        player = Player(user=request.user)
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save(commit=False)
            player_form = EditPlayerForm(request.POST, instance=player)
            if player_form.is_valid():
                # both forms valid -- now actually persist both
                form.save()
                player_form.save()
                messages.success(request, 'Your profile was updated successfully.')
                return HttpResponseRedirect(reverse('editprofile'))
            else:
                messages.error(request, 'There was an error on the form you submitted.')
        else:
            # rebind the player form so its errors are shown alongside
            player_form = EditPlayerForm(request.POST, instance=player)
            messages.error(request, 'There was an error on the form you submitted.')
    else:
        form = EditProfileForm(instance=request.user)
        player_form = EditPlayerForm(instance=player)
    return render(request, 'user/editprofile.html',
        {'form': form, 'player_form': player_form})
@login_required
def editratings(request):
    """Create or update the user's self-submitted player ratings."""
    try:
        ratings = PlayerRatings.objects.get(
            user=request.user, submitted_by=request.user,
            ratings_type=PlayerRatings.RATING_TYPE_USER)
    except PlayerRatings.DoesNotExist:
        ratings = None

    if request.method == 'POST':
        form = EditPlayerRatingsForm(request.POST, instance=ratings)
        if form.is_valid():
            # stamp ownership/type fields before persisting
            instance = form.save(commit=False)
            instance.ratings_type = PlayerRatings.RATING_TYPE_USER
            instance.submitted_by = request.user
            instance.updated = timezone.now()
            instance.user = request.user
            instance.save()
            messages.success(request, 'Your ratings were updated successfully.')
            return HttpResponseRedirect(reverse('editratings'))
        messages.error(request, 'There was an error on the form you submitted.')
    else:
        form = EditPlayerRatingsForm(instance=ratings)

    return render(request, 'user/editratings.html',
        {
            'form': form
        }
    )
|
c | lass RegMagic:
fixed_registers = []
regmagic = RegMagic()
__all__ = ['regmagic']
| |
1))
assert ((Create(hs=hs1) * Destroy(hs=hs1)) * BasisKet(1, hs=hs1) ==
BasisKet(1, hs=hs1))
assert (
(Create(hs=hs1) * Destroy(hs=hs1)) * BasisKet(0, hs=hs1) ==
ZeroKet)
def test_expand_ketbra():
    """Test expansion of KetBra"""
    hs = LocalSpace('0', basis=('0', '1'))
    ket0 = BasisKet('0', hs=hs)
    ket1 = BasisKet('1', hs=hs)
    # |psi><psi| with |psi> = |0> + |1>
    expr = KetBra(KetPlus(ket0, ket1), KetPlus(ket0, ket1))
    with temporary_rules(KetBra, clear=True):
        expr_expand = expr.expand()
    # Expansion distributes into the four elementary dyads.
    assert expr_expand == OperatorPlus(
        KetBra(ket0, ket0),
        KetBra(ket0, ket1),
        KetBra(ket1, ket0),
        KetBra(ket1, ket1))
def test_orthonormality_fock():
    """Test orthonormality of Fock space BasisKets (including symbolic)"""
    hs = LocalSpace('tls', basis=('g', 'e'))
    i = IdxSym('i')
    j = IdxSym('j')
    ket_0 = BasisKet(0, hs=hs)
    bra_0 = ket_0.dag()
    ket_1 = BasisKet(1, hs=hs)
    ket_g = BasisKet('g', hs=hs)
    bra_g = ket_g.dag()
    ket_e = BasisKet('e', hs=hs)
    # Symbolic kets labeled by a Fock index ...
    ket_i = BasisKet(FockIndex(i), hs=hs)
    ket_j = BasisKet(FockIndex(j), hs=hs)
    bra_i = ket_i.dag()
    # ... and by a Fock label resolved through the basis of hs.
    ket_i_lb = BasisKet(FockLabel(i, hs=hs), hs=hs)
    ket_j_lb = BasisKet(FockLabel(j, hs=hs), hs=hs)
    bra_i_lb = ket_i_lb.dag()
    # Concrete states: numeric and string labels address the same basis
    # states, so e.g. <0|g> == 1 and <0|e> == 0.
    assert bra_0 * ket_1 == Zero
    assert bra_0 * ket_0 == One
    assert bra_g * ket_g == One
    assert bra_g * ket_e == Zero
    assert bra_0 * ket_g == One
    assert bra_0 * ket_e == Zero
    assert bra_g * ket_0 == One
    assert bra_g * ket_1 == Zero
    # Symbolic FockIndex brakets evaluate to Kronecker deltas.
    delta_ij = KroneckerDelta(i, j)
    delta_i0 = KroneckerDelta(i, 0)
    delta_0j = KroneckerDelta(0, j)
    assert bra_i * ket_j == delta_ij
    assert bra_i * ket_0 == delta_i0
    assert bra_0 * ket_j == delta_0j
    assert bra_i * ket_g == delta_i0
    assert bra_g * ket_j == delta_0j
    assert delta_ij.substitute({i: 0, j: 0}) == One
    assert delta_ij.substitute({i: 0, j: 1}) == Zero
    assert delta_i0.substitute({i: 0}) == One
    assert delta_i0.substitute({i: 1}) == Zero
    # FockLabel kets must behave identically to FockIndex kets.
    delta_ij = KroneckerDelta(i, j)
    delta_ig = KroneckerDelta(i, 0)
    delta_gj = KroneckerDelta(0, j)
    assert bra_i_lb * ket_j_lb == delta_ij
    assert bra_i_lb * ket_0 == delta_ig
    assert bra_0 * ket_j_lb == delta_gj
    assert bra_i_lb * ket_g == delta_ig
    assert bra_g * ket_j_lb == delta_gj
    assert delta_ij.substitute({i: 0, j: 0}) == One
    assert delta_ij.substitute({i: 0, j: 1}) == Zero
    assert delta_ig.substitute({i: 0}) == One
    assert delta_ig.substitute({i: 1}) == Zero
def test_orthonormality_spin():
    """Check braket evaluation for spin-1/2 basis kets, including
    symbolic SpinIndex labels."""
    hs = SpinSpace('s', spin='1/2')
    i = IdxSym('i')
    j = IdxSym('j')
    ket_dn = SpinBasisKet(-1, 2, hs=hs)
    ket_up = SpinBasisKet(1, 2, hs=hs)
    bra_dn = ket_dn.dag()
    ket_i = BasisKet(SpinIndex(i / 2, hs), hs=hs)
    bra_i = ket_i.dag()
    ket_j = BasisKet(SpinIndex(j / 2, hs), hs=hs)
    # Concrete spin states are orthonormal ...
    assert bra_dn * ket_dn == One
    assert bra_dn * ket_up == Zero
    # ... while symbolic labels give (unsimplified) Kronecker deltas.
    delta_ij = KroneckerDelta(i, j, simplify=False)
    delta_i_dn = KroneckerDelta(i, -1, simplify=False)
    delta_dn_j = KroneckerDelta(-1, j, simplify=False)
    assert bra_i * ket_j == delta_ij
    assert bra_i * ket_dn == delta_i_dn
    assert bra_dn * ket_j == delta_dn_j
    assert delta_ij.substitute({i: 0, j: 0}) == One
    assert delta_ij.substitute({i: 0, j: 1}) == Zero
def test_indexed_local_sigma():
    """Test that brakets involving indexed symbols evaluate to Kronecker
    deltas"""
    hs = LocalSpace('tls', basis=('g', 'e'))
    i = IdxSym('i')
    j = IdxSym('j')
    ket_i = BasisKet(FockIndex(i), hs=hs)
    ket_j = BasisKet(FockIndex(j), hs=hs)
    # sigma_{g,e} acting on |i> projects to delta_{i,1} |g>.
    sigma_ge = LocalSigma('g', 'e', hs=hs)
    expr = sigma_ge * ket_i
    expected = KroneckerDelta(i, 1) * BasisKet('g', hs=hs)
    assert expr == expected
    # Integer and string labels address the same basis states.
    assert expr == LocalSigma(0, 1, hs=hs) * ket_i
    # Sandwich (sigma_ge + sigma_eg) between <i| and |j> and expand.
    flip_op = sigma_ge + LocalSigma('e', 'g', hs=hs)
    braopket = BraKet(ket_i, OperatorTimesKet(flip_op, ket_j))
    expr = braopket.expand()
    assert expr == (
        KroneckerDelta(i, 0) * KroneckerDelta(1, j) +
        KroneckerDelta(i, 1) * KroneckerDelta(0, j))
def eval_lb(expr, mapping):
    """Evaluate symbolic labels with the given mapping"""
    # One rewrite rule: any SymbolicLabelBase node is substituted in place.
    substitute_rule = (
        wc('label', head=SymbolicLabelBase),
        lambda label: label.substitute(mapping))
    return _apply_rules(expr, rules=[substitute_rule])
def test_ket_symbolic_labels():
    """Test that we can instantiate Kets with symbolic labels"""
    i = IdxSym('i')
    j = IdxSym('j')
    hs0 = LocalSpace(0)
    hs1 = LocalSpace(1)
    Psi = IndexedBase('Psi')
    # FockIndex labels substitute to plain integer basis labels.
    assert (
        eval_lb(BasisKet(FockIndex(2 * i), hs=hs0), {i: 2}) ==
        BasisKet(4, hs=hs0))
    # BasisKet only accepts FockIndex (or concrete) labels.
    with pytest.raises(TypeError) as exc_info:
        BasisKet(IntIndex(2 * i), hs=hs0)
    assert "not IntIndex" in str(exc_info.value)
    with pytest.raises(TypeError) as exc_info:
        BasisKet(StrLabel(2 * i), hs=hs0)
    assert "not StrLabel" in str(exc_info.value)
    with pytest.raises(TypeError) as exc_info:
        BasisKet(2 * i, hs=hs0)
    assert "not Mul" in str(exc_info.value)
    # KetSymbol labels substitute to strings ...
    assert(
        eval_lb(KetSymbol(StrLabel(2 * i), hs=hs0), {i: 2}) ==
        KetSymbol("4", hs=hs0))
    # ... and reject label types that would not produce a str.
    with pytest.raises(TypeError) as exc_info:
        eval_lb(KetSymbol(FockIndex(2 * i), hs=hs0), {i: 2})
    assert "type of label must be str" in str(exc_info.value)
    assert StrLabel(Psi[i, j]).substitute({i: 'i', j: 'j'}) == 'Psi_ij'
    assert(
        eval_lb(
            KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1), {i: 'i', j: 'j'}) ==
        KetSymbol("Psi_ij", hs=hs0*hs1))
    assert(
        eval_lb(
            KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1), {i: 1, j: 2}) ==
        KetSymbol("Psi_12", hs=hs0*hs1))
    # LocalSigma with symbolic Fock indices substitutes componentwise.
    assert (
        eval_lb(
            LocalSigma(FockIndex(i), FockIndex(j), hs=hs0), {i: 1, j: 2}) ==
        LocalSigma(1, 2, hs=hs0))
    assert (
        BasisKet(FockIndex(i), hs=hs0) * BasisKet(FockIndex(j), hs=hs0).dag() ==
        LocalSigma(FockIndex(i), FockIndex(j), hs=hs0))
def test_coherent_state_to_fock_representation():
    """Test the representation of a coherent state in the Fock basis.

    Checks the summation range for default/max_terms/custom index symbols,
    and the coefficient exp(-|alpha|^2/2) * alpha^n / sqrt(n!) of each term.
    """
    alpha = symbols('alpha')
    expr1 = CoherentStateKet(alpha, hs=1).to_fock_representation()
    expr2 = CoherentStateKet(alpha, hs=1).to_fock_representation(max_terms=10)
    expr3 = CoherentStateKet(alpha, hs=1).to_fock_representation(
        index_symbol='i')
    expr4 = CoherentStateKet(alpha, hs=1).to_fock_representation(
        index_symbol=IdxSym('m', positive=True))
    assert (
        expr1.term.ranges[0] ==
        IndexOverFockSpace(IdxSym('n'), LocalSpace('1')))
    # max_terms truncates to an explicit integer range 0..9.
    assert (
        expr2.term.ranges[0] ==
        IndexOverRange(IdxSym('n', integer=True), 0, 9))
    assert (
        expr3.term.ranges[0] ==
        IndexOverFockSpace(IdxSym('i'), LocalSpace('1')))
    assert (
        expr4.term.ranges[0] ==
        IndexOverFockSpace(IdxSym('m', positive=True), LocalSpace('1')))
    for expr in (expr1, expr2):
        assert expr.coeff == exp(-alpha*alpha.conjugate()/2)
        # Renamed from `sum` so the builtin is not shadowed.
        series = expr.term
        assert len(series.ranges) == 1
        n = series.ranges[0].index_symbol
        assert series.term.coeff == alpha**n/sqrt(factorial(n))
        assert (
            series.term.term ==
            BasisKet(FockIndex(IdxSym('n')), hs=LocalSpace('1')))
def test_scalar_times_bra():
"""Test that multiplication of a scalar with a bra is handled correctly"""
alpha_sym = symbols('alpha')
alpha = ScalarValue(alpha_sym)
ket = KetSymbol('Psi', hs=0)
bra = ket.bra
# first, let's try the ket case, just to establish a working baseline
expr = alpha * ket
assert expr == ScalarTimesKet(alpha, ket)
assert expr == alpha_sym * ket
assert isinstance((alpha_sym * ket).coeff, ScalarValue)
assert expr == ket * alpha
assert expr == ket * alpha_sym
# now, the bra
expr = alpha * bra
assert expr == Bra(ScalarTimesKet(alpha.conjugate(), ket))
assert expr == alpha_sym * bra
assert isinstance((alpha_sym * bra).ke |
from __future__ import print_function
from __future__ import division
# Alignment markers accepted in the `align` specifications of pprint().
ALIGN_LEFT = '<'
ALIGN_CENTER = '_'
ALIGN_RIGHT = '>'
def pprint(data, header=None, dictorder=None, align=None, output_file=None):
    """Pretty-print `data` (list of rows) as an ASCII table.

    data:        list of dicts or of sequences (rows).
    header:      optional column titles; derived from dict keys if omitted.
    dictorder:   column/key order for dict rows; defaults to the first
                 row's key order.
    align:       per-row, per-cell alignment markers (ALIGN_*); generated
                 by makeStrings when omitted.
    output_file: write the table there instead of printing to stdout.
                 NOTE(review): the file is opened 'wb' but a text string
                 is written -- Python 2 only as written; confirm.
    """
    if ((dict is type(data[0])) and (dictorder is None)):
        dictorder = data[0].keys()
    if ((dict is type(data[0])) and (header is None)):
        header = data[0].keys()
    (sdata, align) = makeStrings(data, dictorder, align)
    (widths, percents) = calcSize(sdata, header)
    output = ''
    if header:
        # Header row: each title centered in its column.
        for i in range(len(header)):
            output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1)))
            # Odd leftover space goes to the right of the title.
            if ((widths[i] - len(header[i])) % 2):
                output += ' '
            # Extra pad for columns whose cells contain '%' characters.
            if percents[i]:
                output += (' ' * (percents[i] - header[i].count('%')))
        output += '|'
        output += '\n'
        # Separator line under the header.
        for i in range(len(widths)):
            output += ('+-' + ('-' * ((widths[i] + 1) + percents[i])))
        output += '+'
        output += '\n'
    # Data rows: right / centered / left depending on the align marker.
    for j in range(len(sdata)):
        d = sdata[j]
        a = align[j]
        for i in range(len(d)):
            if (a[i] == ALIGN_RIGHT):
                output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ')
            elif (a[i] == ALIGN_CENTER):
                output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1)))
                if ((widths[i] - len(d[i])) % 2):
                    output += ' '
            else:
                # Default: left aligned.
                output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1)))
            if percents[i]:
                output += (' ' * (percents[i] - d[i].count('%')))
        output += '|'
        output += '\n'
    if output_file:
        with open(output_file, 'wb') as output_handle:
            output_handle.write(output)
    else:
        print(output, end='')
def makeStrings(data, dictOrder, align):
    """Convert every cell of `data` to a string.

    Returns (rows, align): rows is a list of lists of strings; align is
    the caller-supplied alignment spec or, when it was None, a generated
    one that right-aligns numeric cells and left-aligns everything else.

    NOTE(review): relies on the Python 2 builtins `unicode` and `long`;
    this module is not Python 3 compatible as written -- confirm runtime.
    """
    r = []
    # `a` accumulates generated alignments only when none were supplied.
    a = ([] if (align is None) else None)
    for i in data:
        c = []
        ac = []
        if dictOrder:
            # Dict rows: take cells in the given key order; None -> ''.
            for k in dictOrder:
                c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')])
                if (a is not None):
                    ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT])
        else:
            # Sequence rows: iterate cells directly.
            for k in i:
                c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')])
                if (a is not None):
                    ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT])
        r += [c]
        if (a is not None):
            a += [ac]
    return (r, (a if (a is not None) else align))
def calcSize(data, header):
    """Compute per-column display widths and '%'-counts.

    data:   list of rows, each a list of strings (from makeStrings).
    header: optional list of column header strings, or None.
    Returns (widths, percents): widths[i] is the widest cell in column i
    (header included); percents[i] is the largest number of '%' characters
    seen in any cell of column i (used by pprint for extra padding).
    """
    # Real lists of zeros. The old code assigned into range() objects,
    # which only worked on Python 2 where range() returned a list and
    # raises TypeError on Python 3.
    widths = [0] * len(data[0])
    percents = [0] * len(data[0])
    if header:
        for i in range(len(header)):
            widths[i] = max(widths[i], len(header[i]))
            percents[i] = max(percents[i], header[i].count('%'))
    for d in data:
        for i in range(len(d)):
            widths[i] = max(widths[i], len(d[i]))
            percents[i] = max(percents[i], d[i].count('%'))
    return (widths, percents)
# coding=utf-8
import os
import time
from gzip import GzipFile
from StringIO import StringIO
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.http import HttpResponse
from django.core.management import call_command
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import BackupImportForm
from .uploadhandler import TemporaryGzipFileUploadHandler
# Breadcrumb trail shown on every backup page: [view name, link title].
breadcrumbs = [
    ['warehouse.skill.views.home', 'главная'],
    ['warehouse.backup.views.home', 'резервное копирование'],
]
# Action cards rendered on the backup home page; 'cls' is the CSS grid class.
info = [
    {
        'view': 'warehouse.backup.views.export_gz',
        'title': 'Экспорт',
        'text': 'Позволяет сохранить данные из системы в файл для последующего восстановления.',
        'cls': 'large-4',
    },
    {
        'view': 'warehouse.backup.views.import_gz',
        'title': 'Импорт',
        'text': 'Позволяет восстановить данные из файла. Внимание! Все существующие записи будут безвозвратно утеряны!',
        'cls': 'large-4',
    },
]
##### HOME #####
@login_required
def home(request):
    # Landing page listing the export/import actions described in `info`.
    return render_to_response('backup/home.html', {'breadcrumbs': breadcrumbs, 'info': info}, RequestContext(request))
##### EXPORT #####
@login_required
def export_gz(request):
    # Stream a gzipped JSON dump of the 'auth' and 'skill' apps as a
    # timestamped file download.
    filename = 'skill__%s' % time.strftime('%Y%m%d_%H%M%S.gz')
    # NOTE(review): HttpResponse(mimetype=...) is the pre-Django-1.7
    # keyword (newer Django expects content_type) -- confirm target version.
    response = HttpResponse(mimetype='application/force-download')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    # dumpdata writes through the gzip stream directly into the response body.
    with GzipFile(fileobj=response, mode='w', filename='skill.json') as gz_stream:
        call_command('dumpdata', 'auth', 'skill', stdout=gz_stream, natural=True, indent=2)
    return response
##### IMPORT #####
@login_required
@csrf_exempt
def import_gz(request):
    # CSRF checking is deferred to _import_gz (@csrf_protect) because the
    # upload handlers must be swapped before the POST body is accessed.
    # changing suffix to '.gz' for temp file names
    request.upload_handlers = [TemporaryGzipFileUploadHandler()]
    return _import_gz(request)
@csrf_protect
def _import_gz(request):
    """Render the upload form; on a valid POST, load the backup file."""
    form = BackupImportForm()
    if request.method == 'POST':
        # Re-bind with the submitted data/files.
        form = BackupImportForm(request.POST, request.FILES)
        if form.is_valid():
            message = _process_file(request.FILES['file'])
            messages.success(request, message)
            return redirect('warehouse.backup.views.home')
    # GET, or invalid POST: fall through and render the (bound) form.
    cur = ['warehouse.backup.views.import_gz', 'импорт']
    return render_to_response(
        'backup/import.html',
        {'form': form, 'breadcrumbs': breadcrumbs + [cur]},
        RequestContext(request)
    )
def _process_file(f):
    """Load a Django fixture from an uploaded temporary file.

    `f` is an uploaded-file object exposing temporary_file_path().
    Returns the captured output of the `loaddata` management command.

    The temporary file is always removed and the capture buffer always
    closed, even when `loaddata` raises (the previous version leaked both
    on error).
    """
    file_path = f.temporary_file_path()
    if not f.closed:
        f.close()
    stream = StringIO()
    try:
        call_command('loaddata', file_path, stdout=stream)
        message = stream.getvalue()
    finally:
        stream.close()
        os.unlink(file_path)
    return message
#!/usr/bin/env python
"""Print the Slack user ID of the bot named BOT_NAME."""
import os
from slackclient import SlackClient

BOT_NAME = 'chopbot3000'

slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))

if __name__ == "__main__":
    api_call = slack_client.api_call("users.list")
    if api_call.get('ok'):
        # retrieve all users so we can find our bot
        users = api_call.get('members')
        for user in users:
            if 'name' in user and user.get('name') == BOT_NAME:
                print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
                break  # stop at the first match
        else:
            # for/else: loop completed without break -> bot not in the list.
            # (Previously nothing was printed in this case.)
            print("could not find bot user with the name " + BOT_NAME)
    else:
        # The API call itself failed (bad token, network, ...).
        print("could not find bot user with the name " + BOT_NAME)
|
Iterator of :class:`~google.api_core.operation.Operation`
resources within the current instance.
"""
database_filter = _DATABASE_METADATA_FILTER.format(self.name)
if filter_:
database_filter = "({0}) AND ({1})".format(filter_, database_filter)
return self._instance.list_database_operations(
filter_=database_filter, page_size=page_size
)
def table(self, table_id):
"""Factory to create a table object within this database.
Note: This method does not create a table in Cloud Spanner, but it can
be used to check if a table exists.
.. code-block:: python
my_table = database.table("my_table")
if my_table.exists():
print("Table with ID 'my_table' exists.")
else:
print("Table with ID 'my_table' does not exist.")
:type table_id: str
:param table_id: The ID of the table.
:rtype: :class:`~google.cloud.spanner_v1.table.Table`
:returns: a table owned by this database.
"""
return Table(table_id, self)
def list_tables(self):
"""List tables within the database.
:type: Iterable
:returns:
Iterable of :class:`~google.cloud.spanner_v1.table.Table`
resources within the current database.
"""
with self.snapshot() as snapshot:
results = snapshot.execute_sql(_LIST_TABLES_QUERY)
for row in results:
yield self.table(row[0])
class BatchCheckout(object):
    """Context manager for using a batch from a database.

    Inside the context manager, checks out a session from the database,
    creates a batch from it, making the batch available.

    Caller must *not* use the batch to perform API requests outside the scope
    of the context manager.

    :type database: :class:`~google.cloud.spanner_v1.database.Database`
    :param database: database to use

    :type request_options:
        :class:`google.cloud.spanner_v1.types.RequestOptions`
    :param request_options:
        (Optional) Common options for the commit request.
        If a dict is provided, it must be of the same form as the protobuf
        message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
    """
    def __init__(self, database, request_options=None):
        self._database = database
        self._session = self._batch = None
        if request_options is None:
            self._request_options = RequestOptions()
        elif isinstance(request_options, dict):
            # Accept any mapping; the previous `type(...) == dict` check
            # wrongly rejected dict subclasses (e.g. OrderedDict).
            self._request_options = RequestOptions(request_options)
        else:
            self._request_options = request_options
    def __enter__(self):
        """Begin ``with`` block: check out a session and build the batch."""
        session = self._session = self._database._pool.get()
        batch = self._batch = Batch(session)
        if self._request_options.transaction_tag:
            batch.transaction_tag = self._request_options.transaction_tag
        return batch
    def __exit__(self, exc_type, exc_val, exc_tb):
        """End ``with`` block: commit on success, always recycle the session."""
        try:
            # Only commit when the block exited without an exception.
            if exc_type is None:
                self._batch.commit(
                    return_commit_stats=self._database.log_commit_stats,
                    request_options=self._request_options,
                )
        finally:
            # Log commit stats (when enabled) and return the session to
            # the pool, even if the commit raised.
            if self._database.log_commit_stats and self._batch.commit_stats:
                self._database.logger.info(
                    "CommitStats: {}".format(self._batch.commit_stats),
                    extra={"commit_stats": self._batch.commit_stats},
                )
            self._database._pool.put(self._session)
class SnapshotCheckout(object):
    """Context manager yielding a snapshot bound to a pooled session.

    On entry a session is checked out of the database's pool and wrapped
    in a :class:`~google.cloud.spanner_v1.snapshot.Snapshot`; on exit the
    session is returned to the pool.  The caller must *not* use the
    snapshot to perform API requests outside the ``with`` block.

    :type database: :class:`~google.cloud.spanner_v1.database.Database`
    :param database: database to use

    :type kw: dict
    :param kw:
        Passed through to
        :class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor.
    """
    def __init__(self, database, **kw):
        self._database = database
        self._session = None
        self._kw = kw
    def __enter__(self):
        """Check out a session and return a snapshot built on it."""
        self._session = self._database._pool.get()
        return Snapshot(self._session, **self._kw)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Return the checked-out session to the pool."""
        self._database._pool.put(self._session)
class BatchSnapshot(object):
"""Wrapper for generating and processing read / query batches.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database to use
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
"""
    def __init__(self, database, read_timestamp=None, exact_staleness=None):
        # Database the batch snapshot reads from.
        self._database = database
        # Session and snapshot are created lazily by _get_session /
        # _get_snapshot (or injected by from_dict).
        self._session = None
        self._snapshot = None
        # Staleness options forwarded to the snapshot when it is created.
        self._read_timestamp = read_timestamp
        self._exact_staleness = exact_staleness
    @classmethod
    def from_dict(cls, database, mapping):
        """Reconstruct an instance from a mapping.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database to use

        :type mapping: mapping
        :param mapping: serialized state of the instance

        :rtype: :class:`BatchSnapshot`
        """
        instance = cls(database)
        # Re-attach to the existing server-side session/transaction by their
        # ids, bypassing session creation and transaction begin.
        session = instance._session = database.session()
        session._session_id = mapping["session_id"]
        snapshot = instance._snapshot = session.snapshot()
        snapshot._transaction_id = mapping["transaction_id"]
        return instance
def to_dict(self):
"""Return state as a dictionary.
Result can be used to serialize the instance and reconstitute
it later using :meth:`from_dict`.
:rtype: dict
"""
session = self._get_session()
snapshot = self._ | get_snapshot()
return {
"session_id": session._session_id,
"transaction_id": snapshot._transaction_id,
}
def _get_session(self):
"""Create session as needed.
.. note::
Caller is responsible for cleaning up the session after
all partitions have been processed.
"""
if self._session is Non | e:
session = self._session = self._database.session()
session.create()
return self._session
def _get_snapshot(self):
"""Create snapshot if needed."""
if self._snapshot is None:
self._snapshot = self._get_session().snapshot(
read_timestamp=self._read_timestamp,
exact_staleness=self._exact_staleness,
multi_use=True,
)
self._snapshot.begin()
return self._snapshot
def read(self, *args, **kw):
"""Convenience method: perform read operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read`.
"""
return self._get_snapshot().read(*args, **kw)
def execute_sql(self, *args, **kw):
"""Convenience method: perform query operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.execute_sql`.
"""
return self._get_snapshot().execute_sql(*args, **kw)
def generate_read_batches(
self,
table,
columns,
keyset,
index="",
partition_size_bytes=None,
max_partitions=None,
*,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
"""Start a partitioned batch read operatio |
# urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """
    scheme = 'https'
    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split "DOMAIN\user" into its parts; domain is upper-cased for NTLM.
        # NOTE(review): a user string without a backslash raises IndexError
        # here -- confirm whether that input is possible for callers.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw
    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))
        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'
        conn = HTTPSConnection(host=self.host, port=self.port)
        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))
        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None
        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))
        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))
        # Keep this socket open too: it now carries the authenticated state.
        res.fp = None
        log.debug('Connection established')
        return conn
    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force Keep-Alive on every request so the NTLM-authenticated
        # socket is reused (NTLM authenticates the connection, not requests).
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
|
"""
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'mitxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'OPTIONS': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "mitx.db",
}
}
LMS_BASE = "10.129.50.13:8000"
MITX_FEATURES['PREVIEW_LMS_BASE'] = "10.129.50.13:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': 'git@github.com:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': 'git@github.com:MITx/6002x-fall-2012.git',
'origin': 'git@github.com:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': 'git@github.com:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'mitx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
}
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'd | ebug_toolbar.panels.logger.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel' | ,
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = True
# disable NPS survey in dev mode
MITX_FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of variuous services
MITX_FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
MITX_FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
|
"""
Polygon path.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def getGeometryOutput(xmlElement):
    "Get vector3 vertexes from attribute dictionary."
    # Ellipse radii (real = x, imag = y), defaulting to 1.0 x 1.0.
    radius = lineation.getRadiusComplex(complex(1.0, 1.0), xmlElement)
    sides = evaluate.getSidesMinimumThreeBasedOnPrecisionSides(max(radius.real, radius.imag), xmlElement)
    loop = []
    # Arc parameters in degrees: start, extent/end, and whole revolutions.
    start = evaluate.getEvaluatedFloatZero('start', xmlElement)
    start = getWrappedFloat(start, 360.0)
    extent = evaluate.getEvaluatedFloatDefault(360.0 - start, 'extent', xmlElement)
    end = evaluate.getEvaluatedFloatDefault(start + extent, 'end', xmlElement)
    end = getWrappedFloat(end, 360.0)
    revolutions = evaluate.getEvaluatedFloatOne('revolutions', xmlElement)
    if revolutions > 1:
        end += 360.0 * (revolutions - 1)
    angleTotal = math.radians(start)
    extent = end - start
    # Number of polygon sides needed to cover the requested arc.
    sidesCeiling = int(math.ceil(abs(sides) * extent / 360.0))
    sideAngle = math.radians(extent) / sidesCeiling
    spiral = lineation.Spiral(0.5 * sideAngle / math.pi, xmlElement)
    # An open arc (extent != 360) needs one extra closing vertex.
    for side in xrange(sidesCeiling + (extent != 360.0)):
        unitPolar = euclidean.getWiddershinsUnitPolar(angleTotal)
        vertex = spiral.getSpiralPoint(unitPolar, Vector3(unitPolar.real * radius.real, unitPolar.imag * radius.imag))
        angleTotal += sideAngle
        loop.append(vertex)
    sideLength = sideAngle * lineation.getAverageRadius(radius)
    lineation.setClosedAttribute(revolutions, xmlElement)
    return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop, sideAngle, sideLength), xmlElement)
def getGeometryOutputByArguments(arguments, xmlElement):
    "Get vector3 vertexes from attribute dictionary by arguments."
    # Positional arguments map onto these attributes, in order.
    argumentKeys = ['radius', 'start', 'end', 'revolutions']
    evaluate.setAttributeDictionaryByArguments(argumentKeys, arguments, xmlElement)
    return getGeometryOutput(xmlElement)
def getWrappedFloat(floatValue, modulo):
    """Get wrapped float.

    Values at or above the modulo are clamped to the modulo, non-negative
    values pass through unchanged, and only negative values are wrapped
    upward into [0, modulo).
    """
    if floatValue >= modulo:
        return modulo
    if floatValue < 0:
        return floatValue % modulo
    return floatValue
def processXMLElement(xmlElement):
    """Process the xml element by generating its geometry and attaching it."""
    geometryOutput = getGeometryOutput(xmlElement)
    lineation.processXMLElementByGeometry(geometryOutput, xmlElement)
|
edPoint = np.zeros(2)
rotatedPoint[1] = pivot[1] + np.cos(angle) * (point[1] - pivot[1]) - np.sin(angle) * (point[0] - pivot[0])
rotatedPoint[0] = pivot[0] + np.sin(angle) * (point[1] - pivot[1]) + np.cos(angle) * (point[0] - pivot[0])
return rotatedPoint
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def entry2Int( ax ):
    """Coerce an entry to int.

    Array-like entries are first reduced to the mean of their absolute
    values; plain scalars fall through to int() directly.
    """
    try:
        ax = np.mean(np.abs(ax))
    except (TypeError, ValueError):
        # Not numeric/array-like (e.g. a numeric string); let int() handle it.
        # (Previously a bare `except:` which also hid real errors.)
        pass
    return int(ax)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def marginIds( Rxdims, Mw ):
    """Index bounds [lo, hi) of the left/right/bottom/top margin bands.

    Rxdims: raster dimensions (rows, cols). Mw: margin width fractions
    [L, R, B, T]. Returns four int arrays Li, Ri, Bi, Ti.
    """
    nrows = Rxdims[0]
    ncols = Rxdims[1]
    # Upper/lower bounds are clamped so a band can never be empty or -1.
    Li = np.array([0, max(int(np.ceil(Mw[0] * ncols - 1)), 1)], int)
    Ri = np.array([min(int((1. - Mw[1]) * ncols + 1), ncols - 1), ncols], int)
    Bi = np.array([min(int((1. - Mw[2]) * nrows + 1), nrows - 1), nrows], int)
    Ti = np.array([0, max(int(np.ceil(Mw[3] * nrows - 1)), 1)], int)
    return Li, Ri, Bi, Ti
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyMargins( Rx, Mw, Mr, Mh ):
    """Overwrite and/or ramp the edge bands of raster Rx (modified in place).

    Mw: fixed margin width fractions [L, R, B, T]; all None skips this stage.
    Mr: ramp width fractions [L, R, B, T]; all None skips ramping.
    Mh: margin height values [L, R, B, T] written into fixed margins and used
        as ramp targets where no fixed margin was applied.
    """
    Rxdims = np.shape(Rx)
    if( Mw.count(None) == 0 ):
        print(' Zero (or non-zero) margins: L={}, R={}, B={}, T={}'.format(Mw[0],Mw[1],Mw[2],Mw[3]))
        # Band index bounds for each edge.
        L12, R12, B12, T12 = marginIds( Rxdims, Mw )
        L1 = L12[0]; L2 = L12[1]
        R1 = R12[0]; R2 = R12[1]
        B1 = B12[0]; B2 = B12[1]
        T1 = T12[0]; T2 = T12[1]
        #print('Margin\nL:{},{},R:{},{},T:{},{},B:{},{}'.format(L1,L2,R1,R2,T1,T2,B1,B2))
        # Flat-fill each non-empty band with its target height.
        if( not all( L12 == 0 ) ): Rx[:,L1:L2] = Mh[0]
        if( not all( R12 == 0 ) ): Rx[:,R1:R2] = Mh[1]
        if( not all( T12 == 0 ) ): Rx[T1:T2,:] = Mh[3]
        if( not all( B12 == 0 ) ): Rx[B1:B2,:] = Mh[2]
    else:
        # No fixed margins: anchor the ramps at the outermost rows/columns.
        L1=0; L2=1
        R1=Rxdims[1]-1; R2=Rxdims[1]
        B1=Rxdims[0]-1; B2=Rxdims[0]
        T1=0; T2=1
    if( Mr.count(None) == 0 ):
        print(' Ramp margins: L={}, R={}, B={}, T={}'.format(Mr[0],Mr[1],Mr[2],Mr[3]))
        # Ramp band widths in pixels for each edge.
        dL = int(Mr[0]*Rxdims[1]); dR = int(Mr[1]*Rxdims[1])
        dB = int(Mr[2]*Rxdims[0]); dT = int(Mr[3]*Rxdims[0])
        # Ramp bands start just inside the fixed margins computed above.
        L11 = max(L2-1,0) ; L22 = L2+dL
        R11 = R1-dR ; R22 = min(R1+1, Rxdims[1])
        B11 = B1-dB ; B22 = min(B1+1, Rxdims[0])
        T11 = max(T2-1,0) ; T22 = T2+dT
        #print('Ramp\nL:{},{},R:{},{},T:{},{},B:{},{}'.format(L11,L22,R11,R22,T11,T22,B11,B22))
        # When an edge had no fixed margin, pass Mh so the ramp has a target
        # height; otherwise ramp toward the raster's own (filled) edge values.
        if( dL != 0 ):
            if( (Mw[0] is None) or (Mw[0] ==0.) ):
                Rx = applyRamp( Rx, L11, L22, 1, 0, Mh )
            else:
                Rx = applyRamp( Rx, L11, L22, 1, 0 )
        if( dR != 0 ):
            if( (Mw[1] is None) or (Mw[1] ==0.) ):
                Rx = applyRamp( Rx, R11, R22, 1, 1, Mh )
            else:
                Rx = applyRamp( Rx, R11, R22, 1, 1 )
        if( dB != 0 ):
            if( (Mw[2] is None) or (Mw[2] ==0.) ):
                Rx = applyRamp( Rx, B11, B22, 0, 1, Mh )
            else:
                Rx = applyRamp( Rx, B11, B22, 0, 1 )
        if( dT != 0 ):
            if( (Mw[3] is None) or (Mw[3] ==0.) ):
                Rx = applyRamp( Rx, T11, T22, 0, 0, Mh )
            else:
                Rx = applyRamp( Rx, T11, T22, 0, 0 )
    return Rx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyRamp( Rz, L1, L2, LeftRight, End, Mh=None ):
    """Blend rows/columns [L1, L2) of Rz toward an edge value with a sine ramp.

    LeftRight: 1 for column bands (left/right), 0 for row bands (top/bottom).
    End: 0 for the near edge (left/top), 1 for the far edge (right/bottom).
    Mh: optional fixed target heights [L, R, B, T]; when None the raster's own
    edge row/column is the target. Rz is modified in place and returned.
    """
    dL = (L2-L1)
    # Weights rise smoothly 0 -> 1 across the band (half sine wave).
    w = np.arange( L1, L2 ).astype(float)
    w -= np.min(w); w /= np.max(w)
    w *= np.pi ; w -= (np.pi/2.)
    w = np.sin(w)/2. + 0.5
    # Pick the target values Rm for the selected edge.
    if ( LeftRight and not End ): # Left
        if( Mh is None ): Rm = Rz[:,L1]
        else: Rm = Mh[0]
        #
    elif( LeftRight and End ): # Right
        if( Mh is None ): Rm = Rz[:,L2]
        else: Rm = Mh[1]
    elif( not LeftRight and End ): # Bottom
        if( Mh is None ): Rm = Rz[L2,:]
        else: Rm = Mh[2]
    else: # Top
        if( Mh is None ): Rm = Rz[L1,:]
        else: Rm = Mh[3]
    if( End ):
        # Mirror the ramp so it falls 1 -> 0 toward the far edge.
        w = (1.-w)
    #print(' w = {}, len(w) = {}, len(dL) = {}'.format(w,len(w),dL))
    # Convex blend between the original data and the target values.
    if( LeftRight ):
        for i in range(dL):
            Rz[:,L1+i] = w[i]*Rz[:,L1+i] + (1.-w[i])*Rm
    else: # TopBottom
        for i in range(dL):
            Rz[L1+i,:] = w[i]*Rz[L1+i,:] + (1.-w[i])*Rm
    return Rz
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def filterAndScale(Rxo, Rx, filterInfo, sx=1.0, ix=None, jx=None):
    """Accumulate sx * filtered(Rx) into Rxo.

    filterInfo: [<method>, <num>] or Nones for no filtering. ix/jx: optional
    index arrays applied to Rx before filtering. Returns the updated Rxo.
    """
    # Check if the indecies are explicitly given.
    inxOn = True
    if( ix is None or jx is None ):
        inxOn = False
    if( filterInfo.count(None) == 0):
        if( 'user' in filterInfo[0] ):
            # Interactive mode: prompt for <method>,<num> filterInfo[1] times,
            # each pass filtering the previous result.
            # NOTE(review): raw_input is Python 2 only.
            nI = int(filterInfo[1])
            for i in range(nI):
                ftmp = raw_input(' Enter <method>, <num> = ').split(',')
                if( i == 0 and inxOn ): Rxf = applyFilter(Rx[ix,jx], ftmp)
                else: Rxf = applyFilter(Rx, ftmp)
                Rx = Rxf.copy()
            Rx = None
        else:
            if( inxOn ): Rxf = applyFilter(Rx[ix,jx], filterInfo)
            else: Rxf = applyFilter(Rx, filterInfo)
            Rx = None
        Rxo += sx*Rxf
    else:
        # No filtering requested: accumulate the (indexed) raw raster.
        if( inxOn ):
            Rxo += sx*Rx[ix,jx]
        else:
            Rxo += sx*Rx
    return Rxo
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyFilter(Rx, filterInfo ):
    """Apply the 2d filter named in filterInfo[0] (size/sigma filterInfo[1]) to Rx.

    Unrecognized method names return Rx unchanged. Exits the program when
    the numeric filter argument cannot be parsed.
    """
    import scipy.ndimage as sn # contains the filters
    method = filterInfo[0]
    # Gaussian filtering takes a float sigma; every other filter an int size.
    if( 'gauss' in method ):
        try:
            Nf = float(filterInfo[1])
        except:
            print(' Failed to obtain <sigma> for the Gaussian filter. Exiting.')
            sys.exit(1)
    else:
        try:
            Nf = int(filterInfo[1])
        except:
            print(' Failed to obtain <size> for the filters. Exiting.')
            sys.exit(1)
    if( 'median' in method ):
        print(' Median {0}x{0} filter applied. '.format(Nf))
        filtered = sn.median_filter(Rx, size=Nf)
    elif( 'perc' in method ):
        print(' Percentile 60 {0}x{0} filter applied. '.format(Nf))
        filtered = sn.percentile_filter(Rx, 60, size=Nf)
    elif( 'rank' in method ):
        print(' Rank 5 {0}x{0} filter applied. '.format(Nf))
        filtered = sn.rank_filter(Rx, 5, size=Nf)
    elif( 'gauss' in method ):
        print(' Gaussian sigma={} filter applied. '.format(Nf))
        filtered = sn.gaussian_filter(Rx, sigma=Nf)
    elif( 'local' in method ):
        print(' Local mean {0}x{0} filter applied. '.format(Nf))
        filtered = sn.uniform_filter(Rx, size=Nf)
    elif( 'max' in method ):
        print('Max {0}x{0} filter applied. '.format(Nf))
        filtered = sn.maximum_filter(Rx, size=Nf)
    else:
        print(' No filter applied. ')
        filtered = Rx
    return filtered
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def labelRaster(R, maskId=None):
    """Label connected regions of raster R, optionally restricted to maskId value(s).

    maskId: an int or list of ints selecting which raster values to keep
    before labeling. Returns (labeled raster, number of shapes found).
    """
    import scipy.ndimage.measurements as snms
    # Working copy with the same dtype as R's entries.
    Rm = np.zeros( R.shape, type(R[0,0]) )
    if( maskId is not None ):
        mIds = list()
        if( isinstance( maskId, list) ):
            mIds.extend(maskId)
        elif( isinstance( maskId, int ) ):
            mIds.append(maskId)
        else:
            sys.exit(' Error in labelRaster: maskId is not a list or int. It is {}'.format(type(maskId)))
        # Boolean union of all requested mask values.
        idx = np.zeros( R.shape , bool )
        for im in mIds:
            idx = np.maximum( idx , (R == im ) )
        Rm[idx] = R[idx] # Set desired mask values
    # NOTE(review): when maskId is None, Rm stays all zero and no shapes are
    # found -- confirm whether R itself should be labeled in that case.
    Rl, shapeCount = snms.label(Rm) # this might be slow for unfiltered data
    Rm = None
    print(' Found {} shapes from the data.'.format(shapeCount))
    return Rl, shapeCount
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def splitLabels(R, shapeCount, labelsize):
    """Split labeled regions larger than labelsize px into pieces of at most labelsize px.

    New pieces get fresh label ids appended after shapeCount. Returns the
    modified raster and the updated shape count.
    """
    import scipy.ndimage as sn
    for i in range(1,shapeCount+1):
        nlabels = np.count_nonzero(R==i)
        if (nlabels > labelsize):
            print(' Label no. '+str(i)+' will be split. Original size: '+str(nlabels)+' px.')
            nleft = nlabels
            while (nleft > labelsize):
                L = R==i
                # Seed a new piece at the first remaining pixel of label i.
                LU = np.zeros(L.shape,dtype=bool)
                Lnz = np.nonzero(L)
                LU[(Lnz[0][0],Lnz[1][0])] = True
                nnonzero = np.count_nonzero(LU)
                # Grow the seed by binary dilation (clipped to label i) until
                # it reaches labelsize or can grow no further.
                while ( nnonzero < labelsize):
                    VLU = LU
                    vnnonzero = nnonzero
                    LU = sn.binary_dilation(VLU)*L
                    if (VLU==LU).all():
                        break
                    nnonzero = np.count_nonzero(LU)
                # Assign the grown piece (previous iteration's mask) a new id.
                shapeCount=shapeCount+1
                R[VLU]=shapeCount
                print(' Created new label no. '+str(shapeCount)+' at size '+str(vnnonzero)+' px.')
                nleft = nleft - vnnonzero
            print(' Label no. '+str(i)+' is now at size '+str(nleft)+' px.')
    return R, shapeCount
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def openTifAsNumpy(tifFile):
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1800000000
im = Image.open(tifFi |
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
from utils import CDNEngine
from utils import request
if sys.version_info >= (3, 0):
import subprocess as commands
import urllib.parse as urlparse
else:
import commands
| import urlparse
def detect(hostname):
    """
    Performs CDN detection thanks to information disclosure from server error.

    Parameters
    ----------
    hostname : str
        Hostname to assess
    """
    print('[+] Error server detection\n')
    # Keep only the network-location part of the given URL.
    hostname = urlparse.urlparse(hostname).netloc
    # Matches dotted-quad IPv4 addresses in the resolver output.
    regexp = re.compile('\\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\\b')
    # Resolve the hostname via the system `host` command.
    out = commands.getoutput("host " + hostname)
    addresses = regexp.finditer(out)
    for addr in addresses:
        # Request each address directly; a 500 error page may disclose
        # the CDN's identity in its body.
        res = request.do('http://' + addr.group())
        if res is not None and res.status_code == 500:
            CDNEngine.find(res.text.lower())
|
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
example_user_input_material
---------------------------
Shows a user how to input a mineral of his/her choice without using the library and which physical values
need to be input for BurnMan to calculate :math:`V_P, V_\Phi, V_S` and density at depth.
*Specifically uses:*
* :class:`burnman.mineral.Mineral`
*Demonstr | ates:*
* how to create your own minerals
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
# A note about units: all the material parameters are expected to be in plain SI units.
# This means that the elastic moduli should be in Pascals and NOT Gigapascals,
# and the Debye temperature should be in K not C. Additionally, the reference volume
# should be in m^3/(mol molecule) and not in unit cell volume and 'n' should be
# the number of atoms per molecule. Frequently in the literature the reference volume
# is given in Angstrom^3 per unit cell. To convert this to m^3/(mol of molecule)
#you should multiply by 10^(-30) * N_a / Z, where N_a is Avogadro's number and Z is the number of
# atoms per unit cell. You can look up Z in many places, including www.mindat.org
if __name__ == "__main__":
    ### input variables ###
    #######################
    #INPUT for method
    """ choose 'slb2' (finite-strain 2nd order shear modulus,
        stixrude and lithgow-bertelloni, 2005)
    or 'slb3 (finite-strain 3rd order shear modulus,
        stixrude and lithgow-bertelloni, 2005)
    or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
        matas et al. 2007)
    or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
        matas et al. 2007)
    or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
       (your choice in geotherm will not matter in this case))
    or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
    (your choice in geotherm will not matter in this case))"""
    method = 'slb3'
    #in form name_of_mineral (burnman.mineral <- creates list with parameters)
    # Custom mineral: all parameters in plain SI units (see note above).
    class own_material (burnman.Mineral):
        def __init__(self):
            self.params = {
                'name': 'myownmineral',
                'equation_of_state': method,
                'V_0': 10.844e-6, #Molar volume [m^3/(mole molecules)]
                #at room pressure/temperature
                'K_0': 135.19e9, #Reference bulk modulus [Pa]
                #at room pressure/temperature
                'Kprime_0': 6.04, #pressure derivative of bulk modulus
                'G_0': 175.0e9, #reference shear modulus
                #at room pressure/temperature
                'Gprime_0': 1.7, #pressure derivative of shear modulus
                'molar_mass': .055845, #molar mass in units of [kg/mol]
                'n': 1, #number of atoms per formula unit
                'Debye_0': 998.85, #Debye temperature for material.
                #See Stixrude & Lithgow-Bertelloni, 2005 for values
                'grueneisen_0': 1.368, #Gruneisen parameter for material.
                #See Stixrude & Lithgow-Bertelloni, 2005 for values
                'q_0': 0.917, #isotropic strain derivative of gruneisen
                #parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
                'eta_s_0': 3.0 #full strain derivative of gruneisen parameter
                #parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
            }
            burnman.Mineral.__init__(self)
    rock = own_material()
    #seismic model for comparison: (see burnman/seismic.py)
    seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast()
    number_of_points = 20 #set on how many depth slices the computations should be done
    depths = np.linspace(700e3,2800e3, number_of_points)
    #depths = seismic_model.internal_depth_list()
    seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
    # Geotherm evaluated at the seismic pressures.
    temperature = burnman.geotherm.brown_shankland(seis_p)
    # The next line is not required here, because the method is set automatically by defining 'equation_of_state' in mineral.params. This shows an alternative way to set the method later, or reset the method to a different one.
    rock.set_method(method)
    # NOTE: Python 2 print statement.
    print "Calculations are done for:"
    rock.debug_print()
    mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_G = \
        burnman.velocities_from_rock(rock, seis_p, temperature, \
        burnman.averaging_schemes.VoigtReussHill())
    # Misfit between the computed and the seismic reference profiles.
    [vs_err, vphi_err, rho_err]= \
        burnman.compare_chifactor([mat_vs,mat_vphi,mat_rho], [seis_vs,seis_vphi,seis_rho])
    # NOTE: Python 2 print statement.
    print vs_err, vphi_err, rho_err
|
import sys, os, json
from pathlib import Path
from acmacs_py.mapi_utils import MapiSettings
# ======================================================================
class CladeData:
    """Lookup helper around the "clades.mapi" mapi settings file."""

    # Maps subtype keys to the prefix used by clade entry names in the mapi file.
    sSubtypeToCladePrefix = {"h1pdm": "clades-A(H1N1)2009pdm", "h3": "clades-A(H3N2)", "bvic": "clades-B/Vic", "byam": "clades-B/Yam"}

    def __init__(self):
        self.mapi_settings = MapiSettings("clades.mapi")

    def entry_names_for_subtype(self, subtype):
        """Return the sorted mapi entry names belonging to *subtype*."""
        prefix = self.sSubtypeToCladePrefix[subtype]
        return sorted(entry for entry in self.mapi_settings.names() if entry.startswith(prefix))

    def chart_draw_modify(self, *args, **kw):
        """Delegate to MapiSettings.chart_draw_modify."""
        self.mapi_settings.chart_draw_modify(*args, **kw)

    def chart_draw_reset(self, *args, **kw):
        """Delegate to MapiSettings.chart_draw_reset."""
        self.mapi_settings.chart_draw_reset(*args, **kw)
# ======================================================================
def load(app):
    """Create the shared CladeData instance and register it in the app registry."""
    clade_data = CladeData()
    app["clade_data"] = clade_data
# ======================================================================
|
d})
return newword
class Vectorizer(object):
    """Bag-of-ngram feature extractor: normalizes/stems text, then selects a
    vocabulary per score class by Fisher exact test p-values."""
    def __init__(self):
        # fit() must run before get_features()/batch_get_features().
        self.fit_done = False
    def fit(self, input_text, input_scores, max_features=100, min_features=3):
        """Fit a wide candidate CountVectorizer and a final one restricted to
        the selected vocabulary."""
        self.spell_corrector = SpellCorrector()
        self.stemmer = PorterStemmer()
        new_text = self.batch_generate_new_text(input_text)
        # Concatenate normalized text to the raw text so both token forms count.
        input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
        # NOTE(review): min_features/len(input_text) is integer division under
        # Python 2, so min_df is 0 whenever min_features < len(input_text).
        self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
        self.vectorizer1.fit(input_text)
        self.vocab = self.get_vocab(input_text, input_scores, max_features)
        self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
        self.fit_done = True
        self.input_text = input_text
    def spell_correct_text(self, text):
        """Lowercase text and spell-correct it word by word; returns a list of words."""
        text = text.lower()
        split = text.split(" ")
        corrected = [self.spell_corrector.correct(w) for w in split]
        return corrected
    def batch_apply(self, all_tokens, applied_func):
        """Apply applied_func to every value of the token dict, in place."""
        for key in all_tokens:
            cor = applied_func(all_tokens[key])
            all_tokens[key] = cor
        return all_tokens
    def batch_generate_new_text(self, text):
        """Normalize a list of documents: strip punctuation, collapse spaces,
        and stem each unique token once via a lookup table."""
        text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
        text = [re.sub("\s+", " ", t) for t in text]
        t_tokens = [t.split(" ") for t in text]
        # Stem each distinct token only once, then map back into the documents.
        all_token_list = list(set(chain.from_iterable(t_tokens)))
        all_token_dict = {}
        for t in all_token_list:
            all_token_dict.update({t : t})
        # NOTE(review): the stemmer is applied twice here (duplicate line);
        # presumably one pass was meant to be spell correction -- confirm.
        all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
        all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
        for i in xrange(0,len(t_tokens)):
            for j in xrange(0,len(t_tokens[i])):
                t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
        new_text = [" ".join(t) for t in t_tokens]
        return new_text
    def generate_new_text(self, text):
        """Normalize a single document: strip punctuation, spell-correct, stem."""
        no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
        no_punctuation = re.sub("\s+", " ", no_punctuation)
        corrected = self.spell_correct_text(no_punctuation)
        corrected = [self.stemmer.stem(w) for w in corrected]
        new = " ".join(corrected)
        return new
    def get_vocab(self, input_text, input_scores, max_features):
        """Select up to max_features ngrams whose presence best separates each
        score class from the rest (two-tailed Fisher exact test)."""
        train_mat = self.vectorizer1.transform(input_text)
        input_score_med = np.median(input_scores)
        new_scores = [0 if i<=input_score_med else 1 for i in input_scores]
        # Features allotted per score class.
        ind_max_features = math.floor(max_features/max(input_scores))
        all_vocab = []
        all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
        for s in xrange(0,max(input_scores)):
            sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
            out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
            pvalues = []
            for i in xrange(0,len(all_cols)):
                lcol = all_cols[i]
                good_lcol = lcol[sel_inds]
                bad_lcol = lcol[out_inds]
                # 2x2 contingency counts: feature present/absent in/out of class s.
                good_lcol_present = len(good_lcol[good_lcol > 0])
                good_lcol_missing = len(good_lcol[good_lcol == 0])
                bad_lcol_present = len(bad_lcol[bad_lcol > 0])
                bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
                pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
                pvalues.append(pval.two_tail)
            col_inds = list(xrange(0,train_mat.shape[1]))
            p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
            # NOTE(review): DataFrame.sort was removed in modern pandas
            # (sort_values); this code targets an old pandas release.
            p_frame = p_frame.sort(['pvalues'], ascending=True)
            getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
            vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2])
            all_vocab.append(vocab)
        return list(set(list(chain.from_iterable(all_vocab))))
    def batch_get_features(self, text):
        """Vectorize a list of documents with the fitted vocabulary."""
        if not self.fit_done:
            raise Exception("Vectorizer has not been created.")
        new_text = self.batch_generate_new_text(text)
        text = [text[i] + new_text[i] for i in xrange(0,len(text))]
        return (self.vectorizer.transform(text).todense())
    def get_features(self, text):
        """Vectorize a single document (or 1-element list) with the fitted vocabulary."""
        if not self.fit_done:
            raise Exception("Vectorizer has not been created.")
        itext=text
        if isinstance(text, list):
            itext = text[0]
        new_text = self.generate_new_text(itext)
        if isinstance(text, list):
            text = [text[0] + new_text]
        else:
            text = [text + new_text]
        return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
data = Complex()
row_data = List()
speaker_code_dict = Dict()
speaker_codes = List()
vectorizer = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
help_text = "Cleanup simpsons scripts."
args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
scriptfile = kwargs.get('scriptfile')
script_data = pickle.load(open(scriptfile))
script = script_data.tasks[2].voice_lines.value
speakers = []
lines = []
for s in script:
for (i,l) in enumerate(s):
if i>0:
previous_line = s[i-1]['line']
previous_speaker = s[i-1]['speaker']
else:
previous_line = ""
previous_speaker = ""
if i>1:
two_back_speaker = s[i-2]['speaker']
else:
two_back_speaker = ""
if len(s)>i+1:
next_line = s[i+1]['line']
else:
next_line = ""
current_line = s[i]['line']
current_speaker = s[i]['speaker']
lines.append(current_line)
speakers.append(current_speaker)
row_data = {
'previous_line' : previous_line,
'previous_speaker' : previous_speaker,
'next_line' : next_line,
'current_line' : current_line,
'current_speaker' : current_speaker,
'two_back_speaker' : two_back_speaker
}
self.row_data.append(row_data)
self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
self.max_features = math.floor(MAX_FEATURES)/3
self.vectorizer = Vectorizer()
self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
self.speaker_code_dict.update({'' : -1})
meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
#meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataF |
# Encoding: UTF-8
"""Czech conjugation
"""
from spline.i18n.formatter import Formatter, BaseWord, parse_bool
class Word(BaseWord):

    @classmethod
    def guess_type(cls, word, **props):
        """Choose a word class from the ending: 'í' marks a soft adjective,
        'ý' a hard one; anything else stays a plain Word."""
        if word.endswith(u'í'):
            return SoftAdjective
        if word.endswith(u'ý'):
            return HardAdjective
        return Word
class Adjective(Word):
    """Common adjective inflection; subclasses supply the ending tables."""
    def __init__(self, word):
        self.root = word

    _interesting_categories = 'gender number case'.split()
    # Defaults: masculine, nominative (case 1), singular.
    gender = 'm'
    case = 1
    number = 'sg'

    def inflect(self, **props):
        """Return root + ending for the requested gender/case/number (and,
        for masculine, animacy)."""
        gender = props.get('gender', self.gender)
        # Ending tables hold 14 entries: cases 1-7 singular, then 1-7 plural.
        case_no = int(props.get('case', self.case)) - 1
        if props.get('number', self.number) == 'pl':
            case_no += 7
        if gender == 'f':
            endings = self.endings_f
        elif gender != 'm':
            endings = self.endings_n
        elif parse_bool(props.get('animate', True)):
            endings = self.endings_ma
        else:
            endings = self.endings_mi
        return self.root + endings[case_no]
class SoftAdjective(Adjective):
    # Soft adjectives (e.g. "jarní"): endings built on the vowel 'í'.
    def __init__(self, word):
        # Strip the final 'í' so the ending tables append cleanly to the root.
        if word.endswith(u'í'):
            self.root = word[:-1]
        else:
            self.root = word
    # 14 endings per table: cases 1-7 singular followed by cases 1-7 plural.
    endings_ma = u'í,ího,ímu,ího,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
    endings_mi = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
    endings_f = u'í,í,í,í,í,í,í,í,ích,ím,í,í,ích,ími'.split(',')
    endings_n = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
class HardAdjective(Adjective):
    # Hard adjectives (e.g. "mladý"): endings built on 'ý'/'á'/'é'.
    def __init__(self, word):
        # Strip a final 'ý', 'á' or 'é' so the ending tables append to the root.
        if any(word.endswith(x) for x in u'ýáé'):
            self.root = word[:-1]
        else:
            self.root = word
    # 14 endings per table: cases 1-7 singular followed by cases 1-7 plural.
    endings_ma = u'ý,ého,ému,ého,ý,ém,ým,í,ých,ým,é,í,ých,ými'.split(',')
    endings_mi = u'ý,ého,ému,ý,ý,ém,ým,é,ých,ým,é,é,ých,ými'.split(',')
    endings_f = u'á,é,é,ou,á,é,ou,é,ých,ým,é,é,ých,ými'.split(',')
    endings_n = u'é,ého,ému,é,é,ém,ým,á,ých,ým,á,á,ých,ými'.split(',')
# Module-level formatter configured for Czech, using Word to guess word classes.
formatter = Formatter('cs', Word)
class Template(unicode):
    # Python 2 unicode subclass whose format() routes through the Czech formatter.
    def format(self, *args, **kwargs):
        return formatter.format(self, *args, **kwargs)
|
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) > self.server_max_value_length: return(0)
return (flags, len(val), val)
def _set(self, cmd, key, val, time, min_compress_len = 0):
    """Issue a storage command (set/add/replace/cas/...) for key.

    Returns a true value when the server answered STORED, 0 on any
    failure. Retries once if the connection was found dead.
    (Python 2 code: uses the `except Type, name` syntax.)
    """
    self.check_key(key)
    server, key = self._get_server(key)
    if not server:
        return 0

    def _unsafe_set():
        self._statlog(cmd)

        # Serialize/compress the value and compute its flags.
        store_info = self._val_to_store_info(val, min_compress_len)
        if not store_info: return(0)

        if cmd == 'cas':
            # Without a cached cas id, fall back to a plain set.
            if key not in self.cas_ids:
                return self._set('set', key, val, time, min_compress_len)
            fullcmd = "%s %s %d %d %d %d\r\n%s" % (
                cmd, key, store_info[0], time, store_info[1],
                self.cas_ids[key], store_info[2])
        else:
            fullcmd = "%s %s %d %d %d\r\n%s" % (
                cmd, key, store_info[0], time, store_info[1], store_info[2])

        try:
            server.send_cmd(fullcmd)
            return(server.expect("STORED") == "STORED")
        except socket.error, msg:
            if isinstance(msg, tuple): msg = msg[1]
            server.mark_dead(msg)
        return 0

    try:
        return _unsafe_set()
    except _ConnectionDeadError:
        # retry once
        try:
            server._get_socket()
            return _unsafe_set()
        except (_ConnectionDeadError, socket.error), msg:
            server.mark_dead(msg)
            return 0
def _get(self, cmd, key):
    """Issue a retrieval command ('get' or 'gets') for key.

    Returns the deserialized value or None on miss/error. For 'gets'
    the returned cas id is cached for later 'cas' stores. Retries once
    if the connection was found dead.
    (Python 2 code: uses the `except Type, name` syntax.)
    """
    self.check_key(key)
    server, key = self._get_server(key)
    if not server:
        return None

    def _unsafe_get():
        self._statlog(cmd)

        try:
            server.send_cmd("%s %s" % (cmd, key))
            rkey = flags = rlen = cas_id = None

            if cmd == 'gets':
                rkey, flags, rlen, cas_id, = self._expect_cas_value(server)
                # Remember the cas token so a subsequent cas() can use it.
                if rkey and self.cache_cas:
                    self.cas_ids[rkey] = cas_id
            else:
                rkey, flags, rlen, = self._expectvalue(server)

            if not rkey:
                return None
            try:
                value = self._recv_value(server, flags, rlen)
            finally:
                # Always consume the trailing END line to keep the
                # connection's protocol state in sync.
                server.expect("END")
        except (_Error, socket.error), msg:
            if isinstance(msg, tuple): msg = msg[1]
            server.mark_dead(msg)
            return None

        return value

    try:
        return _unsafe_get()
    except _ConnectionDeadError:
        # retry once
        try:
            if server.connect():
                return _unsafe_get()
            return None
        except (_ConnectionDeadError, socket.error), msg:
            server.mark_dead(msg)
            return None
def get(self, key):
    '''Fetch *key* from the memcache.

    @return: The stored value, or None on miss/error.
    '''
    return self._get('get', key)
def gets(self, key):
    '''Fetch *key* from the memcache, remembering its cas id for a later 'cas'.

    @return: The stored value, or None on miss/error.
    '''
    return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
    '''
    Retrieves multiple keys from the memcache doing just one query.

    >>> success = mc.set("foo", "bar")
    >>> success = mc.set("baz", 42)
    >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
    1
    >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
    1

    This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.

    >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
    1

    get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields.
    They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix.
    In this mode, the key_prefix could be a table name, and the key itself a db primary key number.

    >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
    1
    >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
    1

    This method is recommended over regular L{get} as it lowers the number of
    total packets flying around your network, reducing total latency, since
    your app doesn't have to wait for each round-trip of L{get} before sending
    the next one.

    See also L{set_multi}.

    @param keys: An array of keys.
    @param key_prefix: A string to prefix each key when we communicate with memcache.
        Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
    @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
    '''

    self._statlog('get_multi')

    # Bucket the (prefixed) keys per server and remember the reverse mapping.
    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)

    # send out all requests on each server before reading anything
    dead_servers = []
    for server in server_keys.iterkeys():
        try:
            server.send_cmd("get %s" % " ".join(server_keys[server]))
        except socket.error, msg:
            if isinstance(msg, tuple): msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)

    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]

    retvals = {}
    for server in server_keys.iterkeys():
        try:
            # Drain VALUE lines until the terminating END.
            line = server.readline()
            while line and line != 'END':
                rkey, flags, rlen = self._expectvalue(server, line)
                #  Bo Yang reports that this can sometimes be None
                if rkey is not None:
                    val = self._recv_value(server, flags, rlen)
                    retvals[prefixed_to_orig_key[rkey]] = val   # un-prefix returned key.
                line = server.readline()
        except (_Error, socket.error), msg:
            if isinstance(msg, tuple): msg = msg[1]
            server.mark_dead(msg)
    return retvals
def _expect_cas_value(self, server, line=None):
    """Parse a "VALUE <key> <flags> <bytes> <cas id>" response line.

    Reads a line from server when none is supplied. Returns
    (key, flags, length, cas_id), or (None, None, None, None) when the
    line is not a VALUE response (e.g. the terminating 'END').
    """
    if not line:
        line = server.readline()

    if line and line[:5] == 'VALUE':
        # Renamed the third field from `len` to `rlen` so the builtin
        # len() is not shadowed.
        resp, rkey, flags, rlen, cas_id = line.split()
        return (rkey, int(flags), int(rlen), int(cas_id))
    else:
        return (None, None, None, None)
def _expectvalue(self, server, line=None):
    """Parse a "VALUE <key> <flags> <bytes>" response line.

    Reads a line from server when none is supplied. Returns
    (key, flags, length), or (None, None, None) when the line is not a
    VALUE response (e.g. the terminating 'END').
    """
    if not line:
        line = server.readline()

    if line and line[:5] == 'VALUE':
        # Renamed the third field from `len` to `length` so the builtin
        # len() is not shadowed.
        resp, rkey, flags, length = line.split()
        flags = int(flags)
        rlen = int(length)
        return (rkey, flags, rlen)
    else:
        return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Erro |
#!/usr/bin/env python
'''
Copyright (C) 2011 Karlisson Bezerra <contact@hacktoon.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex
import simplestyle
|
class Canvas:
"""Canvas API helper class"""
def __init__(self, parent, width, height, context = "ctx"):
self.obj = context
self.code = [] #stores the code
self.style = {}
self.styleCache = {} #stor | es the previous style applied
self.parent = parent
self.width = width
self.height = height
def write(self, text):
self.code.append("\t" + text.replace("ctx", self.obj) + "\n")
def output(self):
from textwrap import dedent
html = """
<!DOCTYPE html>
<html>
<head>
<title>Inkscape Output</title>
</head>
<body>
<canvas id='canvas' width='%d' height='%d'></canvas>
<script>
var %s = document.getElementById("canvas").getContext("2d");
%s
</script>
</body>
</html>
"""
return dedent(html) % (self.width, self.height, self.obj, "".join(self.code))
def equalStyle(self, style, key):
"""Checks if the last style used is the same or there's no style yet"""
if key in self.styleCache:
return True
if key not in style:
return True
return style[key] == self.styleCache[key]
def beginPath(self):
self.write("ctx.beginPath();")
def createLinearGradient(self, href, x1, y1, x2, y2):
data = (href, x1, y1, x2, y2)
self.write("var %s = \
ctx.createLinearGradient(%f,%f,%f,%f);" % data)
def createRadialGradient(self, href, cx1, cy1, rx, cx2, cy2, ry):
data = (href, cx1, cy1, rx, cx2, cy2, ry)
self.write("var %s = ctx.createRadialGradient\
(%f,%f,%f,%f,%f,%f);" % data)
def addColorStop(self, href, pos, color):
self.write("%s.addColorStop(%f, %s);" % (href, pos, color))
def getColor(self, rgb, a):
r, g, b = simplestyle.parseColor(rgb)
a = float(a)
if a < 1:
return "'rgba(%d, %d, %d, %.1f)'" % (r, g, b, a)
else:
return "'rgb(%d, %d, %d)'" % (r, g, b)
def setGradient(self, href):
"""
for stop in gstops:
style = simplestyle.parseStyle(stop.get("style"))
stop_color = style["stop-color"]
opacity = style["stop-opacity"]
color = self.getColor(stop_color, opacity)
pos = float(stop.get("offset"))
self.addColorStop(href, pos, color)
"""
return None #href
def setOpacity(self, value):
self.write("ctx.globalAlpha = %.1f;" % float(value))
def setFill(self, value):
try:
alpha = self.style["fill-opacity"]
except:
alpha = 1
if not value.startswith("url("):
fill = self.getColor(value, alpha)
self.write("ctx.fillStyle = %s;" % fill)
def setStroke(self, value):
try:
alpha = self.style["stroke-opacity"]
except:
alpha = 1
self.write("ctx.strokeStyle = %s;" % self.getColor(value, alpha))
def setStrokeWidth(self, value):
self.write("ctx.lineWidth = %f;" % self.parent.unittouu(value))
def setStrokeLinecap(self, value):
self.write("ctx.lineCap = '%s';" % value)
def setStrokeLinejoin(self, value):
self.write("ctx.lineJoin = '%s';" % value)
def setStrokeMiterlimit(self, value):
self.write("ctx.miterLimit = %s;" % value)
def setFont(self, value):
self.write("ctx.font = \"%s\";" % value)
def moveTo(self, x, y):
self.write("ctx.moveTo(%f, %f);" % (x, y))
def lineTo(self, x, y):
self.write("ctx.lineTo(%f, %f);" % (x, y))
def quadraticCurveTo(self, cpx, cpy, x, y):
data = (cpx, cpy, x, y)
self.write("ctx.quadraticCurveTo(%f, %f, %f, %f);" % data)
def bezierCurveTo(self, x1, y1, x2, y2, x, y):
data = (x1, y1, x2, y2, x, y)
self.write("ctx.bezierCurveTo(%f, %f, %f, %f, %f, %f);" % data)
def rect(self, x, y, w, h, rx = 0, ry = 0):
    """Emit a rectangle path; nonzero rx/ry produce rounded corners."""
    if not (rx or ry):
        self.write("ctx.rect(%f, %f, %f, %f);" % (x, y, w, h))
        return
    # Rounded rectangle traced anticlockwise from the top-left corner.
    right = x + w
    bottom = y + h
    self.moveTo(x, y + ry)
    self.lineTo(x, bottom - ry)
    self.quadraticCurveTo(x, bottom, x + rx, bottom)
    self.lineTo(right - rx, bottom)
    self.quadraticCurveTo(right, bottom, right, bottom - ry)
    self.lineTo(right, y + ry)
    self.quadraticCurveTo(right, y, right - rx, y)
    self.lineTo(x + rx, y)
    self.quadraticCurveTo(x, y, x, y + ry)
def arc(self, x, y, r, a1, a2, flag):
    """Emit ctx.arc(); *flag* is the canvas anticlockwise argument (0/1)."""
    self.write("ctx.arc(%f, %f, %f, %f, %.8f, %d);" %
               (x, y, r, a1, a2, flag))
def fillText(self, text, x, y):
    """Emit fillText; *text* is interpolated unescaped into a JS string."""
    self.write("ctx.fillText(\"%s\", %f, %f);" % (text, x, y))
def translate(self, cx, cy):
    """Emit a ctx.translate() transform."""
    self.write("ctx.translate(%f, %f);" % (cx, cy))
def rotate(self, angle):
    """Emit a ctx.rotate() transform (*angle* in radians per canvas API)."""
    self.write("ctx.rotate(%f);" % angle)
def scale(self, rx, ry):
    """Emit a ctx.scale() transform."""
    self.write("ctx.scale(%f, %f);" % (rx, ry))
def transform(self, m11, m12, m21, m22, dx, dy):
    """Emit ctx.transform() with the six affine matrix coefficients."""
    self.write("ctx.transform(%f, %f, %f, %f, %f, %f);" %
               (m11, m12, m21, m22, dx, dy))
def save(self):
    """Emit ctx.save() (push the canvas state)."""
    self.write("ctx.save();")
def restore(self):
    """Emit ctx.restore() (pop the canvas state)."""
    self.write("ctx.restore();")
def closePath(self):
    """Paint the current path: fill and/or stroke according to self.style."""
    if self.style.get("fill", "none") != "none":
        self.write("ctx.fill();")
    if self.style.get("stroke", "none") != "none":
        self.write("ctx.stroke();")
|
"""setup.py for the connectopic_mapping package (Cython extension build)."""
from codecs import open
from os import path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Requirements
# NOTE(review): cython and numpy are also build-time requirements; consider
# declaring them as build requirements (pyproject/setup_requires) as well.
install_requires = ['cython>=0.24.1',
                    'numpy>=1.6.1',
                    'scipy>=0.16',
                    'matplotlib>=1.5.1',
                    'scikit-learn>=0.17.1',
                    'nibabel>=2.0.2',
                    'nilearn>=0.2.4',
                    'GPy>=1.0.7']

setup(
    name='connectopic_mapping',
    version='0.3.0',
    description='Connectopic mapping',
    long_description=long_description,
    author='Michele Damian',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='neuroscience connectopic mapping research',
    packages=['connectopic_mapping'],
    install_requires=install_requires,
    cmdclass={'build_ext': build_ext},
    # The haak extension is compiled from Cython and needs NumPy headers.
    ext_modules=[Extension("connectopic_mapping.haak",
                           ["connectopic_mapping/haak.pyx"],
                           include_dirs=[numpy.get_include()])],
)
|
ons(self):
return {}
req = Dummy()
req.uri = '/somepath/'
request = ModPythonRequest(req)
request._get = {u'get-key': u'get-value'}
request._post = {u'post-key': u'post-value'}
request._cookies = {u'post-key': u'post-value'}
request._meta = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<ModPythonRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<ModPythonRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_parse_cookie(self):
    # A malformed pair (":" in the key) is dropped, yielding an empty dict.
    self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
    """build_absolute_uri() passes absolute locations through unchanged and
    keeps colons in relative paths."""
    request = HttpRequest()
    self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
        'https://www.example.com/asdf')
    request.get_host = lambda: 'www.example.com'
    request.path = ''
    self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
        'http://www.example.com/path/with:colons')
def test_near_expiration(self):
    "Cookie will expire when a near expiration time is provided"
    response = HttpResponse()
    # There is a timing weakness in this test; the
    # expected result for max-age requires that there be
    # a very slight difference between the evaluated expiration
    # time, and the time evaluated in set_cookie(). If this
    # difference doesn't exist, the cookie time will be
    # 1 second larger. To avoid the problem, put in a quick sleep,
    # which guarantees that there will be a time difference.
    expires = datetime.utcnow() + timedelta(seconds=10)
    time.sleep(0.001)
    response.set_cookie('datetime', expires=expires)
    datetime_cookie = response.cookies['datetime']
    self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
    "Cookie will expire when a distant expiration time is provided"
    response = HttpResponse()
    response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
    datetime_cookie = response.cookies['datetime']
    # Far-future datetimes are serialized in the classic cookie date format.
    self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
    "Cookie will expire if max_age is provided"
    response = HttpResponse()
    response.set_cookie('max_age', max_age=10)
    max_age_cookie = response.cookies['max_age']
    self.assertEqual(max_age_cookie['max-age'], 10)
    # set_cookie derives the absolute 'expires' attribute from max_age.
    self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
def test_httponly_cookie(self):
    """httponly=True must appear both in the serialized header and as a
    cookie attribute."""
    response = HttpResponse()
    response.set_cookie('example', httponly=True)
    example_cookie = response.cookies['example']
    # A compat cookie may be in use -- check that it has worked
    # both as an output string, and using the cookie attributes
    self.assertTrue('; httponly' in str(example_cookie))
    self.assertTrue(example_cookie['httponly'])
def test_limited_stream(self):
    """LimitedStream.read()/readline() never return more than the declared
    limit, and a newline does not reset the remaining-byte accounting."""
    # Read all of a limited stream
    stream = LimitedStream(StringIO('test'), 2)
    self.assertEqual(stream.read(), 'te')
    # Reading again returns nothing.
    self.assertEqual(stream.read(), '')
    # Read a number of characters greater than the stream has to offer
    stream = LimitedStream(StringIO('test'), 2)
    self.assertEqual(stream.read(5), 'te')
    # Reading again returns nothing.
    self.assertEqual(stream.readline(5), '')
    # Read sequentially from a stream
    stream = LimitedStream(StringIO('12345678'), 8)
    self.assertEqual(stream.read(5), '12345')
    self.assertEqual(stream.read(5), '678')
    # Reading again returns nothing.
    self.assertEqual(stream.readline(5), '')
    # Read lines from a stream
    stream = LimitedStream(StringIO('1234\n5678\nabcd\nefgh\nijkl'), 24)
    # Read a full line, unconditionally
    self.assertEqual(stream.readline(), '1234\n')
    # Read a number of characters less than a line
    self.assertEqual(stream.readline(2), '56')
    # Read the rest of the partial line
    self.assertEqual(stream.readline(), '78\n')
    # Read a full line, with a character limit greater than the line length
    self.assertEqual(stream.readline(6), 'abcd\n')
    # Read the next line, deliberately terminated at the line end
    self.assertEqual(stream.readline(4), 'efgh')
    # Read the next line... just the line end
    self.assertEqual(stream.readline(), '\n')
    # Read everything else.
    self.assertEqual(stream.readline(), 'ijkl')
    # Regression for #15018
    # If a stream contains a newline, but the provided length
    # is less than the number of provided characters, the newline
    # doesn't reset the available character count
    stream = LimitedStream(StringIO('1234\nabcdef'), 9)
    self.assertEqual(stream.readline(10), '1234\n')
    self.assertEqual(stream.readline(3), 'abc')
    # Now expire the available characters
    self.assertEqual(stream.readline(3), 'd')
    # Reading again returns nothing.
    self.assertEqual(stream.readline(2), '')
    # Same test, but with read, not readline.
    stream = LimitedStream(StringIO('1234\nabcdef'), 9)
    self.assertEqual(stream.read(6), '1234\na')
    self.assertEqual(stream.read(2), 'bc')
    self.assertEqual(stream.read(2), 'd')
    self.assertEqual(stream.read(2), '')
    self.assertEqual(stream.read(), '')
def test_stream(self):
    # The request body is readable as a raw stream.
    request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
    self.assertEqual(request.read(), 'name=value')
def test_read_after_value(self):
    """
    Reading from request is allowed after accessing request contents as
    POST or raw_post_data.
    """
    request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
    self.assertEqual(request.POST, {u'name': [u'value']})
    self.assertEqual(request.raw_post_data, 'name=value')
    self.assertEqual(request.read(), 'name=value')
def test_value_after_read(self):
    """
    Construction of POST or raw_post_data is not allowed after reading
    from request.
    """
    request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
    self.assertEqual(request.read(2), 'na')
    self.assertRaises(Exception, lambda: request.raw_post_data)
    # Once the stream has been partially consumed, POST parsing yields {}.
    self.assertEqual(request.POST, {})
def test_raw_post_data_after_POST_multipart(self):
    """
    Reading raw_post_data after parsing multipart is not allowed
    """
    # Because multipart is used for large amounts of data i.e. file uploads,
    # we don't want the data held in memory twice, and we don't want to
    # silence the error by setting raw_post_data = '' either.
    # NOTE(review): the last two elements '--boundary--' '' are adjacent
    # string literals (implicit concatenation), so the closing boundary has
    # no trailing CRLF -- confirm this is intentional.
    payload = "\r\n".join([
        '--boundary',
        'Content-Disposition: form-data; name="name"',
        '',
        'value',
        '--boundary--'
        ''])
    request = WSGIRequest({'REQUEST_METHOD': 'POST',
                           'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                           'CONTENT_LENGTH': len(payload),
                           'wsgi.input': StringIO(payload)})
    self.assertEqual(request.POST, {u'name': [u'value']})
    self.assertRaises(Exception, lambda: request.raw_post_data)
def test_POST_multipart_with_content_length_zero( |
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from anitya.lib.backends import (
    BaseBackend, get_versions_by_regex_for_text, REGEX)
from anitya.lib.exceptions import AnityaPluginException
import six

# Fallback pattern: matches "<number>/" directory links, e.g. href="1.2/".
DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"'
class FolderBackend(BaseBackend):
    ''' The custom class for project having a special hosting.

    This backend allows to specify a version_url and a regex that will
    be used to retrieve the version information.
    '''
    name = 'folder'
    examples = [
        'http://ftp.gnu.org/pub/gnu/gnash/',
        'http://subsurface.hohndel.org/downloads/',
    ]

    @classmethod
    def get_version(cls, project):
        ''' Method called to retrieve the latest version of the projects
        provided, project that relies on the backend of this plugin.

        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: the latest version found upstream
        :return type: str
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the version cannot be retrieved correctly
        '''
        return cls.get_ordered_versions(project)[-1]

    @classmethod
    def get_versions(cls, project):
        ''' Method called to retrieve all the versions (that can be found)
        of the projects provided, project that relies on the backend of
        this plugin.

        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: a list of all the possible releases found
        :return type: list
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the versions cannot be retrieved correctly
        '''
        url = project.version_url
        try:
            req = cls.call_url(url, insecure=project.insecure)
        except Exception as err:
            raise AnityaPluginException(
                'Could not call : "%s" of "%s", with error: %s' % (
                    url, project.name, str(err)))

        if not isinstance(req, six.string_types):
            req = req.text

        try:
            # Raw string for the regex escape: the former '\+' relied on
            # Python passing an unknown escape through, which raises a
            # DeprecationWarning (and is a SyntaxError in newer Pythons).
            regex = REGEX % {'name': project.name.replace('+', r'\+')}
            versions = get_versions_by_regex_for_text(
                req, url, regex, project)
        except AnityaPluginException:
            # Name-based pattern found nothing; fall back to the generic
            # "versioned folder" link pattern.
            versions = get_versions_by_regex_for_text(
                req, url, DEFAULT_REGEX, project)
        return versions
|
#!/usr/bin/env python3
"""Advent of Code 2021 day 3: power consumption / life-support ratings."""
import re
from enum import Enum

diags = []
with open('input.txt', 'r') as f:
    diags = f.read().splitlines()

#--- challenge 1
# gamma collects the most common bit of each column; epsilon is its
# bitwise complement over the report width.
gamma = ""
for i in range(0, len(diags[0])):
    zeros = len([x for x in diags if x[i] == "0"])
    ones = len([x for x in diags if x[i] == "1"])
    gamma += "0" if zeros > ones else "1"
# Derive the complement mask from the actual line width instead of the
# former hard-coded 12-bit constant, so any report width works.
width = len(diags[0])
gamma = int(gamma, 2)
epsilon = gamma ^ ((1 << width) - 1)
print("Solution to challenge 1: {}".format(gamma * epsilon))
#--- challenge 2
class Rating(Enum):
    """Life-support rating selector for challenge 2."""
    OXYGEN = 0
    CO2 = 1


def get_val(diags, rating):
    """Return the requested life-support rating as an int.

    Filters *diags* column by column: OXYGEN keeps the most common bit
    (ties keep '1'), CO2 keeps the least common bit (ties keep '0'),
    until a single line remains.

    Bug fix: the two branches were swapped -- the OXYGEN branch removed
    the most common bit (computing the CO2 rating) and vice versa. The
    final product was coincidentally unaffected, but each individual
    rating was wrong.
    """
    candidates = list(diags)  # don't mutate the caller's list
    for i in range(0, len(candidates[0])):
        zeros = len([x for x in candidates if x[i] == "0"])
        ones = len(candidates) - zeros
        if rating == Rating.OXYGEN:
            keep = "1" if ones >= zeros else "0"   # most common, tie -> '1'
        else:
            keep = "0" if zeros <= ones else "1"   # least common, tie -> '0'
        candidates = [x for x in candidates if x[i] == keep]
        if len(candidates) == 1:
            return int(candidates[0], 2)
# Challenge 2: the answer is the product of the two life-support ratings.
oxygen = get_val(diags, Rating.OXYGEN)
co2 = get_val(diags, Rating.CO2)
print("Solution to challenge 2: {}".format(oxygen * co2))
|
__(self, *args, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Publication(BaseObject):
    """Mapped fixture type for the ``publication`` table."""
class Issue(BaseObject):
    """Mapped fixture type for the ``issue`` table."""
class Location(BaseObject):
    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name)))

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Intern LocationName rows: reuse a persisted row with this name,
        # else reuse one already pending in the session, else create a new one.
        session = create_session()
        s = session.query(LocationName).filter(LocationName.name==name).first()
        session.expunge_all()
        if s is not None:
            self._name = s
            return
        found = False
        for i in session.new:
            if isinstance(i, LocationName) and i.name == name:
                self._name = i
                found = True
                break
        if found == False:
            self._name = LocationName(name=name)

    # `name` reads/writes the related LocationName via the helpers above.
    name = property(_get_name, _set_name)
class LocationName(BaseObject):
    """Interned name row referenced by Location."""

    def __repr__(self):
        return "{0}()".format(type(self).__name__)
class PageSize(BaseObject):
    """Page dimensions fixture, e.g. ``PageSize(210x297, A4)``."""

    def __repr__(self):
        return "{0}({1}x{2}, {3})".format(
            type(self).__name__, self.width, self.height, self.name)
class Magazine(BaseObject):
    """Magazine fixture tying a Location to a PageSize."""

    def __repr__(self):
        return "{0}({1!r}, {2!r})".format(
            type(self).__name__, self.location, self.size)
class Page(BaseObject):
    """Base page fixture; repr shows the page number."""

    def __repr__(self):
        return "{0}({1})".format(type(self).__name__, self.page_no)
class MagazinePage(Page):
    """Page subtype that belongs to a Magazine."""

    def __repr__(self):
        return "{0}({1}, {2!r})".format(
            type(self).__name__, self.page_no, self.magazine)
class ClassifiedPage(MagazinePage):
    """Deepest page subtype in the three-level inheritance chain."""
class MagazineTest(fixtures.MappedTest):
    # Schema fixture for the magazine/page polymorphic-inheritance tests.
    @classmethod
    def define_tables(cls, metadata):
        # Published as module globals so the mapper-setup code in this
        # module can reference the tables directly.
        global publication_table, issue_table, location_table, location_name_table, magazine_table, \
            page_table, magazine_page_table, classified_page_table, page_size_table
        publication_table = Table('publication', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(45), default=''),
        )
        issue_table = Table('issue', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('publication_id', Integer, ForeignKey('publication.id')),
            Column('issue', Integer),
        )
        location_table = Table('location', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('issue_id', Integer, ForeignKey('issue.id')),
            Column('ref', CHAR(3), default=''),
            Column('location_name_id', Integer, ForeignKey('location_name.id')),
        )
        location_name_table = Table('location_name', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(45), default=''),
        )
        magazine_table = Table('magazine', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('location_id', Integer, ForeignKey('location.id')),
            Column('page_size_id', Integer, ForeignKey('page_size.id')),
        )
        page_table = Table('page', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('page_no', Integer),
            # Polymorphic discriminator: 'p' page, 'm' magazine page,
            # 'c' classified page (see the mapper setup below).
            Column('type', CHAR(1), default='p'),
        )
        magazine_page_table = Table('magazine_page', metadata,
            Column('page_id', Integer, ForeignKey('page.id'), primary_key=True),
            Column('magazine_id', Integer, ForeignKey('magazine.id')),
            Column('orders', Text, default=''),
        )
        classified_page_table = Table('classified_page', metadata,
            Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True),
            Column('titles', String(45), default=''),
        )
        page_size_table = Table('page_size', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('width', Integer),
            Column('height', Integer),
            Column('name', String(45), default=''),
        )
def _generate_round_trip_test(use_unions=False, use_joins=False):
def test_roundtrip(self):
publication_mapper = mapper(Publication, publication_table)
issue_mapper = mapper(Issue, issue_table, properties = {
'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")),
})
location_name_mapper = mapper(LocationName, location_name_table)
location_mapper = mapper(Location, location_table, properties = {
'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")),
'_name': relationship(LocationName),
})
page_size_mapper = mapper(PageSize, page_size_table)
magazine_mapper = mapper(Magazine, magazine_table, properties = {
'location': relationship(Location, backref=backref('magazine', uselist=False)),
'size': relationship(PageSize),
})
if use_unions:
page_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
'p': page_table.select(page_table.c.type=='p'),
}, None, 'page_join')
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p')
elif use_joins:
page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table)
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p')
else:
page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p')
if use_unions:
magazine_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
}, None, 'page_join')
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no))
})
elif use_joins:
magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table)
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
else:
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
classified_page_mapper = mapper(ClassifiedPage,
classified_page_table,
inherits=magazi | ne_page_mapper,
polymorphic_identity='c',
primary_key=[page_table.c.id])
session = create_session()
pub = Publication(name='Test')
issu | e = Issue(issue=46,publication=pub)
location = Location(ref='ABC',name='London',issue=issue)
page_size = PageSize(name='A4',width=210,height=297)
magazine = Magazine(location=location,size=page_size)
page = ClassifiedPage(magazine=magazine,page_no=1)
page2 = MagazinePage(magazine=magazine,page_no=2)
page3 = |
import os
import sys
import signal
import subprocess
import tempfile
import curses

import visidata

# Default: no recorded SIGTSTP handler; SuspendCurses only restores one
# when the core has stored it here.
visidata.vd.tstp_signal = None
class SuspendCurses:
    'Context manager to leave windowed mode on enter and restore it on exit.'
    def __enter__(self):
        curses.endwin()
        if visidata.vd.tstp_signal:
            # Give the terminal back its original Ctrl-Z handler while
            # curses is suspended.
            signal.signal(signal.SIGTSTP, visidata.vd.tstp_signal)
    def __exit__(self, exc_type, exc_val, tb):
        # Re-enter curses mode and repaint the full screen.
        curses.reset_prog_mode()
        visidata.vd.scrFull.refresh()
        curses.doupdate()
@visidata.VisiData.api
def launchEditor(vd, *args):
    'Launch $EDITOR with *args* as arguments.'
    cmd = os.environ.get('EDITOR') or vd.fail('$EDITOR not set')
    argv = cmd.split() + list(args)
    with SuspendCurses():
        return subprocess.call(argv)
@visidata.VisiData.api
def launchBrowser(vd, *args):
    'Launch $BROWSER with *args* as arguments.'
    cmd = os.environ.get('BROWSER') or vd.fail('(no $BROWSER) for %s' % args[0])
    subprocess.call([cmd] + list(args))
@visidata.VisiData.api
def launchExternalEditor(vd, v, linenum=0):
    'Launch $EDITOR to edit string *v* starting on line *linenum*.'
    # The former function-local `import tempfile` was redundant: tempfile
    # is already imported at module level; the re-import only shadowed it.
    with tempfile.NamedTemporaryFile() as temp:
        # Write the text via a second handle so it is flushed before the
        # editor opens the same path.
        with open(temp.name, 'w') as fp:
            fp.write(v)
        return launchExternalEditorPath(visidata.Path(temp.name), linenum)
def launchExternalEditorPath(path, linenum=0):
    'Launch $EDITOR to edit *path* starting on line *linenum*; return the edited text.'
    editor_args = (path, '+%s' % linenum) if linenum else (path,)
    visidata.vd.launchEditor(*editor_args)
    with open(path, 'r') as fp:
        try:
            return fp.read().rstrip('\n')  # trim inevitable trailing newlines
        except Exception as e:
            visidata.vd.exceptionCaught(e)
    return ''
def suspend():
    """Suspend the whole process (shell job control), restoring the
    terminal first via SuspendCurses."""
    import signal
    with SuspendCurses():
        # SIGSTOP cannot be caught; the process resumes on SIGCONT and
        # SuspendCurses.__exit__ then repaints the screen.
        os.kill(os.getpid(), signal.SIGSTOP)
def _breakpoint(*args, **kwargs):
    """breakpoint() hook: drop into pdb with curses mode suspended."""
    import pdb
    class VisiDataPdb(pdb.Pdb):
        def precmd(self, line):
            r = super().precmd(line)
            # An empty result means pdb is about to resume the program:
            # re-enter curses mode before control returns to VisiData.
            # (SuspendCurses methods are called unbound with self=None;
            # they don't use self.)
            if not r:
                SuspendCurses.__exit__(None, None, None, None)
            return r
        def postcmd(self, stop, line):
            if stop:
                # Leaving the debugger: drop out of curses windowed mode.
                SuspendCurses.__enter__(None)
            return super().postcmd(stop, line)
    SuspendCurses.__enter__(None)
    VisiDataPdb(nosigint=True).set_trace()
# Route the builtin breakpoint() through the curses-aware pdb wrapper and
# register the suspend/breakpoint commands on every sheet.
sys.breakpointhook = _breakpoint
visidata.BaseSheet.addCommand('^Z', 'suspend', 'suspend()', 'suspend VisiData process')
visidata.BaseSheet.addCommand('', 'breakpoint', 'breakpoint()', 'drop into pdb REPL')
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for scatterternary.marker.colorbar.tickvals."""

    def __init__(
        self,
        plotly_name="tickvals",
        parent_name="scatterternary.marker.colorbar",
        **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "data")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
from openerp import models
from openerp.addons.account.report.account_financial_report import\
report_account_common
class report_account_common_horizontal(report_account_common):
    """Financial report parser that exposes the report lines split into a
    left and a right column."""

    def __init__(self, cr, uid, name, context=None):
        super(report_account_common_horizontal, self).__init__(
            cr, uid, name, context=context)
        self.localcontext.update({
            'get_left_lines': self.get_left_lines,
            'get_right_lines': self.get_right_lines,
        })

    def get_lines(self, data, side=None):
        # Deep-copy so the caller's form data is never mutated.
        data = copy.deepcopy(data)
        used_context = data['form']['used_context']
        if used_context is None:
            used_context = {}
            data['form']['used_context'] = used_context
        used_context.update(
            account_financial_report_horizontal_side=side)
        return super(report_account_common_horizontal, self).get_lines(data)

    def get_left_lines(self, data):
        return self.get_lines(data, side='left')

    def get_right_lines(self, data):
        return self.get_lines(data, side='right')
class ReportFinancial(models.AbstractModel):
    # Inherit the standard financial report and swap in the horizontal
    # (two-column) parser defined above.
    _inherit = 'report.account.report_financial'
    _wrapped_report_class = report_account_common_horizontal
|
# coding:utf-8
import MySQLdb as mysql
from flask import Flask, request, render_template
import json

app = Flask(__name__)
# NOTE(review): database credentials are hard-coded; move them to
# configuration/environment before deploying. A single module-level
# connection/cursor is shared by all requests.
con = mysql.connect(user="root", passwd="redhat", db="jiangkun")
con.autocommit(True)
cur = con.cursor()
@app.route('/')
def index():
    # Serve the landing page.
    return render_template("index.html")
@app.route('/list')
def list():
    # Return every row of `user` as a JSON array of rows.
    # NOTE(review): the view function shadows the builtin `list`.
    sql = "select * from user"
    cur.execute(sql)
    res_json = json.dumps(cur.fetchall())
    print res_json
    return res_json
@app.route('/add')
def add():
    """Insert a user row; `name` and `passwd` come from the query string."""
    name = request.args.get('name')
    passwd = request.args.get('passwd')
    # Parameterized query: the previous %-interpolation was vulnerable to
    # SQL injection (and emitted unquoted values, i.e. invalid SQL for
    # string input). The DB driver now quotes/escapes both values.
    sql = "insert into user (name, passwd) values (%s, %s)"
    cur.execute(sql, (name, passwd))
    return "ok"
if __name__ == "__main__":
    # Listens on all interfaces; debug=True must not be used in production.
    app.run(host="0.0.0.0", debug=True, port=9092)
"""Utility functions for hc-api-python"""
from datetime import datetime
def get_readable_time_string(seconds):
    """Returns human readable string from number of seconds"""
    remaining, secs = divmod(int(seconds), 60)
    remaining, mins = divmod(remaining, 60)
    days, hrs = divmod(remaining, 24)
    parts = []
    for amount, singular, plural in ((days, "Day", "Days"),
                                     (hrs, "Hour", "Hours"),
                                     (mins, "Minute", "Minutes"),
                                     (secs, "Second", "Seconds")):
        if amount > 0:
            parts.append("%d %s" % (amount, singular if amount == 1 else plural))
    return " ".join(parts)
def get_datetime_from_timestamp(timestamp):
    """Return datetime from unix timestamp, or None if it cannot be parsed.

    The former bare ``except`` was narrowed to the failures that actually
    mean "not a usable timestamp": non-numeric input (ValueError/TypeError)
    and out-of-range values (OverflowError/OSError).
    """
    try:
        return datetime.fromtimestamp(int(timestamp))
    except (TypeError, ValueError, OverflowError, OSError):
        return None
def get_rate_limits(response):
    """Returns a list of rate limit information from a given response's headers.

    Each entry describes one rate-limit period: its (human-readable) length,
    the request quota, requests remaining, and when/in how long it resets.
    """
    # .get() instead of [] so a response without rate-limit headers yields
    # [] rather than raising KeyError (the `if not periods` guard below
    # already implies that intent).
    periods = response.headers.get('X-RateLimit-Period')
    if not periods:
        return []
    rate_limits = []
    periods = periods.split(',')
    limits = response.headers['X-RateLimit-Limit'].split(',')
    remaining = response.headers['X-RateLimit-Remaining'].split(',')
    reset = response.headers['X-RateLimit-Reset'].split(',')
    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]
        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime
        right_now = datetime.now()
        if (reset_datetime is not None) and (right_now < reset_datetime):
            # add 1 second because of rounding
            seconds_remaining = (reset_datetime - right_now).seconds + 1
        else:
            seconds_remaining = 0
        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)
    return rate_limits
|
from .manager import Manag | er
__v | ersion__ = '0.2.4'
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by | applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MsSQL to GCS operator.
"""
import decimal
from airflow.providers.google.cloud.operators.sql_to_gcs import BaseSQLToGCSOperator
from airflow.pro | viders.microsoft.mssql.hooks.mssql import MsSqlHook
from airflow.utils.decorators import apply_defaults
class MSSQLToGCSOperator(BaseSQLToGCSOperator):
    """Copy data from Microsoft SQL Server to Google Cloud Storage
    in JSON or CSV format.

    :param mssql_conn_id: Reference to a specific MSSQL hook.
    :type mssql_conn_id: str

    **Example**:
        The following operator will export data from the Customers table
        within the given MSSQL Database and then upload it to the
        'mssql-export' GCS bucket (along with a schema file). ::

            export_customers = MsSqlToGoogleCloudStorageOperator(
                task_id='export_customers',
                sql='SELECT * FROM dbo.Customers;',
                bucket='mssql-export',
                filename='data/customers/export.json',
                schema_filename='schemas/export.json',
                mssql_conn_id='mssql_default',
                google_cloud_storage_conn_id='google_cloud_default',
                dag=dag
            )
    """
    ui_color = '#e0a98c'

    # Cursor-description type codes mapped to BigQuery types; anything not
    # listed here falls back to STRING.
    type_map = {
        3: 'INTEGER',
        4: 'TIMESTAMP',
        5: 'NUMERIC'
    }

    @apply_defaults
    def __init__(self, mssql_conn_id='mssql_default', *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mssql_conn_id = mssql_conn_id

    def query(self):
        """
        Queries MSSQL and returns a cursor of results.

        :return: mssql cursor
        """
        hook = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
        cursor = hook.get_conn().cursor()
        cursor.execute(self.sql)
        return cursor

    def field_to_bigquery(self, field):
        """Map one cursor-description entry to a BigQuery schema dict."""
        name, type_code = field[0], field[1]
        return {
            'name': name.replace(" ", "_"),
            'type': self.type_map.get(type_code, "STRING"),
            'mode': "NULLABLE",
        }

    @classmethod
    def convert_type(cls, value, schema_type):
        """
        Takes a value from MSSQL, and converts it to a value that's safe for
        JSON/Google Cloud Storage/BigQuery. Only ``decimal.Decimal`` needs
        coercion (to float); everything else passes through unchanged.
        """
        if isinstance(value, decimal.Decimal):
            return float(value)
        return value
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from stacks.utils.RMFTestCase import *
from mock.mock import patch
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestKafkaBroker(RMFTestCase):
    # Stack-relative location of the Kafka service scripts under test.
    COMMON_SERVICES_PACKAGE_DIR = "KAFKA/0.8.1/package"
    STACK_VERSION = "2.2"
def test_configure_default(self):
    """`configure` with the stock config creates Kafka's log, run, config
    and data directories with kafka:hadoop ownership."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
                       classname = "KafkaBroker",
                       command = "configure",
                       config_file="default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/var/log/kafka',
                              owner = 'kafka',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              recursive_ownership = True,
    )
    self.assertResourceCalled('Directory', '/var/run/kafka',
                              owner = 'kafka',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              recursive_ownership = True,
    )
    self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config',
                              owner = 'kafka',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              recursive_ownership = True,
    )
    self.assertResourceCalled('Directory', '/tmp/log/dir',
                              owner = 'kafka',
                              create_parents = True,
                              group = 'hadoop',
                              mode = 0755,
                              cd_access = 'a',
                              recursive_ownership = True,
    )
@patch("os.path.islink")
@patch("os.path.realpath")
def test_configure_custom_paths_default(self, realpath_mock, islink_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "configure",
config_file="default_custom_path_config.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/customdisk/var/log/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/customdisk/var/run/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/tmp/log/dir',
owner = 'kafka',
create_parents = True,
group = 'hadoop',
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertTrue(islink_mock.called)
self.assertTrue(realpath_mock.called)
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, call_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.asse | rtResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
self.assertResourceCalled("Link", "/etc/kafka/conf", to="/usr/hdp/current/kafka-broker/conf")
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEqu | als(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
|
"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
from urlparse import urlparse, parse_qsl
from urllib import unquote
from .mysql import MySQLDriver
from .sqlite import SQLiteDriver
from .postgresql import PostgreSQLDriver
def parse_dsn(dsn_string):
    """Parse a connection string and return the associated driver.

    Returns a (driver_class, args, kwargs) triple suitable for
    ``driver_class(*args, **kwargs)``.

    Supported schemes: ``sqlite`` (path only), ``mysql`` and ``postgresql``
    (host/port/user/password plus any extra query-string options).
    Raises ValueError for any other scheme.
    """
    dsn = urlparse(dsn_string)
    # Allow driver suffixes such as "mysql+foo://" — only the base scheme matters.
    scheme = dsn.scheme.split('?')[0] if False else dsn.scheme.split('+')[0]
    username = password = port = None
    host = dsn.netloc
    if '@' in host:
        # rsplit: a percent-encoded password may itself contain '@'.
        username, host = host.rsplit('@', 1)
        if ':' in username:
            # Split once only: the password may contain ':'.
            username, password = username.split(':', 1)
            password = unquote(password)
        # Bug fix: the username was previously only unquoted when a
        # password was also present.
        username = unquote(username)
    if ':' in host:
        host, port = host.split(':')
        port = int(port)
    database = dsn.path.split('?')[0][1:]
    # Some DSNs put the query string after the path; fall back to dsn.query.
    query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
    kwargs = dict(parse_qsl(query, True))
    if scheme == 'sqlite':
        return SQLiteDriver, [dsn.path], {}
    elif scheme == 'mysql':
        kwargs['user'] = username or 'root'
        kwargs['db'] = database
        if port:
            kwargs['port'] = port
        if host:
            kwargs['host'] = host
        if password:
            kwargs['passwd'] = password
        return MySQLDriver, [], kwargs
    elif scheme == 'postgresql':
        kwargs['user'] = username or 'postgres'
        kwargs['database'] = database
        if port:
            kwargs['port'] = port
        if 'unix_socket' in kwargs:
            # psycopg2 takes the socket directory via host=.
            kwargs['host'] = kwargs.pop('unix_socket')
        elif host:
            kwargs['host'] = host
        if password:
            kwargs['password'] = password
        return PostgreSQLDriver, [], kwargs
    else:
        raise ValueError('Unknown driver %s' % dsn_string)
|
def get_driver(dsn_string):
    """Instantiate and return the driver described by *dsn_string*."""
    driver_cls, args, kwargs = parse_dsn(dsn_string)
    return driver_cls(*args, **kwargs)
|
#!/usr/bin/python2.5
import sys
import time
import os
import nuke
def launchSubmit():
    """Assemble and run the render-farm submission command for the
    currently open Nuke script (script path, frame range, then one
    "<node name> <output file>" pair per Write node)."""
    print("nukeStub(): launch submitter dialog")
    submitCmd = "/drd/software/int/bin/launcher.sh -p %s -d %s --launchBlocking farm -o EPA_CMDLINE python2.5 --arg '$ABSUBMIT/nukesubmit/nuke2AB.py'" % (os.environ['DRD_JOB'], os.environ['DRD_DEPT'])
    # root.name holds the path to the nuke script
    submitCmd += " %s" % nuke.value("root.name")
    submitCmd += " %s" % nuke.Root.firstFrame(nuke.root())
    submitCmd += " %s" % nuke.Root.lastFrame(nuke.root())
    for node in nuke.allNodes():
        if node.Class() != "Write":
            continue
        submitCmd += " %s %s" % (node['name'].value(), nuke.filename(node))
    print( "nukeStub(): %s" % submitCmd )
    # NOTE(review): os.system with an unquoted, string-built command will
    # break (or worse) on paths containing spaces/shell metacharacters —
    # consider subprocess with an argument list.
    os.system(submitCmd)
# Register the submitter in Nuke's menu bar: add a "Render" menu with a
# "Submit to Farm" entry (shortcut: Up) that invokes launchSubmit above.
menubar = nuke.menu("Nuke")
m = menubar.addMenu("&Render")
m.addCommand("Submit to Farm", "nukeStub.launchSubmit()", "Up")
|
from extractors.extract_website import ExtractWebsite
from datawakestreams.extractors.extractor_bolt import ExtractorBolt
class WebsiteBolt(ExtractorBolt):
    """Extractor bolt specialized for website extraction: plugs an
    ExtractWebsite instance into the generic ExtractorBolt machinery."""
    # Bolt identifier used by the topology.
    name ='website_extractor'

    def __init__(self):
        ExtractorBolt.__init__(self)
        # Concrete extractor consumed by the base class.
        self.extractor = ExtractWebsite()
|
import pycountry
from marshmallow import Schema, fields, ValidationError
def validate_currency_symbol(val):
    """Marshmallow validator: *val* must be a currency code known to pycountry.

    Raises:
        ValidationError: if no pycountry currency has a matching letter code.
    """
    # any() short-circuits on the first match instead of materializing the
    # full list of currency codes on every call, as the original did.
    if not any(currency.letter == val for currency in pycountry.currencies.objects):
        raise ValidationError('Symbol is not valid')
class CategoryTypeField(fields.Field):
    """Serializes a category-type code as {'value': code, 'title': label},
    where the label is looked up in the model's CATEGORY_TYPES choices."""
    def _serialize(self, value, attr, obj):
        title = dict(obj.CATEGORY_TYPES).get(value)
        return {'value': value, 'title': title}
class RecordTypeField(fields.Field):
    """Serializes a record-type code as {'value': code, 'title': label},
    where the label is looked up in the model's RECORD_TYPES choices."""
    def _serialize(self, value, attr, obj):
        title = dict(obj.RECORD_TYPES).get(value)
        return {'value': value, 'title': title}
class PaymentMethodField(fields.Field):
    """Serializes a payment-method code as {'value': code, 'title': label},
    where the label is looked up in the model's PAYMENT_METHODS choices."""
    def _serialize(self, value, attr, obj):
        title = dict(obj.PAYMENT_METHODS).get(value)
        return {'value': value, 'title': title}
class GroupSchema(Schema):
    """(De)serialization schema for a user group."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
class UserSchema(Schema):
    """Schema for application users.

    The password is load-only (never dumped); group and timestamps are
    dump-only (never accepted from input).
    """
    id = fields.Int(dump_only=True)
    email = fields.Email(required=True)
    first_name = fields.Str(required=True)
    last_name = fields.Str()
    password = fields.Str(load_only=True, required=True)
    active = fields.Bool()
    group = fields.Nested(GroupSchema, dump_only=True)
    invite_hash = fields.Str()
    date_created = fields.DateTime(dump_only=True)
    date_modified = fields.DateTime(dump_only=True)
class CategorySchema(Schema):
    """Schema for a category; self-nested parent (recursion stopped by
    excluding the nested parent's own parent). Loads take 'parent' as an id."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    category_type = CategoryTypeField()
    parent = fields.Nested('self', dump_only=True, exclude=('parent', ))
    parent_id = fields.Int(load_only=True, load_from='parent')
    colour = fields.Str(required=True)
    logo = fields.Str(required=True)
class GroupCategorySchema(Schema):
    """Like CategorySchema but scoped to a group (adds a read-only group)."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    category_type = CategoryTypeField()
    group = fields.Nested(GroupSchema, dump_only=True)
    parent = fields.Nested('self', dump_only=True, exclude=('parent', ))
    parent_id = fields.Int(load_only=True, load_from='parent')
    colour = fields.Str(required=True)
    logo = fields.Str(required=True)
class GroupCurrencySchema(Schema):
    """Schema for a group's currency; the symbol must be a pycountry
    currency code (see validate_currency_symbol)."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    symbol = fields.Str(
        required=True,
        validate=validate_currency_symbol
    )
    date_modified = fields.DateTime()
    group = fields.Nested(GroupSchema, dump_only=True)
class AccountSchema(Schema):
    """Schema for an account; loads take 'currency' as an id, dumps nest it."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    currency = fields.Nested(GroupCurrencySchema, dump_only=True)
    currency_id = fields.Int(required=True, load_only=True, load_from='currency')
    user = fields.Nested(UserSchema, dump_only=True)
class TransactionSchema(Schema):
    """Schema for an account-to-account transfer.

    Source/target accounts and currency load as ids and dump as trimmed
    nested objects; the owning user is dump-only.
    """
    id = fields.Int(dump_only=True)
    amount = fields.Float(required=True)
    source_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
    source_account_id = fields.Int(required=True, load_only=True, load_from='source_account')
    target_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
    target_account_id = fields.Int(required=True, load_only=True, load_from='target_account')
    user = fields.Nested(
        UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email')
    )
    currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name'))
    currency_id = fields.Int(required=True, load_only=True, load_from='currency')
    description = fields.Str()
    date = fields.DateTime()
class RecordSchema(Schema):
    """Schema for a single expense/income record.

    Account, currency and category load as ids and dump as trimmed nested
    objects; an associated transfer transaction dumps read-only.
    """
    id = fields.Int(dump_only=True)
    amount = fields.Float(required=True)
    description = fields.Str()
    record_type = RecordTypeField(required=True)
    payment_method = PaymentMethodField()
    date = fields.DateTime()
    user = fields.Nested(
        UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email')
    )
    account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
    account_id = fields.Int(required=True, load_only=True, load_from='account')
    currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name'))
    currency_id = fields.Int(required=True, load_only=True, load_from='currency')
    transaction = fields.Nested(
        TransactionSchema,
        dump_only=True,
        only=('id', 'source_account', 'target_account', 'amount', 'currency')
    )
    category = fields.Nested(
        GroupCategorySchema, dump_only=True, only=('id', 'name', 'logo', 'colour')
    )
    category_id = fields.Int(required=True, load_only=True, load_from='category')
class AppSchema(Schema):
    """Schema for an API application; the secret is server-generated
    (dump-only), the owning user loads as an id."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    secret = fields.Str(required=True, dump_only=True)
    user = fields.Nested(UserSchema, dump_only=True)
    user_id = fields.Int(required=True, load_only=True, load_from='user')
class TokenSchema(Schema):
    """Schema for a token request: credentials plus the app secret."""
    email = fields.Email(required=True)
    password = fields.Str(load_only=True, required=True)
    secret = fields.Str(required=True)
class BalanceSchema(Schema):
    """Output schema for a balance report entry."""
    cash_flow = fields.Float(required=True)
    start_balance = fields.Float()
    end_balance = fields.Float()
    expense = fields.Float()
    income = fields.Float()
    date = fields.Date()
    record_type = fields.Int()
class DateRangeFilterSchema(Schema):
    """Input schema for an optional from/to date-range filter."""
    date_from = fields.Date()
    date_to = fields.Date()
class CashFlowSchema(Schema):
    """Output schema for a cash-flow report entry."""
    cash_flow = fields.Float(required=True)
    expense = fields.Float()
    income = fields.Float()
    date = fields.Date()
class ExpenseSchema(Schema):
    """Output schema for a per-category expense total."""
    amount = fields.Float(required=True)
    category_id = fields.Int()
class IncomeSchema(Schema):
    """Output schema for a per-category income total."""
    amount = fields.Float(required=True)
    category_id = fields.Int()
|
from decimal import Decimal
from electrum.util import (format_satoshis, format_fee_satoshis, parse_URI,
is_hash256_str, chunks)
from . import SequentialTestCase
class TestUtil(SequentialTestCase):
    """Unit tests for electrum.util helpers: satoshi/fee formatting,
    BIP21-style bitcoin: URI parsing, hash-string validation and chunking."""

    def test_format_satoshis(self):
        self.assertEqual("0.00001234", format_satoshis(1234))

    def test_format_satoshis_negative(self):
        self.assertEqual("-0.00001234", format_satoshis(-1234))

    def test_format_fee_float(self):
        self.assertEqual("1.7", format_fee_satoshis(1700/1000))

    def test_format_fee_decimal(self):
        self.assertEqual("1.7", format_fee_satoshis(Decimal("1.7")))

    def test_format_fee_precision(self):
        self.assertEqual("1.666",
                         format_fee_satoshis(1666/1000, precision=6))
        self.assertEqual("1.7",
                         format_fee_satoshis(1666/1000, precision=1))

    def test_format_satoshis_whitespaces(self):
        # Padded to a fixed width for column-aligned display.
        self.assertEqual("     0.0001234 ",
                         format_satoshis(12340, whitespaces=True))
        self.assertEqual("     0.00001234",
                         format_satoshis(1234, whitespaces=True))

    def test_format_satoshis_whitespaces_negative(self):
        self.assertEqual("    -0.0001234 ",
                         format_satoshis(-12340, whitespaces=True))
        self.assertEqual("    -0.00001234",
                         format_satoshis(-1234, whitespaces=True))

    def test_format_satoshis_diff_positive(self):
        # is_diff adds an explicit sign to positive amounts.
        self.assertEqual("+0.00001234",
                         format_satoshis(1234, is_diff=True))

    def test_format_satoshis_diff_negative(self):
        self.assertEqual("-0.00001234", format_satoshis(-1234, is_diff=True))

    def _do_test_parse_URI(self, uri, expected):
        # Helper: parse the URI and compare the whole result dict.
        result = parse_URI(uri)
        self.assertEqual(expected, result)

    def test_parse_URI_address(self):
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})

    def test_parse_URI_only_address(self):
        self._do_test_parse_URI('15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})

    def test_parse_URI_address_label(self):
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?label=electrum%20test',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'label': 'electrum test'})

    def test_parse_URI_address_message(self):
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?message=electrum%20test',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'message': 'electrum test', 'memo': 'electrum test'})

    def test_parse_URI_address_amount(self):
        # Amounts are parsed from BTC into satoshis.
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 30000})

    def test_parse_URI_address_request_url(self):
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?r=http://domain.tld/page?h%3D2a8628fc2fbe',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'r': 'http://domain.tld/page?h=2a8628fc2fbe'})

    def test_parse_URI_ignore_args(self):
        # Unknown query args are passed through untouched.
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?test=test',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'test': 'test'})

    def test_parse_URI_multiple_args(self):
        self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page',
                                {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 4000, 'label': 'electrum-test', 'message': u'electrum test', 'memo': u'electrum test', 'r': 'http://domain.tld/page', 'test': 'none'})

    def test_parse_URI_no_address_request_url(self):
        self._do_test_parse_URI('bitcoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe',
                                {'r': 'http://domain.tld/page?h=2a8628fc2fbe'})

    def test_parse_URI_invalid_address(self):
        self.assertRaises(BaseException, parse_URI, 'bitcoin:invalidaddress')

    def test_parse_URI_invalid(self):
        self.assertRaises(BaseException, parse_URI, 'notbitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma')

    def test_parse_URI_parameter_polution(self):
        # Duplicate query parameters must be rejected, not silently merged.
        self.assertRaises(Exception, parse_URI, 'bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003&label=test&amount=30.0')

    def test_is_hash256_str(self):
        self.assertTrue(is_hash256_str('09a4c03e3bdf83bbe3955f907ee52da4fc12f4813d459bc75228b64ad08617c7'))
        self.assertTrue(is_hash256_str('2A5C3F4062E4F2FCCE7A1C7B4310CB647B327409F580F4ED72CB8FC0B1804DFA'))
        self.assertTrue(is_hash256_str('00' * 32))
        # Wrong length / non-hex / non-string inputs are all rejected.
        self.assertFalse(is_hash256_str('00' * 33))
        self.assertFalse(is_hash256_str('qweqwe'))
        self.assertFalse(is_hash256_str(None))
        self.assertFalse(is_hash256_str(7))

    def test_chunks(self):
        self.assertEqual([[1, 2], [3, 4], [5]],
                         list(chunks([1, 2, 3, 4, 5], 2)))
        with self.assertRaises(ValueError):
            list(chunks([1, 2, 3], 0))
|
User-Agent': get_user_agent(),
'Cache-Control': 'no-cache',
})
self.multipart_session.auth = fetch_credentials(auth_id, auth_token)
self.proxies = proxies
self.timeout = timeout
self.account = Accounts(self)
self.subaccounts = Subaccounts(self)
self.applications = Applications(self)
self.calls = Calls(self)
self.live_calls = LiveCalls(self)
self.queued_calls = QueuedCalls(self)
self.conferences = Conferences(self)
self.endpoints = Endpoints(self)
self.messages = Messages(self)
self.lookup = Lookup(self)
sel | f.numbers = Numbers(self)
self.powerpacks = Powerpacks(self)
self.brand = Brand(self)
self.campaign = Campaign(self)
self.media = Media(self)
self.pricing = Pricings(self)
self.recordings = Recordings(self)
self.addresses = Addresses(self)
self.identities = Identities(self)
self.call_feedback = CallFeedback(self)
self.end_users = EndUsers( | self)
self.compliance_document_types = ComplianceDocumentTypes(self)
self.compliance_documents = ComplianceDocuments(self)
self.compliance_requirements = ComplianceRequirements(self)
self.compliance_applications = ComplianceApplications(self)
self.multi_party_calls = MultiPartyCalls(self)
self.voice_retry_count = 0
    def __enter__(self):
        # Context-manager entry: the client itself is the managed resource.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close both HTTP sessions on exit; exceptions are not suppressed
        # (implicit None return).
        self.session.close()
        self.multipart_session.close()
    def process_response(self,
                         method,
                         response,
                         response_type=None,
                         objects_type=None):
        """Processes the API response based on the status codes and method used
        to access the API.

        Deserializes the body into ResponseObject instances (optionally
        wrapped in *response_type* / *objects_type*), maps error status codes
        to the matching exception type, and returns the parsed body.
        """
        try:
            # Recursively wrap every JSON object in a ResponseObject so the
            # payload is attribute-accessible.
            response_json = response.json(
                object_hook=lambda x: ResponseObject(x) if isinstance(x, dict) else x)
            if response_type:
                r = response_type(self, response_json.__dict__)
                response_json = r
            # List endpoints return their items under 'objects'.
            if 'objects' in response_json and objects_type:
                response_json.objects = [
                    objects_type(self, obj.__dict__)
                    for obj in response_json.objects
                ]
        except ValueError:
            # Body was not valid JSON (e.g. empty 204 responses).
            response_json = None
        # Status-code ladder: prefer the server-supplied error message when
        # present, otherwise raise with a generic description.
        # NOTE(review): the 400 branch tests `is not None` while the others
        # test truthiness — confirm whether a falsy-but-present body matters.
        if response.status_code == 400:
            if response_json is not None and 'error' in response_json:
                raise ValidationError(response_json.error)
            raise ValidationError(
                'A parameter is missing or is invalid while accessing resource'
                'at: {url}'.format(url=response.url))
        if response.status_code == 401:
            if response_json and 'error' in response_json:
                raise AuthenticationError(response_json.error)
            raise AuthenticationError(
                'Failed to authenticate while accessing resource at: '
                '{url}'.format(url=response.url))
        if response.status_code == 404:
            if response_json and 'error' in response_json:
                raise ResourceNotFoundError(response_json.error)
            raise ResourceNotFoundError(
                'Resource not found at: {url}'.format(url=response.url))
        if response.status_code == 405:
            if response_json and 'error' in response_json:
                raise InvalidRequestError(response_json.error)
            raise InvalidRequestError(
                'HTTP method "{method}" not allowed to access resource at: '
                '{url}'.format(method=method, url=response.url))
        if response.status_code == 409:
            if response_json and 'error' in response_json:
                raise InvalidRequestError(response_json.error)
            raise InvalidRequestError(
                'Conflict: '
                '{url}'.format(url=response.url))
        if response.status_code == 422:
            if response_json and 'error' in response_json:
                raise InvalidRequestError(response_json.error)
            raise InvalidRequestError(
                'Unprocessable Entity: '
                '{url}'.format(url=response.url))
        if response.status_code == 500:
            if response_json and 'error' in response_json:
                raise PlivoServerError(response_json.error)
            raise PlivoServerError(
                'A server error occurred while accessing resource at: '
                '{url}'.format(url=response.url))
        # DELETE has its own narrower success set; everything else accepts
        # the usual 2xx family (plus 207 multi-status).
        if method == 'DELETE':
            if response.status_code not in [200, 204]:
                raise PlivoRestError('Resource at {url} could not be '
                                     'deleted'.format(url=response.url))
        elif response.status_code not in [200, 201, 202, 204, 207]:
            raise PlivoRestError(
                'Received status code {status_code} for the HTTP method '
                '"{method}"'.format(
                    status_code=response.status_code, method=method))
        # Successful response: reset the voice retry counter.
        self.voice_retry_count = 0
        return response_json
def create_request(self, method, path=None, data=None, **kwargs):
# The abstraction created by request() and create_request() is moot
# now since several product-specific handling have been aded.
# Requires a refactor.
if 'is_callinsights_request' in kwargs:
url = '/'.join([CALLINSIGHTS_BASE_URL, kwargs['callinsights_request_path']])
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
elif kwargs.get('is_lookup_request', False):
path = path or []
url = '/'.join(list([str(p) for p in path]))
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
else:
path = path or []
req = Request(method, '/'.join([self.base_uri, self.session.auth[0]] +
list([str(p) for p in path])) + '/',
**({
'params': data
} if method == 'GET' else {
'json': data
}))
return self.session.prepare_request(req)
def create_multipart_request(self,
method,
path=None,
data=None,
files=None):
path = path or []
data_args = {}
if method == 'GET':
data_args['params'] = data
else:
data_args['data'] = data
try:
if files:
data_args['files'] = files
except Exception as e:
print(e)
url = '/'.join([self.base_uri, self.multipart_session.auth[0]] + list([str(p) for p in path])) + '/'
req = Request(method, url, **data_args)
return self.multipart_session.prepare_request(req)
def send_request(self, request, **kwargs):
if 'session' in kwargs:
session = kwargs['session']
del kwargs['session']
else:
session = self.session
return session.send(
request, proxies=self.proxies, timeout=self.timeout, **kwargs)
def request(self,
method,
path=None,
data=None,
response_type=None,
objects_type=None,
files=None,
**kwargs):
if files is not None:
req = self.create_multipart_request(method, path, data, files)
session = self.multipart_session
else:
if not kwargs.get("is_voice_request", False):
self.base_uri = PLIVO_API_BASE_URI
if data and 'is_callinsights_request' in data:
params_dict = {}
if 'callinsights_request_path' in data:
params_dict['is_callinsights_request'] = data |
import time
import RPi.GPIO as GPIO

# Constants
PULSE_LEN = 0.03  # length of clock motor pulse (seconds)
A_PIN = 18  # one motor drive pin
B_PIN = 23  # second motor drive pin

# Configure the GPIO pins
GPIO.setmode(GPIO.BCM)
GPIO.setup(A_PIN, GPIO.OUT)
GPIO.setup(B_PIN, GPIO.OUT)

# Global variables
positive_polarity = True  # polarity of the next pulse; alternates each tick
period = 2.0  # 2 second tick
last_tick_time = 0  # the time at which the last tick occurred
def tick():
    """Advance the clock one step, reversing coil polarity on each call."""
    global positive_polarity
    # Choose the drive direction for this tick, then flip for the next one.
    if positive_polarity:
        pins = (A_PIN, B_PIN)
    else:
        pins = (B_PIN, A_PIN)
    pulse(*pins)
    positive_polarity = not positive_polarity
def pulse(pos_pin, neg_pin):
    """Energize the motor coil for PULSE_LEN seconds, then cut the power."""
    GPIO.output(pos_pin, True)
    GPIO.output(neg_pin, False)
    time.sleep(PULSE_LEN)
    # De-energize until the next tick.
    GPIO.output(pos_pin, False)
# Main loop: tick once per `period`, always releasing the GPIO pins on exit.
try:
    while True:
        now = time.time()
        if now > last_tick_time + period:
            # its time for the next tick
            tick()
            last_tick_time = now
        # Sleep briefly instead of busy-spinning at 100% CPU; 10 ms of
        # polling jitter is negligible against the 2 s tick period.
        time.sleep(0.01)
finally:
    print('Cleaning up GPIO')
    GPIO.cleanup()
|
htsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
    @bordercolor.setter
    def bordercolor(self, val):
        # Store the new value under the "bordercolor" key.
        self["bordercolor"] = val
# bordercolorsrc
# --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        bordercolor.

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]
    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        # Store the new value under the "bordercolorsrc" key.
        self["bordercolorsrc"] = val
# font
# ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.area.hoverlabel.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for color .
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on Chart Studio Cloud
                    for family .
                size

                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for size .

        Returns
        -------
        plotly.graph_objs.area.hoverlabel.Font
        """
        return self["font"]
    @font.setter
    def font(self, val):
        # Store the new value under the "font" key.
        self["font"] = val
# namelength
# ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]
    @namelength.setter
    def namelength(self, val):
        # Store the new value under the "namelength" key.
        self["namelength"] = val
# namelengthsrc
# -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        namelength.

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["namelengthsrc"]
    @namelengthsrc.setter
    def namelengthsrc(self, val):
        # Store the new value under the "namelengthsrc" key.
        self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
    def _prop_descriptions(self):
        # Plain-text catalogue of this object's properties, returned as a
        # runtime string (not a docstring) — do not edit its content lightly.
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            namelength .
        """
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.area.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in |
# SecuML
# Copyright (C) 2016-2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import abc
import numpy as np
import os.path as path
import pandas as pd
import time
from .AnnotationQuery import AnnotationQuery
class AnnotationQueries(object):
    """Base class for one active-learning iteration's annotation queries.

    Subclasses implement runModels() and generateAnnotationQueries(); this
    class supplies prediction retrieval, CSV export and annotation plumbing.
    """

    def __init__(self, iteration, label):
        self.iteration = iteration
        self.label = label
        # AnnotationQuery objects accumulated by generateAnnotationQueries().
        self.annotation_queries = []

    def run(self):
        """Fetch predictions, run the models, generate (timed) and export
        the annotation queries."""
        self.predictions = self.getPredictedProbabilities()
        self.runModels()
        start_time = time.time()
        self.generateAnnotationQueries()
        # Wall-clock time spent generating the queries, for monitoring.
        self.generate_queries_time = time.time() - start_time
        self.exportAnnotationQueries()

    @abc.abstractmethod
    def runModels(self):
        # Subclass hook: train/apply the models used to select queries.
        return

    @abc.abstractmethod
    def generateAnnotationQueries(self):
        # Subclass hook: fill self.annotation_queries.
        return

    def generateAnnotationQuery(self, instance_id, predicted_proba,
                                suggested_label, suggested_family, confidence=None):
        """Factory for a single AnnotationQuery."""
        return AnnotationQuery(instance_id, predicted_proba,
                               suggested_label, suggested_family, confidence=confidence)

    def getPredictedProbabilities(self):
        """Return the test-set predictions DataFrame.

        Uses the binary model's monitoring output when available; otherwise
        builds a neutral frame (proba/score 0.5, label False) so downstream
        code can proceed uniformly.
        """
        models_conf = self.iteration.conf.models_conf
        if 'binary' in models_conf:
            classifier = self.iteration.update_model.models['binary']
            predictions = classifier.testing_monitoring.predictions_monitoring.predictions
        else:
            test_instances = self.iteration.datasets.getTestInstances()
            num_instances = test_instances.numInstances()
            predictions = pd.DataFrame(
                np.zeros((num_instances, 4)),
                index=test_instances.ids.getIds(),
                columns=['predicted_proba', 'predicted_labels', 'ground_truth', 'scores'])
            predictions['predicted_proba'] = [0.5] * num_instances
            predictions['predicted_labels'] = [False] * num_instances
            predictions['ground_truth'] = test_instances.ground_truth.getLabels()
            predictions['scores'] = [0.5] * num_instances
        return predictions

    def exportAnnotationQueries(self):
        """Write the queries to toannotate_<label>.csv in the iteration dir
        (no-op when the iteration has no directory)."""
        iteration_dir = self.iteration.iteration_dir
        if iteration_dir is None:
            return
        filename = path.join(iteration_dir,
                             'toannotate_' + self.label + '.csv')
        with open(filename, 'w') as f:
            for i, annotation_query in enumerate(self.annotation_queries):
                # Header comes from the first query only.
                if i == 0:
                    annotation_query.displayHeader(f)
                annotation_query.export(f)

    def annotateAuto(self):
        """Automatically answer every query (simulated annotator)."""
        for annotation_query in self.annotation_queries:
            annotation_query.annotateAuto(self.iteration, self.label)

    def getManualAnnotations(self):
        """Collect the manual annotation for every query."""
        for annotation_query in self.annotation_queries:
            annotation_query.getManualAnnotation(self.iteration)

    def checkAnnotationQueriesAnswered(self):
        """Return True iff every query has been answered."""
        for annotation_query in self.annotation_queries:
            if not annotation_query.checkAnswered(self.iteration):
                return False
        return True

    def getInstanceIds(self):
        """Return the instance ids targeted by the queries."""
        return [annotation_query.instance_id
                for annotation_query in self.annotation_queries]
|
che['slopes']
except AttributeError:
self._cache = {}
self._cache['slopes'] = self.betas[1:] * self.scale
except KeyError:
self._cache['slopes'] = self.betas[1:] * self.scale
return self._cache['slopes']
@slopes.setter
def slopes(self, val):
try:
self._cache['slopes'] = val
except AttributeError:
self._cache = {}
self._cache['slopes'] = val
@property
def slopes_vm(self):
try:
return self._cache['slopes_vm']
except AttributeError:
self._cache = {}
x = self.xmean
b = self.betas
dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
slopes_vm = (self.scale ** 2) * \
np.dot(np.dot(dfdb, self.vm), dfdb.T)
self._cache['slopes_vm'] = slopes_vm[1:, 1:]
except KeyError:
x = self.xmean
b = self.betas
dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
slopes_vm = (self.scale ** 2) * \
np.dot(np.dot(dfdb, self.vm), dfdb.T)
self._cache['slopes_vm'] = slopes_vm[1:, 1:]
return self._cache['slopes_vm']
@slopes_vm.setter
def slopes_vm(self, val):
try:
self._cache['slopes_vm'] = val
except AttributeError:
self._cache = {}
self._cache['slopes_vm'] = val
@property
def LR(self):
try:
return self._cache['LR']
except AttributeError:
self._cache = {}
P = 1.0 * np.sum(self.y) / self.n
LR = float(
-2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
self._cache['LR'] = (LR, chisqprob(LR, self.k))
except KeyError:
P = 1.0 * np.sum(self.y) / self.n
LR = float(
-2 * (self.n * ( | P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
self._cache['LR'] = (LR, chisqprob(LR, self.k))
return self._cache['LR']
@LR.setter
def LR(self, val):
try:
| self._cache['LR'] = val
except AttributeError:
self._cache = {}
self._cache['LR'] = val
@property
def u_naive(self):
try:
return self._cache['u_naive']
except AttributeError:
self._cache = {}
self._cache['u_naive'] = self.y - self.predy
except KeyError:
u_naive = self.y - self.predy
self._cache['u_naive'] = u_naive
return self._cache['u_naive']
@u_naive.setter
def u_naive(self, val):
try:
self._cache['u_naive'] = val
except AttributeError:
self._cache = {}
self._cache['u_naive'] = val
@property
def u_gen(self):
try:
return self._cache['u_gen']
except AttributeError:
self._cache = {}
Phi_prod = self.predy * (1 - self.predy)
u_gen = self.phiy * (self.u_naive / Phi_prod)
self._cache['u_gen'] = u_gen
except KeyError:
Phi_prod = self.predy * (1 - self.predy)
u_gen = self.phiy * (self.u_naive / Phi_prod)
self._cache['u_gen'] = u_gen
return self._cache['u_gen']
@u_gen.setter
def u_gen(self, val):
try:
self._cache['u_gen'] = val
except AttributeError:
self._cache = {}
self._cache['u_gen'] = val
@property
def Pinkse_error(self):
try:
return self._cache['Pinkse_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['Pinkse_error']
@Pinkse_error.setter
def Pinkse_error(self, val):
try:
self._cache['Pinkse_error'] = val
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'] = val
@property
def KP_error(self):
try:
return self._cache['KP_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['KP_error']
@KP_error.setter
def KP_error(self, val):
try:
self._cache['KP_error'] = val
except AttributeError:
self._cache = {}
self._cache['KP_error'] = val
@property
def PS_error(self):
try:
return self._cache['PS_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['PS_error']
@PS_error.setter
def PS_error(self, val):
try:
self._cache['PS_error'] = val
except AttributeError:
self._cache = {}
self._cache['PS_error'] = val
    def par_est(self):
        """Estimate the probit parameters by maximum likelihood.

        Starts from the OLS solution and maximizes the log-likelihood with
        the optimizer named by ``self.optim`` ('newton', 'bfgs' or 'ncg').

        Returns
        -------
        par_hat : tuple
            Full optimizer output (layout depends on the optimizer used).
        warn : bool
            True when the optimizer reported a nonzero warning flag.
        """
        # OLS start values: (X'X)^-1 X'y.
        start = np.dot(la.inv(spdot(self.x.T, self.x)),
                       spdot(self.x.T, self.y))
        # The optimizers minimize, so negate the log-likelihood.
        flogl = lambda par: -self.ll(par)
        if self.optim == 'newton':
            fgrad = lambda par: self.gradient(par)
            fhess = lambda par: self.hessian(par)
            par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
            # newton() returns its warning flag at index 2.
            warn = par_hat[2]
        else:
            fgrad = lambda par: -self.gradient(par)
            if self.optim == 'bfgs':
                par_hat = op.fmin_bfgs(
                    flogl, start, fgrad, full_output=1, disp=0)
                # fmin_bfgs full_output: warnflag is element 6.
                warn = par_hat[6]
            if self.optim == 'ncg':
                fhess = lambda par: -self.hessian(par)
                par_hat = op.fmin_ncg(
                    flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
                # fmin_ncg full_output: warnflag is element 5.
                warn = par_hat[5]
        # NOTE(review): an unrecognized self.optim leaves par_hat/warn unbound
        # (NameError) — presumably validated upstream; confirm.
        if warn > 0:
            warn = True
        else:
            warn = False
        return par_hat, warn
def ll(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
qxb = q * spdot(self.x, beta)
ll = sum(np.log(norm.cdf(qxb)))
return ll
def gradient(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
qxb = q * spdot(self.x, beta)
lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
gradient = spdot(lamb.T, self.x)[0]
return gradient
def hessian(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
xb = spdot(self.x, beta)
qxb = q * xb
lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
hessian = spdot(self.x.T, spbroadcast(self.x,-lamb * (lamb + xb)))
return hessian
class Probit(BaseProbit):
"""
Classic non-spatial Probit and spatial diagnostics. The class includes a
printout that formats all the results and tests in a nice format.
The diagnostics for spatial dependence currently implemented are:
* Pinkse Error [Pinkse2004]_
* Kelejian and Prucha Moran's I [Kelejian2001]_
* Pinkse & Slade Error [Pinkse1998]_
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 a |
se, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> "_models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> "_models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters | : Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own in | itialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling |
from .MidiOutFile import MidiOutFile
from .MidiInFile import MidiInFile
"""
This is an example of the smallest possible type 0 midi file, where
all the midi events are in the same track. |
"""
class Transposer(MidiOutFile):
    """Writes the input MIDI stream back out with every note raised
    by one octave (12 semitones), clamped to the valid note range."""

    def _transp(self, ch, note):
        # Channel 10 (index 9) is percussion: transposing it would remap
        # drum instruments instead of changing pitch, so leave it alone.
        if ch == 9:
            return note
        return min(note + 12, 127)

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        MidiOutFile.note_on(self, channel, self._transp(channel, note), velocity)

    def note_off(self, channel=0, note=0x40, velocity=0x40):
        MidiOutFile.note_off(self, channel, self._transp(channel, note), velocity)
# Pipe the input file through the transposing writer.
out_file = 'midiout/transposed.mid'
#in_file = 'midiout/minimal_type0.mid'
#in_file = 'test/midifiles/Lola.mid'
in_file = 'test/midifiles/tennessee_waltz.mid'
midi_out = Transposer(out_file)
midi_in = MidiInFile(midi_out, in_file)
midi_in.read()
|
# region Description
"""
nmap_scanner.py: Scan local network with NMAP
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
import xml.etree.ElementTree as ET
import subprocess as sub
from tempfile import gettempdir
from os.path import isfile, join
from os import remove
from typing import Union, List, Dict, NamedTuple
from collections import namedtuple
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Main class - NmapScanner
class NmapScanner:
    """Scan the local IPv4 network with nmap and parse its XML report."""

    # region Variables
    _base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
    try:
        Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports',
                          defaults=('', '', '', '', []))
    except TypeError:
        # Python < 3.7: namedtuple() has no 'defaults' keyword.
        Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports')
    # endregion

    # region Init
    def __init__(self, network_interface: str):
        """Resolve interface settings and build the nmap scan range.

        :param network_interface: Name of the network interface to scan from.
        """
        self._your: Dict[str, Union[None, str]] = \
            self._base.get_interface_settings(interface_name=network_interface,
                                              required_parameters=['mac-address', 'ipv4-address',
                                                                   'first-ipv4-address', 'last-ipv4-address'])
        # nmap range syntax 'a.b.c.d-e' scans a.b.c.d through a.b.c.e.
        self.local_network: str = \
            self._your['first-ipv4-address'] + '-' + \
            self._your['last-ipv4-address'].split('.')[3]
        if self._base.get_platform().startswith('Darwin'):
            self._nmap_scan_result: str = '/tmp/nmap_scan.xml'
        else:
            self._nmap_scan_result: str = join(gettempdir(), 'nmap_scan.xml')
    # endregion

    # region Find devices in local network with nmap
    def scan(self,
             exit_on_failure: bool = True,
             quiet: bool = False) -> Union[None, List[NamedTuple]]:
        """Run an nmap OS/port scan over the local network.

        :param exit_on_failure: Exit the process when the scan fails.
        :param quiet: Suppress the informational message about the command.
        :return: A list of Info namedtuples, or None on failure.
        """
        try:
            network_devices: List[NamedTuple] = list()
            nmap_command: str = 'nmap ' + self.local_network + \
                                ' --open -n -O --osscan-guess -T5 -oX ' + self._nmap_scan_result
            if not quiet:
                self._base.print_info('Start nmap scan: ', nmap_command)
            if self._base.get_platform().startswith('Windows'):
                nmap_process = sub.Popen(nmap_command, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
            else:
                nmap_process = sub.Popen([nmap_command], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
            nmap_process.wait()
            assert isfile(self._nmap_scan_result), \
                'Not found nmap scan result file: ' + self._base.error_text(self._nmap_scan_result)
            nmap_report = ET.parse(self._nmap_scan_result)
            root_tree = nmap_report.getroot()
            for element in root_tree:
                try:
                    assert element.tag == 'host'
                    state = element.find('status').attrib['state']
                    assert state == 'up'
                    # BUGFIX: these per-host fields were previously initialized
                    # once before the loop, so 'ports' accumulated across hosts
                    # and the SAME list object ended up inside every Info tuple;
                    # stale vendor/os/mac values also leaked between hosts.
                    ipv4_address: str = ''
                    mac_address: str = ''
                    vendor: str = ''
                    os: str = ''
                    ports: List[int] = list()
                    # region Address
                    for address in element.findall('address'):
                        if address.attrib['addrtype'] == 'ipv4':
                            ipv4_address = address.attrib['addr']
                        if address.attrib['addrtype'] == 'mac':
                            mac_address = address.attrib['addr'].lower()
                            try:
                                vendor = address.attrib['vendor']
                            except KeyError:
                                pass
                    # endregion
                    # region Open TCP ports
                    for ports_info in element.find('ports'):
                        if ports_info.tag == 'port':
                            ports.append(ports_info.attrib['portid'])
                    # endregion
                    # region OS
                    for os_info in element.find('os'):
                        if os_info.tag == 'osmatch':
                            try:
                                os = os_info.attrib['name']
                            except TypeError:
                                pass
                            break
                    # endregion
                    network_devices.append(self.Info(vendor=vendor, os=os, mac_address=mac_address,
                                                     ipv4_address=ipv4_address, ports=ports))
                except AssertionError:
                    pass
            remove(self._nmap_scan_result)
            # NOTE(review): 'network-interface' is not in required_parameters
            # above — confirm get_interface_settings always provides it.
            assert len(network_devices) != 0, \
                'Could not find any devices on interface: ' + self._base.error_text(self._your['network-interface'])
            return network_devices
        except OSError:
            self._base.print_error('Something went wrong while trying to run ', 'nmap')
            if exit_on_failure:
                exit(2)
        except KeyboardInterrupt:
            self._base.print_info('Exit')
            exit(0)
        except AssertionError as Error:
            self._base.print_error(Error.args[0])
            if exit_on_failure:
                exit(1)
        return None
    # endregion
# endregion
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions | as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core | import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import remarketing_action_service
# Report the installed google-ads version in the client-info header; fall
# back to a generic ClientInfo when the distribution is not installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RemarketingActionServiceTransport(abc.ABC):
    """Abstract transport class for RemarketingActionService."""
    # OAuth scope required for Google Ads API calls.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
    DEFAULT_HOST: str = "googleads.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            # An explicit credentials file takes precedence over ambient
            # Application Default Credentials.
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(
                service_account.Credentials, "with_always_use_jwt_access"
            )
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.mutate_remarketing_actions: gapic_v1.method.wrap_method(
                self.mutate_remarketing_actions,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def mutate_remarketing_actions(
        self,
    ) -> Callable[
        [remarketing_action_service.MutateRemarketingActionsRequest],
        Union[
            remarketing_action_service.MutateRemarketingActionsResponse,
            Awaitable[
                remarketing_action_service.MutateRemarketingActionsResponse
            ],
        ],
    ]:
        # Concrete sync/async transports must implement this RPC.
        raise NotImplementedError()
|
import math
import string
from Conundrum.utils import sanitize
# Letter <-> value tables for the cipher. Note 'z' appears twice in the
# letter string, so letter_to_value['z'] ends up 26 (the later zip pair
# wins) while value_to_letter[0] is 'z' — consistent since 26 ≡ 0 (mod 26).
letter_to_value = dict(zip('z' + string.ascii_lowercase, range(0, 27)))
value_to_letter = dict(zip(range(0, 27), 'z' + string.ascii_lowercase))
def encrypt(msg: str, key: str) -> str:
    """Vigenere-style encryption of *msg* with *key* (both sanitized first)."""
    msg = sanitize(msg)
    key = sanitize(key)
    # Repeat the key until it is at least as long as the message.
    key = key * int(math.ceil(len(msg) / len(key)))
    out = []
    for msg_letter, key_letter in zip(msg, key):
        shifted = (letter_to_value[msg_letter] + letter_to_value[key_letter]) % 26
        out.append(value_to_letter[shifted])
    return ''.join(out)
def decrypt(ms | g: str, key: str) -> str:
msg = sanitize(msg)
key = sanitize(key)
repeat = int(math.ceil(len(msg) / len(key)))
key = key * repeat
return ''.join([value_to_letter[(letter_to_value[msg_letter] -
letter_to_value[key_letter]) % 26]
for msg_letter, key_letter in zip(msg, key)])
if __name__ == '__main__':
    # Used in Movies 1
    print(decrypt('oape dhzoawx cz hny', 'plum scarlett green mustard'))
    # Used in Movies 3
    # decrypted_msg = 'metropolis'
    # film_key = 'Close Encounters Of The Third Kind'
    # print(encrypt(decrypted_msg, film_key))
#!/usr/bin/env python
# pylint: disable=invalid-name
"""
2520 is th | e smallest number that can be divided by each of the numbers from 1 t | o 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
"""
import sys
from problembaseclass import ProblemBaseClass
class Problem5(ProblemBaseClass):
    """
    @class Solution for Problem 5
    @brief Smallest positive number evenly divisible by all of 1..range.
    """
    def __init__(self, range):
        # Parameter name 'range' kept for interface compatibility even
        # though it shadows the builtin.
        self.result = None
        self.range = range
    def compute(self):
        """Compute lcm(1..self.range) by iterated gcd reduction.

        Replaces the original brute-force search which (a) needed hundreds
        of millions of iterations for range=20 and (b) only tested divisors
        1..range-1, silently excluding the endpoint (the off-by-one was
        latent because lcm(1..n-1) == lcm(1..n) for n = 10 and 20).
        """
        result = 1
        for n in range(1, self.range + 1):
            # Euclid's algorithm: a ends up as gcd(result, n).
            a, b = result, n
            while b:
                a, b = b, a % b
            result = result // a * n
        self.result = result
if __name__ == '__main__':
    # Sanity check from the problem statement: the answer for 1..10 is 2520.
    problem = Problem5(10)
    problem.compute()
    print problem.result
    del problem
    # The actual problem: smallest number divisible by all of 1..20.
    problem = Problem5(20)
    problem.compute()
    print problem.result #232792560
    del problem
|
lambda: 0)
self.add_words(word_list)
self.widgets = set()
def add_widget(self, widget):
"""Add a widget to the list of widgets to do auto-completion for."""
if widget in self.widgets:
return # Widget already added
if isinstance(widget, TextBox):
self._add_text_box(widget)
return
raise ValueError("Widget type %s not supported." % (type(widget)))
def add_words(self, words, update=True):
"""Add a word or words to the list of words to auto-complete."""
for word in words:
if self.isusable(word):
self._word_freq[word] += 1
if update:
self._update_word_list()
def add_words_from_units(self, units):
"""Collect all words from the given translation units to use for
auto-completion.
@type units: list
@param units: The translation units to collect words from.
"""
for unit in units:
target = unit.target
if not target:
continue
self.add_words(self.wordsep_re.split(target), update=False)
if len(self._word_freq) > self.MAX_WORDS:
break
self._update_word_list()
def autocomplete(self, word):
for w in self._word_list:
if w.startswith(word):
return w, w[len(word):]
return None, u''
def clear_widgets(self):
"""Release all registered widgets from the spell of auto-completion."""
for w in set(self.widgets):
self.remove_widget(w)
def clear_words(self):
"""Remove all registered words; effectively turns off auto-completion."""
self._word_freq = []
self._word_list = defaultdict(lambda: 0)
def isusable(self, word):
"""Returns a value indicating if the given word should be kept as a
suggestion for autocomplete."""
return len(word) > self.comp_len + 2
def remove_widget(self, widget):
"""Remove a widget (currently only L{TextBox}s are accepted) from
the list of widgets to do auto-correction for.
"""
if isinstance(widget, TextBox) and widget in self.widgets:
self._remove_textbox(widget)
def remove_words(self, words):
"""Remove a word or words from the list of words to auto-complete."""
if isinstance(words, basestring):
del self._word_freq[words]
self._word_list.remove(words)
else:
for w in words:
try:
del self._word_freq[w]
self._word_list.remove(w)
except KeyError:
pass
def _add_text_box(self, textbox):
"""Add the given L{TextBox} to the list of widgets to do auto-
correction on."""
if not hasattr(self, '_textbox_insert_ids'):
self._textbox_insert_ids = {}
handler_id = textbox.connect('text-inserted', self._on_insert_text)
self._textbox_insert_ids[textbox] = handler_id
self.widgets.add(textbox)
    def _on_insert_text(self, textbox, text, offset, elem):
        """Handle a 'text-inserted' signal and possibly suggest a completion.

        Only single-character insertions at the end of a word are considered;
        the suggestion is applied from an idle callback so the current signal
        emission and its side effects finish first.
        """
        if not isinstance(text, basestring) or self.wordsep_re.match(text):
            return
        # We are only interested in single character insertions, otherwise we
        # react similarly for paste and similar events
        if len(text.decode('utf-8')) > 1:
            return
        # Text before the cursor (including the new character) and after it.
        prefix = unicode(textbox.get_text(0, offset) + text)
        postfix = unicode(textbox.get_text(offset))
        buffer = textbox.buffer
        # Quick fix to check that we don't autocomplete in the middle of a word.
        right_lim = len(postfix) > 0 and postfix[0] or ' '
        if not self.wordsep_re.match(right_lim):
            return
        lastword = self.wordsep_re.split(prefix)[-1]
        if len(lastword) >= self.comp_len:
            completed_word, word_postfix = self.autocomplete(lastword)
            if completed_word == lastword:
                # The word is already complete; nothing to suggest.
                return
            if completed_word:
                # Updating of the buffer is deferred until after this signal
                # and its side effects are taken care of. We abuse
                # gobject.idle_add for that.
                insert_offset = offset + len(text)
                def suggest_completion():
                    # Block our own handler so setting the suggestion does not
                    # re-enter this callback.
                    textbox.handler_block(self._textbox_insert_ids[textbox])
                    #logging.debug("textbox.suggestion = {'text': u'%s', 'offset': %d}" % (word_postfix, insert_offset))
                    textbox.suggestion = {'text': word_postfix, 'offset': insert_offset}
                    textbox.handler_unblock(self._textbox_insert_ids[textbox])
                    # Select the suggested tail so further typing replaces it.
                    sel_iter_start = buffer.get_iter_at_offset(insert_offset)
                    sel_iter_end = buffer.get_iter_at_offset(insert_offset + len(word_postfix))
                    buffer.select_range(sel_iter_start, sel_iter_end)
                    # Returning False removes this idle callback after one run.
                    return False
                gobject.idle_add(suggest_completion, priority=gobject.PRIORITY_HIGH)
def _remove_textbox(self, textbox):
"""Remove the given L{TextBox} from the list of widgets to do
auto-correction on.
"""
if not hasattr(self, '_textbox_insert_ids'):
return
# Disconnect the "insert-text" event handler
textbox.disconnect(self._textbox_insert_ids[textbox])
self.widgets.remove(textbox)
def _update_word_list(self):
"""Update and sort found words according to frequency."""
wordlist = self._word_freq.items()
wordlist.sort(key=lambda x:x[1], reverse=True)
self._word_list = [items[0] for items in wordlist]
class Plugin(BasePlugin):
description = _('Automatically complete long words wh | ile you type')
display_name = _('AutoCompletor')
version = 0.1
# INITIALIZERS #
def __init__(self, internal_name, main_controller):
self.internal_name = internal_name
self.main_controller = main_controller
self._init_plugin()
def _init_plugin(self):
from virtaal.common import pan_app
| self.autocomp = AutoCompletor(self.main_controller)
self._store_loaded_id = self.main_controller.store_controller.connect('store-loaded', self._on_store_loaded)
if self.main_controller.store_controller.get_store():
# Connect to already loaded store. This happens when the plug-in is enabled after loading a store.
self._on_store_loaded(self.main_controller.store_controller)
self._unitview_id = None
unitview = self.main_controller.unit_controller.view
if unitview.targets:
self._connect_to_textboxes(unitview, unitview.targets)
else:
self._unitview_id = unitview.connect('targets-created', self._connect_to_textboxes)
def _connect_to_textboxes(self, unitview, textboxes):
for target in textboxes:
self.autocomp.add_widget(target)
    # METHODS #
def destroy(self):
"""Remove all signal-connections."""
self.autocomp.clear_words()
self.autocomp.clear_widgets()
self.main_controller.store_controller.disconnect(self._store_loaded_id)
if getattr(self, '_cursor_changed_id', None):
self.store_cursor.disconnect(self._cursor_changed_id)
if self._unitview_id:
self.main_controller.unit_controller.view.disconnect(self._unitview_id)
# EVENT HANDLERS #
def _on_cursor_change(self, cursor):
def add_widgets():
if hasattr(self, 'lastunit'):
if self.lastunit.hasplural():
for target in self.lastunit.target:
if target:
#logging.debug('Adding words: %s' % (self.autocomp.wordsep_re.split(unicode(target))))
self.autocomp.add_words(self.autocomp.wordsep_re.split(unicode(target)))
else:
if self.lastunit.target:
#logging.debug('Adding words: %s' % (self.autocomp.wordsep_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.