prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
def action_vakai_load_more(context, action, parent_entity_bundle, last_id, parent_entity_id, **args):
try:
parent_entity_type = 'Vakai'
parent_entity_id = int(parent_entity_id)
last_id = int(last_id)
output = Object()
db = IN.db
connection = db.connection
# TODO: paging
# get total
total = 0
limit = 10
# TODO: make it dynamic
cursor = db.execute('''SELECT
count(field_vakai_parent.value)
FROM
field.field_vakai_parent
JOIN
config.vakai ON field_vakai_parent.entity_id = vakai.id
WHERE
vakai.type = %(parent_entity_bundle)s AND
field_vakai_parent.value = %(parent_id)s AND
vakai.id < %(last_id)s AND
vakai.status != 0
''', {
'parent_entity_bundle' : parent_entity_bundle,
'parent_id' : parent_entity_id,
'last_id' : last_id,
})
if cursor.rowcount >= 0:
total = int(cursor.fetchone()[0])
more_id = '_'.join(('more-vakais', parent_entity_type, str(parent_entity_id)))
if total > 0:
cursor = db.execute('''SELECT
field_vakai_parent.entity_type,
field_vakai_parent.entity_id,
field_vakai_parent.value,
vakai.weight
FROM
field.field_vakai_parent
JOIN
config.vakai ON field_vakai_parent.entity_id = vakai.id
WHERE
vakai.type = %(parent_ent | ity_bundle)s AND
field_vakai_parent.value = %(parent_id)s AND
vakai.id < %(last_id)s AND
vakai.status != 0
ORDER BY
vakai.weight
LIMIT %(limit)s
''', {
'parent_entity_bundle' : parent_entity_bundle,
'parent_id' : parent_entity_id,
'last_id' : last_id,
'limit' : limit,
})
ids = []
last_id = 0
if cursor.rowcount >= 0:
for row in curso | r:
# reverse reference
ids.append(row['entity_id'])
last_id = ids[-1] # last id
vakais = IN.entitier.load_multiple('Vakai', ids)
for id, vakai in vakais.items():
obj = ThemeArgs(vakai, {'view_mode' : 'adminlist'})
output.add(obj)
remaining = total - limit
if remaining > 0 and last_id > 0:
output.add('TextDiv', {
'id' : more_id,
'value' : str(remaining) + ' more...',
'css' : ['ajax i-text-center pointer i-panel-box i-panel-box-primary'],
'attributes' : {
'data-href' : ''.join(('/vakai/more/!', str(parent_entity_bundle), '/', str(last_id), '/', str(parent_entity_id)))
},
'weight' : -1
})
output = {more_id : output}
context.response = In.core.response.PartialResponse(output = output)
except:
IN.logger.debug() |
Error("Path already registered to an endpoint")
self._endpoints[path] = callback
def register_mux_endpoint(self, path, key, value, callback):
prev = self._mux_endpoints.get(path)
if prev is None:
self.register_endpoint(path, self._handle_mux)
self._mux_endpoints[path] = prev = (key, {})
prev_key, prev_values = prev
if prev_key != key:
raise self.printer.config_error(
"mux endpoint %s %s %s may have only one key (%s)"
% (path, key, value, prev_key))
if value in prev_values:
raise self.printer.config_error(
"mux endpoint %s %s %s already registered (%s)"
% (path, key, value, prev_values))
prev_values[value] = callback
def _handle_mux(self, web_request):
key, values = self._mux_endpoints[web_request.get_method()]
if None in values:
key_param = web_request.get(key, None)
else:
key_param = web_request.get(key)
if key_param not in values:
raise web_request.error("The value '%s' is not valid for %s"
% (key_param, key))
values[key_param](web_request)
def _handle_list_endpoints(self, web_request):
web_request.send({'endpoints': list(self._endpoints.keys())})
def _handle_info_request(self, web_request):
client_info = web_request.get_dict('client_info', None)
if client_info is not None:
web_request.get_client_connection().set_client_info(client_info)
state_message, state = self.printer.get_state_message()
src_path = os.path.dirname(__file__)
klipper_path = os.path.normpath(os.path.join(src_path, ".."))
response = {'state': state, 'state_message': state_message,
'hostname': socket.gethostname(),
'klipper_path': klipper_path, 'python_path': sys.executable}
start_args = self.printer.get_start_args()
for sa in ['log_file', 'config_file', 'software_version', 'cpu_info']:
response[sa] = start_args.get(sa)
web_request.send(response)
def _handle_estop_request(self, web_request):
self.printer.invoke_shutdown("Shutdown due to webhooks request")
def _handle_rpc_registration(self, web_request):
template = web_request.get_dict('response_template')
method = web_request.get_str('remote_method')
new_conn = web_request.get_client_connection()
logging.info("webhooks: registering remote method '%s' "
"for connection id: %d" % (method, id(new_conn)))
self._remote_methods.setdefault(method, {})[new_conn] = template
def get_connection(self):
return self.sconn
def get_callback(self, path):
cb = self._endpoints.get(path, None)
if cb is None:
msg = "webhooks: No registered callback for path '%s'" % (path)
logging.info(msg)
raise WebRequestError(msg)
return cb
def get_status(self, eventtime):
state_message, state = self.printer.get_state_message()
return {'state': state, 'state_message': state_message}
def call_remote_method(self, method, **kwargs):
if method not in self._remote_methods:
raise self.printer.command_error(
"Remote method '%s' not registered" % (method))
conn_map = self._remote_methods[method]
valid_conns = {}
for conn, template in conn_map.items():
if not conn.is_closed():
valid_conns[conn] = template
out = {'params': kwargs}
out.update(template)
conn.send(out)
if not valid_conns:
del self._remote_methods[method]
raise self.printer.command_error(
"No active connections for method '%s'" % (method))
self._remote_methods[method] = valid_conns
class GCodeHelper:
def __init__(self, printer):
self.printer = printer
self.gcode = printer.lookup_object("gcode")
# Output subscription tracking
self.is_output_registered = False
self.clients = {}
# Register webhooks
wh = printer.lookup_object('webhooks')
wh.register_endpoint("gcode/help", self._handle_help)
wh.register_endpoint("gcode/script", self._handle_script)
wh.register_endpoint("gcode/restart", self._handle_restart)
wh.register_endpoint("gcode/firmware_restart",
self._handle_firmware_restart)
wh.register_endpoint("gcode/subscribe_output",
self._handle_subscribe_output)
def _handle_help(self, web_request):
web_request.send(self.gcode.get_command_help())
def _handle_script(self, web_request):
self.gcode.run_script(web_request.get_str('script'))
def _handle_restart(self, web_request):
self.gcode.run_script('restart')
def _handle_firmware_restart(self, web_request):
self.gcode.run_script('firmware_restart')
def _output_callback(self, msg):
for cconn, template in list(self.clients.items()):
if cconn.is_closed():
del self.clients[cconn]
continue
tmp = dict(template)
tmp['params'] = {'response': msg}
cconn.send(tmp)
def _handle_subscribe_output(self, web_request):
cconn = web_request.get_client_connection()
template = web_request.get_dict('response_template', {})
self.clients[cconn] = template
if not self.is_output_registered:
self.gcode.register_output_handler(self._output_callback)
self.is_output_registered = True
SUBSCRIPTION_REFRESH_TIME = .25
class QueryStatusHelpe | r:
def __init__(self, printer):
self.printer = printer
self.clients = {}
self.pending_queries = []
self.query_timer = None
self.last_query = {}
# Register webhooks
webhooks = printer.lookup_object('webhooks')
webhooks.register_endpoint("objects/list", self._handle_list)
webhooks.register_endpoint("objects/query", self._hand | le_query)
webhooks.register_endpoint("objects/subscribe", self._handle_subscribe)
def _handle_list(self, web_request):
objects = [n for n, o in self.printer.lookup_objects()
if hasattr(o, 'get_status')]
web_request.send({'objects': objects})
def _do_query(self, eventtime):
last_query = self.last_query
query = self.last_query = {}
msglist = self.pending_queries
self.pending_queries = []
msglist.extend(self.clients.values())
# Generate get_status() info for each client
for cconn, subscription, send_func, template in msglist:
is_query = cconn is None
if not is_query and cconn.is_closed():
del self.clients[cconn]
continue
# Query each requested printer object
cquery = {}
for obj_name, req_items in subscription.items():
res = query.get(obj_name, None)
if res is None:
po = self.printer.lookup_object(obj_name, None)
if po is None or not hasattr(po, 'get_status'):
res = query[obj_name] = {}
else:
res = query[obj_name] = po.get_status(eventtime)
if req_items is None:
req_items = list(res.keys())
if req_items:
subscription[obj_name] = req_items
lres = last_query.get(obj_name, {})
cres = {}
for ri in req_items:
rd = res.get(ri, None)
if is_query or rd != lres.get(ri):
cres[ri] = rd
if cres or is_query:
cquery[obj_name] = cres
# Send data
if cquery or is_query:
tmp = dict(template)
tmp['params'] |
# -*- coding: utf-8 -*-
import pytest
from django.core.urlresolvers import reverse
pytestmark = pytest.mark.django_db
global_footer_links = [
'About',
'Developers',
'Privacy',
'Report an issue',
]
def assert_title_and_links_on_page( | browser, url, title, links_text):
browser.visit(url)
assert title in browser.title
for link_text in links_text:
assert browser.find_link_by_text(link_text)
def test_homepage(browser):
url = reverse('pages:home')
assert_title_and_links_on_page(browser, url, "FossEvents", global_footer_links)
def test_about_page(browser):
url = reverse('pages:about')
assert_title_and_links_on_page(browser, url, "About", globa | l_footer_links)
def test_privacy_page(browser):
url = reverse('pages:privacy')
assert_title_and_links_on_page(browser, url, "Privacy", global_footer_links)
|
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
@http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
def track_mail_open(self, mail_id, **post):
""" Email trac | king. """
mail_mail_stats = request.registry.get('mail.mail.statistics')
mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
response = werkzeug.wrappers.Response()
response.mimetype = 'image/gif'
res | ponse.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
return response
@http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', auth='none')
def mailing(self, mailing_id, email=None, res_id=None, **post):
cr, uid, context = request.cr, request.uid, request.context
MassMailing = request.registry['mail.mass_mailing']
mailing_ids = MassMailing.exists(cr, SUPERUSER_ID, [mailing_id], context=context)
if not mailing_ids:
return 'KO'
mailing = MassMailing.browse(cr, SUPERUSER_ID, mailing_ids[0], context=context)
if mailing.mailing_model == 'mail.mass_mailing.contact':
list_ids = [l.id for l in mailing.contact_list_ids]
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('list_id', 'in', list_ids), ('id', '=', res_id), ('email', 'ilike', email)], context=context)
request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
else:
email_fname = None
if 'email_from' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email_from'
elif 'email' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email'
if email_fname:
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('id', '=', res_id), (email_fname, 'ilike', email)], context=context)
if 'opt_out' in request.registry[mailing.mailing_model]._all_columns:
request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
return 'OK'
|
on't need to add two
spaces at the end of a line.
"""
self.linebreak = re.compile(r'^ *\n(?!\s*$)')
self.text = re.compile(
r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
)
class InlineLexer(object):
"""Inline level lexer for inline grammars."""
default_features = [
'escape', 'autolink', 'url', 'tag',
'footnote', 'link', 'reflink', 'nolink',
'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
def __init__(self, renderer, rules=None, **kwargs):
self.renderer = renderer
self.links = {}
self.footnotes = {}
self.footnote_index = 0
if not rules:
rules = InlineGrammar()
self.rules = rules
self._in_link = False
self._in_footnote = False
def __call__(self, text):
return self.output(text)
def setup(self, links, footnotes):
self.footnote_index = 0
self.links = links or {}
self.footnotes = footnotes or {}
def output(self, text, features=None):
text = text.rstrip('\n')
if not features:
features = list(self.default_features)
if self._in_footnote and 'footnote' in features:
features.remove('footnote')
output = ''
def manipulate(text):
for key in features:
pattern = getattr(self.rules, key)
m = pattern.match(text)
if not m:
continue
self.line_match = m
out = getattr(self, 'output_%s' % key)(m)
if out is not None:
return m, out
return False
self.line_started = False
while text:
ret = manipulate(text)
self.line_started = True
if ret is not False:
m, out = ret
output += out
text = text[len(m.group(0)):]
continue
if text:
raise RuntimeError('Infinite loop at: %s' % text)
return output
def output_escape(self, m):
return m.group(1)
def output_autolink(self, m):
link = m.group(1)
if m.group(2) == '@':
is_email = True
else:
is_email = False
return self.renderer.autolink(link, is_email)
def output_url(self, m):
link = m.group(1)
if self._in_link:
return self.renderer.text(link)
return self.renderer.autolink(link, False)
def output_tag(self, m):
text = m.group(0)
lower_text = text.lower()
if lower_text.startswith('<a '):
self._in_link = True
if lower_text.startswith('</a>'):
self._in_link = False
return self.renderer.tag(text)
def output_footnote(self, m):
key = _keyify(m.group(1))
if key not in self.footnotes:
return None
if self.footnotes[key]:
return None
self.footnote_index += 1
self.footnotes[key] = self.footnote_index
return self.renderer.footnote_ref(key, self.footnote_index)
def output_link(self, m):
return self._process_link(m, m.group(2), m.group(3))
def output_reflink(self, m):
key = _keyify(m.group(2) or m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def output_nolink(self, m):
key = _keyify(m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def _process_link(self, m, link, title=None):
line = m.group(0)
text = m.group(1)
if line[0] == '!':
return self.renderer.image(link, title, text)
self._in_link = True
text = self.output(text)
self._in_link = False
return self.renderer.link(link, title, text)
def output_double_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.double_emphasis(text)
def output_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.emphasis(text)
def output_code(self, m):
text = m.group(2)
return self.renderer.codespan(text)
def output_linebreak(self, m):
return self.renderer.linebreak()
def output_strikethrough(self, m):
text = self.output(m.group(1))
return self.renderer.strikethrough(text)
def output_text(self, m):
text = m.group(0)
return self.renderer.text(text)
class Renderer(object):
"""The default HTML renderer for rendering Markdown.
"""
def __init__(self, **kwargs):
self.options = kwargs
def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
if not lang:
code = escape(code, smart_amp=False)
return '<pre><code>%s</code></pre>\n' % code
code = escape(code, quote=True, smart_amp=False)
return '<pre><code class="lang-%s">%s</code></pre>\n' % (lang, code)
def block_quote(self, text):
"""Rendering <blockquote> with the given text.
:param text: text content of the blockquote.
"""
return '<blockquote>%s</blockquote>\n' % text
def block_html(self, html):
"""Rendering block level pure html content.
:param html: text content of the html snippet.
"""
if self.options.get('skip_style') and \
html.lower().startswith('<style'):
return ''
if self.options.get('escape'):
return escape(html)
return html
def header(self, text, level, raw=None):
"""Rendering header/heading tags like ``<h1>`` ``<h2>``.
:param text: rendered text content for the header.
:param level: a number for the header level, for example: 1.
:param raw: raw text content of the header.
"""
return '<h%d>%s</h%d>\n' % (level, text, level)
def hrule(self):
"""Rendering method for ``<hr>`` tag."""
if self.options.get('use_xhtml'):
return '<hr />\n'
return '<hr>\n'
def list(self, body, ordered=True):
"""Rendering list tags like ``<ul>`` and ``<ol>``.
:param body: body contents of the list.
:param ordered: whether this list is ordered or not.
"""
tag = 'ul'
if ordered:
tag = 'ol'
return '<%s>\n%s</%s>\n' % (tag, body, tag)
def list_item(self, text):
"""Rendering list item snippet. Like ``<li>``."""
return '<li>%s</li>\n' % text
def paragraph(self, text):
"""Rendering paragraph tags. Like ``<p>``."""
return '<p>%s</p>\n' % text
def table(self, header, body):
"""Rendering table element. Wrap header and body in it.
:param header: header part of the table.
:param body: body part of the table.
"""
return (
'<table>\n<thead>%s</thead>\n'
'<tbody>\n%s</tbody>\n</table>\n'
) % (header, body)
def table_row(self, content):
"""Rendering a table row. Like ``<tr>``.
:param cont | ent: content of current table row.
"""
return '<tr>\n%s</tr>\n' % content
def table_cell(self, content, **flags):
"""Rendering a table cell. Like ``<th>`` ``<td>``.
:param content: content of current table cell.
:param | header: whether this is header or not.
:param align: align of current table cell.
"""
if flags['header']:
tag = 'th'
else:
tag = 'td'
align = flags['align']
if not align:
return '<%s>%s</%s>\n' % (tag, content, tag)
return '<%s style="text-align:%s">%s</%s>\n' % (
|
# -*- coding: utf-8 -*-
#
# zambiaureport documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named | 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of sourc | e files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'zambiaureport'
copyright = u'2014, Andre Lesa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'zambiaureportdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'zambiaureport.tex', u'zambiaureport Documentation',
u'Andre Lesa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zambiaureport', u'zambiaureport Documentation',
[u'Andre Lesa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'zambiaureport', u'zambiaureport Documentation',
u'Andre Lesa', 'zambiaureport',
'Zambia U-Report reference implementation.','Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote' |
from .binary_searc | h_tree import BinarySe | archTree
|
# -*- coding: utf-8 -*
from distutils.core import setup
import os
PACKA | GE_NAME = "railgun"
def recurse(d):
ret = []
for f in os.listdir(d):
if f.startswith("."): continue
df = os.path.join(d, f)
if os.path.isfile(df):
ret.append(df)
elif f != "bu | ild":
ret += recurse(df)
return ret
def structure(fs):
s = {}
for f in fs:
d = os.path.dirname(f)
if not d.startswith("meta/"): continue
d = PACKAGE_NAME + d[4:]
v = s.get(d, [])
s[d] = v
v.append(f)
return s.items()
setup(name='docker-railgun',
version='0.1',
description='Self-organizing Docker-based container building and provisioning',
author='Rickard Petzäll',
author_email='rickard@evolviq.com',
url='https://github.com/evolvIQ/railgun',
packages=[PACKAGE_NAME, "%s.host_providers" % PACKAGE_NAME],
scripts=['bin/railgun'],
data_files=structure(recurse("meta"))
)
|
rn hasattr(os, 'getuid') and os.getuid() == 0
def determine_num_of_cpus():
''' Number of virtual or physical CPUs on this system '''
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
return 1
def to_winpath(path):
if path.startswith('/'):
path = '%s:%s' % (path[1], path[2:])
return path.replace('/', '\\')
def to_unixpath(path):
if path[1] == ':':
path = '/%s%s' % (path[0], path[2:])
return path
def to_winepath(path):
path = path.replace('/', '\\\\')
# wine maps the filesystem root '/' to 'z:\'
path = 'z:\\%s' % path
return path
def fix_winpath(path):
return path.replace('\\', '/')
def system_info():
'''
Get the sysem information.
Return a tuple with the platform type, the architecture and the
distribution
'''
# Get the platform info
platform = sys.platform
if platform.startswith('win'):
platform = Platform.WINDOWS
elif platform.startswith('darwin'):
platform = Platform.DARWIN
elif platform.startswith('linux'):
platform = Platform.LINUX
else:
raise FatalError(_("Platform %s not supported") % platform)
# Get the architecture info
if platform == Platform.WINDOWS:
platform_str = sysconfig.get_platform()
if platform_str in ['win-amd64', 'win-ia64']:
arch = Architecture.X86_64
else:
arch = Architecture.X86
else:
uname = os.uname()
arch = uname[4]
if arch == 'x86_64':
arch = Architecture.X86_64
elif arch.endswith('86'):
arch = Architecture.X86
else:
raise FatalError(_("Architecture %s not supported") % arch)
# Get the distro info
if platform == Platform.LINUX:
d = pplatform.linux_distribution()
if d[0] in ['Ubuntu', 'debian', 'LinuxMint']:
distro = Distro.DEBIAN
if d[2] in ['maverick', 'isadora']:
distro_version = DistroVersion.UBUNTU_MAVERICK
elif d[2] in ['lucid', 'julia']:
distro_version = DistroVersion.UBUNTU_LUCID
elif d[2] in ['natty', 'katya']:
distro_version = DistroVersion.UBUNTU_NATTY
elif d[2] in ['oneiric', 'lisa']:
distro_version = DistroVersion.UBUNTU_ONEIRIC
elif d[2] in ['precise', 'maya']:
distro_version = DistroVersion.UBUNTU_PRECISE
elif d[2] in ['quantal', 'nadia']:
distro_version = DistroVersion.UBUNTU_QUANTAL
elif d[2] in ['raring', 'olivia']:
distro_version = DistroVersion.UBUNTU_RARING
elif d[2] in ['saucy', 'petra']:
distro_version = DistroVersion.UBUNTU_SAUCY
elif d[2] in ['trusty', 'qiana']:
distro_version = DistroVersion.UBUNTU_TRUSTY
elif d[2] in ['utopic']:
distro_version = DistroVersion.UBUNTU_UTOPIC
elif d[1].startswith('6.'):
distro_version = DistroVersion.DEBIAN_SQUEEZE
elif d[1].startswith('7.') or d[1].startswith('wheezy'):
distro_version = DistroVersion.DEBIAN_WHEEZY
elif d[1].startswith('8.') or d[1].startswith('jessie'):
distro_version = DistroVersion.DEBIAN_JESSIE
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0] in ['RedHat', 'Fedora', 'CentOS', 'Red Hat Enterprise Linux Server', 'CentOS Linux']:
distro = Distro.REDHAT
if d[1] == '16':
distro_version = DistroVersion.FEDORA_16
elif d[1] == '17':
distro_version = DistroVersion.FEDORA_17
elif d[1] == '18':
distro_version = DistroVersion.FEDORA_18
elif d[1] == '19':
distro_version = DistroVersion.FEDORA_19
elif d[1] == '20':
distro_version = DistroVersion.FEDORA_20
elif d[1] == '21':
distro_version = DistroVersion.FEDORA_21
elif d[1] == '22':
distro_version = DistroVersion.FEDORA_22
elif d[1].startswith('6.'):
distro_version = | DistroVersion.REDHAT_6
elif d[1].startswith('7.'):
distro_version = DistroVersion.REDHAT_7
| else:
# FIXME Fill this
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0].strip() in ['openSUSE']:
distro = Distro.SUSE
if d[1] == '12.1':
distro_version = DistroVersion.OPENSUSE_12_1
elif d[1] == '12.2':
distro_version = DistroVersion.OPENSUSE_12_2
elif d[1] == '12.3':
distro_version = DistroVersion.OPENSUSE_12_3
else:
# FIXME Fill this
raise FatalError("Distribution OpenSuse '%s' "
"not supported" % str(d))
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif platform == Platform.WINDOWS:
distro = Distro.WINDOWS
win32_ver = pplatform.win32_ver()[0]
dmap = {'xp': DistroVersion.WINDOWS_XP,
'vista': DistroVersion.WINDOWS_VISTA,
'7': DistroVersion.WINDOWS_7,
'post2008Server': DistroVersion.WINDOWS_8,
'8': DistroVersion.WINDOWS_8}
if win32_ver in dmap:
distro_version = dmap[win32_ver]
else:
raise FatalError("Windows version '%s' not supported" % win32_ver)
elif platform == Platform.DARWIN:
distro = Distro.OS_X
ver = pplatform.mac_ver()[0]
if ver.startswith('10.10'):
distro_version = DistroVersion.OS_X_YOSEMITE
elif ver.startswith('10.9'):
distro_version = DistroVersion.OS_X_MAVERICKS
elif ver.startswith('10.8'):
distro_version = DistroVersion.OS_X_MOUNTAIN_LION
else:
raise FatalError("Mac version %s not supported" % ver)
num_of_cpus = determine_num_of_cpus()
return platform, arch, distro, distro_version, num_of_cpus
def validate_packager(packager):
# match packager in the form 'Name <email>'
expr = r'(.*\s)*[<]([a-zA-Z0-9+_\-\.]+@'\
'[0-9a-zA-Z][.-0-9a-zA-Z]*.[a-zA-Z]+)[>]$'
return bool(re.match(expr, packager))
def copy_files(origdir, destdir, files, extensions, target_platform):
for f in files:
f = f % extensions
install_dir = os.path.dirname(os.path.join(destdir, f))
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if destdir[1] == ':':
# windows path
relprefix = to_unixpath(destdir)[2:]
else:
relprefix = destdir[1:]
orig = os.path.join(origdir, relprefix, f)
dest = os.path.join(destdir, f)
m.action("copying %s to %s" % (orig, dest))
try:
shutil.copy(orig, dest)
except IOError:
m.warning("Could not copy %s to %s" % (orig, dest))
def remove_list_duplicates(seq):
''' Remove list duplicates maintaining the order '''
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def parse_file(filename, dict):
    """Execute the python file *filename*, populating *dict* (param name
    kept for compatibility, although it shadows the builtin) with the
    names it defines.  Errors are printed with a traceback and re-raised.
    """
    try:
        execfile(filename, dict)
    except Exception as ex:
        # 'as' syntax replaces the py2-only 'except Exception, ex' form.
        import traceback
        traceback.print_exc()
        # bare 'raise' preserves the original traceback ('raise ex' did not)
        raise
def escape_path(path):
    """Normalize backslashes to forward slashes, then backslash-escape
    parentheses and spaces so *path* can be handed to a shell."""
    replacements = (
        ('\\', '/'),
        ('(', '\\\('),
        (')', '\\\)'),
        (' ', '\\\\ '),
    )
    for old, new in replacements:
        path = path.replace(old, new)
    return path
def get_wix_prefix():
if 'WIX' in os.environ:
wix_prefix = os.path.join(os.environ['WIX'], 'bin')
else:
wix_prefix = 'C:/Program Files%s/Windows Installer XML v3.5/bin'
if not os.path.exists(wix_prefix):
wix_prefix = wix_prefix % ' (x86)'
if not os.path.exists(wix_prefix):
raise FatalError("The required |
from __future__ import unicode_literals
from django.test import TestCase
from accelerator.tests.factories import ProgramPartnerTypeFactory
|
class TestProgramPartnerType(TestCase):
    """Check the string rendering of ProgramPartnerType instances."""

    def test_str(self):
        instance = ProgramPartnerTypeFactory()
        rendered = str(instance)
        # Both the partner type and the program name must be present.
        assert instance.partner_type in rendered
        assert instance.program.name in rendered
|
ors
# --- rotor selection panel ---------------------------------------------------
# One Spinbox per slot; the StringVars (var4..var7) are read back by code().
ROTOR_CHOICES = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
                 "rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
                 "rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]")
frameRotor = Frame(frame, background='white')
var4 = StringVar()
spinbox = Spinbox(frameRotor, values=ROTOR_CHOICES, textvariable=var4, width=44)
var4.set(ROTOR_CHOICES[0])
spinbox.grid(row=0, column=1)
var5 = StringVar()
spinbox = Spinbox(frameRotor, values=ROTOR_CHOICES, textvariable=var5, width=44)
var5.set(ROTOR_CHOICES[1])
spinbox.grid(row=1, column=1)
var6 = StringVar()
spinbox = Spinbox(frameRotor, values=ROTOR_CHOICES, textvariable=var6, width=44)
var6.set(ROTOR_CHOICES[2])
spinbox.grid(row=2, column=1)
var7 = StringVar()
# NOTE: values is a single string here (no trailing comma), so the reflector
# spinbox offers exactly one entry — kept as in the original.
spinbox = Spinbox(frameRotor, values=("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]"), textvariable=var7, width=44)
var7.set("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]")
spinbox.grid(row=3, column=1)
rotorn1 = Label(frameRotor, text='Slot n°=1:', padx=10, pady=5, background="white")
rotorn1.grid(row=0, column=0)
rotorn2 = Label(frameRotor, text='Slot n°=2:', padx=10, pady=5, background="white")
rotorn2.grid(row=1, column=0)
rotorn3 = Label(frameRotor, text='Slot n°=3:', padx=10, pady=5, background="white")
rotorn3.grid(row=2, column=0)
reflectorn = Label(frameRotor, text='Reflector:', padx=10, pady=5, background="white")
reflectorn.grid(row=3, column=0)
frameRotor.pack()
#frame_to_set_rotor_position
frame1 = Frame(frame, borderwidth=0, relief=FLAT, background='white')
frame1.pack(side=TOP, padx=10, pady=10)
def update1(x):
    """Scale callback: show rotor-1 position x (1-26) as a letter in lab1."""
    pos = int(x)
    letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    lab1.configure(text='position : {}'.format(letters[pos - 1]))
def update2(x):
    """Scale callback: show rotor-2 position x (1-26) as a letter in lab2."""
    pos = int(x)
    letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    lab2.configure(text='position : {}'.format(letters[pos - 1]))
def update3(x):
    """Scale callback: show rotor-3 position x (1-26) as a letter in lab3."""
    pos = int(x)
    letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    lab3.configure(text='position : {}'.format(letters[pos - 1]))
# Column headers for the three rotor-position scales.
rotor1lab = Label(frame1, text='Rotor 1', padx=10, pady=5, background="white")
rotor1lab.grid(row=0, column=0)
rotor2lab = Label(frame1, text='Rotor 2', padx=10, pady=5, background="white")
rotor2lab.grid(row=0, column=1)
rotor3lab = Label(frame1, text='Rotor 3', padx=10, pady=5, background="white")
rotor3lab.grid(row=0, column=2)
#scales_choose_position
# One 1-26 slider per rotor; each command keeps its position label in sync.
var1 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable=var1, cursor='dot', showvalue=0, command=update1, length=100, background="white")
scale.grid(row=1, column=0, padx=60, pady=10)
var2 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable=var2, cursor='dot', showvalue=0, command=update2, length=100, background="white")
scale.grid(row=1, column=1, padx=60, pady=10)
var3 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable=var3, cursor='dot', showvalue=0, command=update3, length=100, background="white")
scale.grid(row=1, column=2, padx=60, pady=10)
# Labels that echo the chosen start position as a letter.
lab1 = Label(frame1, background="white")
lab1.grid(row=2, column=0)
lab2 = Label(frame1, background="white")
lab2.grid(row=2, column=1)
lab3 = Label(frame1, background="white")
lab3.grid(row=2, column=2)
#function_code
def code(event=None):
a = int(var1.get())
b = int(var2.get())
c = int(var3.get())
def rotationRotor(liste1):
liste1.append(liste1[0])
del liste1[0]
return liste1
def estValide(liste1):
if liste1 == []:
return False
for elt in liste1:
if alphabetList.count(elt.upper()) < 1:
return False
return True
sortie = entryvar.get()
var4str = var4.get()
var4list = list(var4str)
var5str = var5.get()
var5list = list(var5str)
var6str = var6.get()
var6list = list(var6str)
if var4list[5] == '1':
rotor1 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var4list[5] == '2':
rotor1 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var4list[5] == '3':
rotor1 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
if var5list[5] == '1':
rotor2 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var5list[5] == '2':
rotor2 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var5list[5] == '3':
rotor2 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
if var6list[5] == '1':
rotor3 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var6list[5] == '2':
rotor3 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var6list[5] == '3':
rotor3 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',' ']
alphabetDict = {'G': 7, 'U': 21, 'T': 20, 'L': 12, 'Y': 25, 'Q': 17, 'V': 22, 'J': 10, 'O': 15, 'W': 23, 'N': 14, 'R': 18, 'Z': 26, 'S': 19, 'X': 24, 'A': 1, 'M': 13, 'E': 5, 'D': 4, 'I': 9, 'F': 6, 'P': 16, 'B': 2, 'H': 8, 'K': 11, 'C': 3}
#rotor1 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
#rotor2 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
#rotor3 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
reflector = ['Y', 'R', 'U', 'H', 'Q', 'S', 'L', 'D', 'P', 'X', 'N', 'G', 'O', 'K', 'M', 'I', 'E', 'B', 'F', 'Z', 'C', 'W', 'V', 'J', 'A', 'T']
for loop1 in range(a):
rotationRotor(rotor1)
for loop2 in range(b):
rotationRotor(rotor2)
for loop3 in range(c):
rotationRotor(rotor3)
sortieListe = list(sortie)
print(sortieListe)
sortieListe = [x for x in sortieListe if x != " "]
print(sortieListe)
if not estValide(sortieListe):
value = StringVar()
value.set('Please enter only letters and spaces!')
liste.insert(END, value.get())
liste.itemconfig(END, {'bg':'red'})
liste.see("end")
elif (var4list[5] == var5list[5] == var6list[5]) or (var4list[5] == var5list[5]) or (var4list[5] == var6list[5]) or (var5list[5] == var6list[5]):
value = StringVar()
value.set('You can only use a rotor once!')
liste.insert(END, value.get())
liste.itemconfig(END, {'bg':'red'})
liste.see("end")
else:
s = []
for i in range(0,len(sortieListe),1):
a = alphabetDict[sortieListe[i].upper()]
b = rotor1[a-1]
c = alphabetDict[b]
d = rotor2[c-1]
e = alphabetDict[d]
f = rotor3[e-1]
g = alphabetDict[f]
h = reflector[g-1]
j = rotor3.index(h)
k = alphabetList[j]
l = rotor2.index(k)
m = alphabetList[l]
n = rotor1.index(m)
o = alphabetList[n]
|
# encoding: utf-8
from yast import import_module
import_module('U | I')
from yast import *
class Frame1Client:
    """Minimal YaST UI demo: a framed label and an OK button."""

    def main(self):
        # Build the dialog first, then run the modal open/wait/close cycle.
        dialog = VBox(
            Frame("Hey! I&mportant!", Label("Hello, World!")),
            PushButton("&OK")
        )
        UI.OpenDialog(dialog)
        UI.UserInput()
        UI.CloseDialog()
Frame1Client().main()
|
import django_filters
from django_filters import r | est_framework as filters
from django_rv_apps.apps.believe_his_prophets.models.book import Book
from django_rv_apps.apps.believe_his_prophets.models.bible_read import BibleRead
from django_rv_apps.apps.believe_his_prophets.models.te | stament import Testament
class BookFilter(django_filters.FilterSet):
    """FilterSet for Book: filter by id, testament and book_order."""

    # Restrict the testament filter to existing Testament rows.
    testament = filters.ModelChoiceFilter(
        queryset=Testament.objects.all())

    class Meta:
        model = Book
        fields = ('id', 'testament', 'book_order')
|
s required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggrega | tes admin api."""
from webob import exc
from nova.api.openstack.compute.contrib import aggregates
from nova import context
fr | om nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
# Canned aggregate fixtures shared by the tests below.
AGGREGATE_LIST = [
    {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
    {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
    {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
    {"name": "aggregate1", "id": "4", "availability_zone": "nova1"},
]
# NOTE(review): "host1, host2" is a single string in the original fixture,
# kept verbatim.
AGGREGATE = {
    "name": "aggregate1",
    "id": "1",
    "availability_zone": "nova1",
    "metadata": {"foo": "bar"},
    "hosts": ["host1, host2"],
}


class FakeRequest(object):
    """Minimal request stand-in carrying an admin nova.context."""
    environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
def setUp(self):
super(AggregateTestCase, self).setUp()
self.controller = aggregates.AggregateController()
self.req = FakeRequest()
self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
self.context = self.req.environ['nova.context']
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_index_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index,
self.user_req)
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.user_req,
{"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exception.InvalidAggregateAction,
self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertIsNone(availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req,
{"aggregate": {"name": "test"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_null_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "",
"availability_zone": "nova1"}})
def test_create_with_name_too_long(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "x" * 256,
"availability_zone": "nova1"}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
availability_zone="nova1",
foo='bar'))
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show,
self.user_req, "1")
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_no_admin( |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# | published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY | ; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Deposit API."""
from __future__ import absolute_import, print_function
from flask import current_app
from invenio_pidstore.models import PersistentIdentifier
from invenio_indexer.api import RecordIndexer
from invenio_deposit.receivers import \
index_deposit_after_publish as original_index_deposit_after_publish
from invenio_jsonschemas import current_jsonschemas
from .api import Project
from .tasks import datacite_register
def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):
    """Index the record after publishing.

    Project deposits get special handling (their videos are indexed too);
    anything else is delegated to the stock invenio-deposit receiver.
    """
    project_schema = current_jsonschemas.path_to_url(Project._schema)
    if deposit['$schema'] != project_schema:
        original_index_deposit_after_publish(sender=sender, action=action,
                                             pid=pid, deposit=deposit)
        return
    if action != 'publish':
        return
    # index videos (records)
    video_pids = Project(data=deposit).video_ids
    uuids = [str(row.object_uuid)
             for row in PersistentIdentifier.query.filter(
                 PersistentIdentifier.pid_value.in_(video_pids)).all()]
    # index project (record)
    _, published_record = deposit.fetch_published()
    uuids.append(str(published_record.id))
    RecordIndexer().bulk_index(iter(uuids))
def datacite_register_after_publish(sender, action=None, pid=None,
                                    deposit=None):
    """Mind DOI with DataCite after the deposit has been published."""
    if action != "publish":
        return
    if not current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        return
    recid_pid, record = deposit.fetch_published()
    # Registration happens asynchronously via a Celery task.
    datacite_register.delay(recid_pid.pid_value, str(record.id))
|
e given filename and retrieve registry settings for me"""
return self.inventory.loadConfiguration(filename)
def updateConfiguration(self, registry):
"""load the user setting in <registry> into my inventory"""
return self.inventory.updateConfiguration(registry)
def applyConfiguration(self):
"""transfer user settings to my inventory"""
# apply user settings to my properties
up, uc = self.configureProperties()
unknownProperties = up
unknownComponents = uc
# apply user settings to my components
up, uc = self.configureComponents()
unknownProperties += up
unknownComponents += uc
# give descendants a chance to adjust to configuration changes
self._configure()
return (unknownProperties, unknownComponents)
def configureProperties(self):
"""set the values of all the properties and facilities in my inventory"""
up, uc = self.inventory.configureProperties()
return self._claim(up, uc)
def configureComponents(self):
"""guide my subcomponents through the configuration process"""
up, uc = self.inventory.configureComponents()
return self._claim(up, uc)
def getDepositories(self):
return self.inventory.getDepositories()
# single component management
def retrieveComponent(self, name, factory, args=(), encoding='odb', vault=[], extras=[]):
"""retrieve component <name> from the persistent store"""
return self.inventory.retrieveComponent(name, factory, args, encoding, vault, extras)
def configureComponent(self, component, registry=None):
"""guide <component> through the configuration process"""
up, uc = self.inventory.configureComponent(component, registry)
return up, uc
# curator accessors
def getCurator(self):
"""return my persistent store manager"""
return self.inventory.getCurator()
def setCurator(self, curator):
"""set my persistent store manager"""
return self.inventory.setCurator(curator)
# accessors for the inventory items by category
def properties(self):
"""return a list of all the property objects in my inventory"""
return self.inventory.properties()
def facilities(self):
"""return a list of all the facility objects in my inventory"""
return self.inventory.facilities()
def components(self):
"""return a list of all the components in my inventory"""
return self.inventory.components()
# access to trait values and descriptors by name
# used by clients that obtain a listing of these names
# and want to access the underlying objects
def getTraitValue(self, name):
try:
return self.inventory.getTraitValue(name)
except KeyError:
pass
raise AttributeError("object '%s' of type '%s' has no attribute '%s'" % (
self.name, self.__class__.__name__, name))
def getTraitDescriptor(self, name):
try:
return self.inventory.getTraitDescriptor(name)
except KeyError:
pass
raise AttributeError("object '%s' of type '%s' has no attribute '%s'" % (
self.name, self.__class__.__name__, name))
    # support for the help facility
    def showProperties(self):
        """print a report describing my properties"""
        facilityNames = self.inventory.facilityNames()
        propertyNames = self.inventory.propertyNames()
        propertyNames.sort()
        print "properties of %r:" % self.name
        for name in propertyNames:
            # facilities are reported separately by showComponents()
            if name in facilityNames:
                continue
            # get the trait object
            trait = self.inventory.getTrait(name)
            # get the common trait attributes
            traitType = trait.type
            default = trait.default
            meta = trait.meta
            validator = trait.validator
            try:
                tip = meta['tip']
            except KeyError:
                tip = '(no documentation available)'
            # get the trait descriptor from the instance
            descriptor = self.inventory.getTraitDescriptor(name)
            # extract the instance specific values
            value = descriptor.value
            locator = descriptor.locator
            print "    %s=<%s>: %s" % (name, traitType, tip)
            print "        default value: %r" % default
            print "        current value: %r, from %s" % (value, locator)
            if validator:
                print "        validator: %s" % validator
        return
    def showComponents(self):
        """print a report describing my facilities and their current bindings"""
        facilityNames = self.inventory.facilityNames()
        facilityNames.sort()
        print "facilities of %r:" % self.name
        for name in facilityNames:
            # get the facility object
            facility = self.inventory.getTrait(name)
            meta = facility.meta
            try:
                tip = meta['tip']
            except KeyError:
                tip = '(no documentation available)'
            # get the trait descriptor from the instance
            descriptor = self.inventory.getTraitDescriptor(name)
            # extract the instance specific values
            value = descriptor.value
            locator = descriptor.locator
            print "    %s=<component name>: %s" % (name, tip)
            print "        current value: %r, from %s" % (value.name, locator)
            print "        configurable as: %s" % ", ".join(value.aliases)
        return
    def showUsage(self):
        """print a high level usage screen"""
        propertyNames = self.inventory.propertyNames()
        propertyNames.sort()
        facilityNames = self.inventory.facilityNames()
        facilityNames.sort()
        print "component %r" % self.name
        if propertyNames:
            print "    properties:", ", ".join(propertyNames)
        if facilityNames:
            print "    facilities:", ",".join(facilityNames)
        print "For more information:"
        print "  --help-properties: prints details about user settable properties"
        print "  --help-components: prints details about user settable facilities and components"
        return
    def showCurator(self):
        """print a description of the manager of my persistence store"""
        self.inventory.dumpCurator()
        return
# default implementations of the various factories
def createRegistry(self, name=None):
"""create a registry instance to store my configuration"""
if name is None:
name = self.name
import pyre.inventory
return pyre.inventory.registry(name)
def createInventory(self):
"""create my inventory instance"""
return self.Inventory(self.name)
def __init__(self, name):
Traceable.__init__(self)
self.name = name
self.inventory = self.createInventory()
# other names by which I am known for configuration purposes
self.aliases = [ name ]
import journal
self._info = journal.info(name)
self._debug = journal.debug(name)
# modify the inventory defaults that were hardwired at compile time
# gives derived components an opportunity to modify their default behavior
# from what was inherited from their parent's inventory
self._defaults()
return
    # default implementations for the lifecycle management hooks
    # (deliberate no-ops: subclasses override the stages they care about)
    def _defaults(self):
        """modify the default inventory values"""
        return
    def _configure(self):
        """modify the configuration programmatically"""
        return
    def _init(self):
        """wake up"""
        return
    def _fini(self):
        """all done"""
        return
# misc
def _claim(self, up, uc):
"""decorate the missing traits with my name"""
rup = [ (self.name + '.' + key, value, locator) for key, value, locator in up ]
ruc = [ self.name + '.' + key for ke |
import synapse.tests.utils as s_t_utils
class UsGovTest(s_t_utils.SynTest):
    """Model checks for the gov:us:cage form."""

    async def test_models_usgov_cage(self):
        async with self.getTestCore() as core:
            input_props = {
                'street': '123 Main St',
                'city': 'Smallville',
                'state': 'Kansas',
                'zip': 12345,
                'cc': 'US',
                'country': 'United States of America',
                'phone0': '17035551212',
                'phone1': 17035551213,
                'name0': 'Kent Labs',
            }
            # Text values should come back lower-cased, the int phone
            # stringified.
            expected_props = {
                'street': '123 main st',
                'city': 'smallville',
                'state': 'kansas',
                'zip': 12345,
                'cc': 'us',
                'country': 'united states of america',
                'phone0': '17035551212',
                'phone1': '17035551213',
                'name0': 'kent labs',
            }
            formname = 'gov:us:cage'
            cage = '7qe71'
            async with await core.snap() as snap:
                # The CAGE code itself should normalize to lower case.
                node = await snap.addNode(formname, cage.upper(), input_props)
                self.eq(node.ndef, (formname, cage))
                for name, want in expected_props.items():
                    self.eq(node.get(name), want)
|
self, message):
console = {}
console['type'] = 'console'
console['identifier'] = self.identifier
console['message'] = message
self.write_message(json.dumps(console))
def sendPath(self, message):
console = {}
console['type'] = 'path'
console['identifier'] = self.identifier
console['message'] = message
self.write_message(json.dumps(console))
    def open(self):
        # New websocket connection: register it under a timestamp-based id
        # and announce it to the MAS coordinator agent.
        print 'new connection'
        self.identifier = str(int(time.time()))
        # NOTE(review): ids have 1-second resolution, so two clients
        # connecting within the same second would collide here.
        system_connection[self.identifier] = self
        m = createmessage('user', AG_COORDINATOR, 'send_message', "new_connection(%s)" % self.identifier)
        wrm = write_message(m)
        sock.send(wrm)
        self.sendConsoleMessage('System Ready')
    def on_message(self, message):
        # Log and parse the incoming payload; the forwarding logic below is
        # currently disabled (commented out), so parsing only validates JSON.
        print message
        jsonmessage = json.loads(message)
        # print jsonmessage
        #
        # m = createmessage(jsonmessage['sender'], jsonmessage['destination'], jsonmessage['typefunc'], jsonmessage['message'])
        # print m
        # wrm = write_message(m)
        # print 'message received %s' % message
        # print wrm
        # sock.send(wrm)
        # self.write_message(wrm)
    def on_close(self):
        # Drop the connection from the registry so no further messages are
        # routed to it.
        print 'connection closed'
        system_connection.pop(self.identifier)
class MainHandler(tornado.web.RequestHandler):
    """Serve the knight UI index page."""

    def get(self):
        index_path = os.path.join(root, 'knight' + os.sep + 'index.html')
        try:
            with open(index_path) as f:
                self.write(f.read())
        except IOError:
            self.write("404: Not Found")
class PlanHandler(tornado.web.RequestHandler):
    """Receive a board description and forward it to the MAS coordinator."""

    def prepare(self):
        # Only JSON bodies are parsed; anything else leaves json_args None.
        if self.request.headers["Content-Type"].startswith("application/json"):
            self.json_args = json.loads(self.request.body)
        else:
            self.json_args = None

    def post(self):
        identifier = self.json_args.get('identifier')
        forbidden = self.json_args.get('forbidden')
        mandatory = self.json_args.get('mandatory')
        size = self.json_args.get('size')
        # Write the ASP instance file; 'with' guarantees the file is closed
        # even if a malformed cell description raises mid-write (the old
        # open()/close() pair leaked the handle on error).
        with open('dlvprogram/instance.dl', 'w') as f:
            f.write('size(%s). ' % size)
            for forb in forbidden:
                f.write("forbidden(%s,%s). " % (forb.get('x'), forb.get('y')))
            for mark in mandatory:
                f.write("must_reach(%s,%s). " % (mark.get('x'), mark.get('y')))
        # Tell the coordinator the instance is ready, then stream each
        # forbidden cell with a small delay between messages.
        m = "instanceReady(%s, %s)" % (size, len(forbidden))
        m = createmessage('user', AG_COORDINATOR, 'send_message', m)
        wrm = write_message(m)
        sock.send(wrm)
        time.sleep(0.2)
        for forb in forbidden:
            mess = "forbidden_of_problem([%s,%s])" % (forb.get('x'), forb.get('y'))
            m = createmessage('user', AG_COORDINATOR, 'send_message', mess)
            wrm = write_message(m)
            sock.send(wrm)
            time.sleep(0.2)
        system_connection[identifier].sendConsoleMessage('Request sent to system')
class ResetHandler(tornado.web.RequestHandler):
    """Re-announce a client to the MAS, effectively resetting its session."""

    def prepare(self):
        content_type = self.request.headers["Content-Type"]
        if content_type.startswith("application/json"):
            self.json_args = json.loads(self.request.body)
        else:
            self.json_args = None

    def post(self):
        identifier = self.json_args.get('identifier')
        msg = createmessage('user', AG_COORDINATOR, 'send_message',
                            "new_connection(%s)" % identifier)
        sock.send(write_message(msg))
# URL map: websocket endpoint, index page, JSON APIs, then a static-file
# catch-all rooted at `root`.
application = tornado.web.Application([
    (r'/ws', WSHandler),
    (r"/", MainHandler),
    (r"/api/plan", PlanHandler),
    (r"/api/reset", ResetHandler),
    (r"/(.*)", tornado.web.StaticFileHandler, dict(path=root)),
])
# Last solver result, shared between the DALI protocol callbacks below.
temporaryresult = None
class DALI(protocol.Protocol):
def notifyFailure(self):
message = 'problem_failed(%s)' % self.currentproblem
m = createmessage('user', AG_METAPLANNER, 'send_message', message)
| wrm = write_message(m)
| sock.send(wrm)
    def checkPlan(self):
        # Poll the DLV solver once: if it finished, publish the plan; if it
        # is still running, enforce the TMAX timeout and re-schedule.
        if not self.planner.is_alive():
            print 'DLV ended.'
            try:
                self.planner.readresult()
                global temporaryresult
                temporaryresult = self.planner.getresult()
                if self.currentproblem == 1:
                    system_connection[self.identifier].sendConsoleMessage(
                        'Hamiltonian Tour Problem has found a solution')
                elif self.currentproblem == 2:
                    system_connection[self.identifier].sendConsoleMessage('Weak Constraint Problem has found a solution')
                elif self.currentproblem == 3:
                    system_connection[self.identifier].sendConsoleMessage('With Blank Problem has found a solution')
                message = 'new_moves_for_evaluate(%s)' % len(temporaryresult)
                m = createmessage('user', AG_METAPLANNER, 'send_message', message)
                wrm = write_message(m)
                sock.send(wrm)
                system_connection[self.identifier].sendConsoleMessage('Plan sent to MAS')
            # NOTE(review): bare 'except' treats any error while reading the
            # result (including programming errors) as "no plan found".
            except:
                self.notifyFailure()
        else:
            print 'DLV is alive'
            dt = time.time() - self.t0
            print dt, 'secs elapsed'
            if dt > TMAX:
                self.planner.terminate()
                print 'DLV terminated'
                self.notifyFailure()
            # re-poll in one second; no reschedule once the solver has ended
            threading.Timer(1, self.checkPlan).start()
def makePlan(self, problem):
path = "dlvprogram" + os.sep + "problem%s.dl" % problem
self.currentproblem = problem
self.planner = AspSolver("dlvprogram" + os.sep + "instance.dl", path)
self.planner.run()
self.t0 = time.time()
time.sleep(5)
threading.Timer(1, self.checkPlan).start()
def dataReceived(self, data):
# print 'data', data
fs = data.split('_.._')
identifier = fs[1]
self.identifier = identifier
if len(fs) > 3:
cmd = fs[2]
if cmd == 'path':
strJSONPath = fs[3]
print strJSONPath
system_connection[identifier].sendPath(strJSONPath)
elif cmd == 'as':
state = fs[3]
system_connection[identifier].sendConsoleMessage('State of agent: ' + str(state))
elif len(fs) > 2:
cmd = fs[2]
if cmd == 'pr':
system_connection[identifier].sendConsoleMessage('Plan Received From MAS')
elif cmd == 'ss1':
self.makePlan(1)
system_connection[identifier].sendConsoleMessage('Testing problem Hamiltonian Tour')
elif cmd == 'ss2':
self.makePlan(2)
system_connection[identifier].sendConsoleMessage('Testing problem Weak Constraint')
elif cmd == 'ss3':
system_connection[identifier].sendConsoleMessage('Trivial Solution')
elif cmd == 'ss4':
self.makePlan(3)
system_connection[identifier].sendConsoleMessage('Testing problem must reach')
elif cmd == 'pf1':
system_connection[identifier].sendConsoleMessage('Hamiltonian Tour Failed')
elif cmd == 'pf2':
system_connection[identifier].sendConsoleMessage('Weak Constraint Failed')
elif cmd == 'pf3':
system_connection[identifier].sendConsoleMessage('Blank Failed')
elif cmd == 'pft':
system_connection[identifier].sendConsoleMessage('Weak Constraint is not optimal')
elif cmd == 'rs':
system_connection[identifier].sendConsoleMessage('State of agent: 0')
elif cmd == 'smr':
for mv in temporaryresult:
mv = mv[5:-1]
x1, y1, x2, y2 = mv.split(',')
message = 'moves_for_evaluate([%s,%s,%s,%s])' % (x1, y1, x2, y2)
|
xt displayed before user input, cannot be edited by user (this can be changed later via the prefix property)
:param font: the font to use
:param pt_size: the point size of the text to draw
:param color: color of the font for this widget
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
self.text_prefix = prefix
self.pos = len(text)
self.input_options = input_options
self.colors = {}
#create widgets
self.frame = Frame(self, size=[1, 1], options=BGUI_NO_FOCUS | BGUI_DEFAULT | BGUI_CENTERY)
self.highli | ght = Frame(self, size=self.frame.size, border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.cursor = Frame(self, size=[1, 1], border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.label = Label(self, text=text, font=font, pt_size=pt_size, sub_theme=self.theme['LabelSubTheme'], options=BGU | I_NO_FOCUS | BGUI_DEFAULT)
#Color and setting initialization
self.colormode = 0
theme = self.theme
self.colors["text"] = [None, None]
self.colors["text"][0] = theme['InactiveTextColor']
self.colors["text"][1] = theme['TextColor']
self.colors["frame"] = [None, None]
self.colors["frame"][0] = theme['InactiveFrameColor']
self.colors["frame"][1] = theme['FrameColor']
self.colors["border"] = [None, None]
self.colors["border"][0] = theme['InactiveBorderColor']
self.colors["border"][1] = theme['BorderColor']
self.colors["highlight"] = [None, None]
self.colors["highlight"][0] = theme['HighlightColor']
self.colors["highlight"][1] = theme['HighlightColor']
self.border_size = [None, None]
self.border_size[0] = theme['InactiveBorderSize']
self.border_size[1] = theme['BorderSize']
self.swapcolors(0)
#gauge height of the drawn font
fd = self.system.textlib.dimensions(self.label.fontid, "Egj/}|^,")
py = .5 - (fd[1] / self.size[1] / 2)
px = fd[1] / self.size[0] - fd[1] / 1.5 / self.size[0]
self.label.position = [px, py]
self.fd = self.system.textlib.dimensions(self.label.fontid, self.text_prefix)[0] + fd[1] / 3.2
self.frame.size = [1, 1]
self.frame.position = [0, 0]
self.slice = [len(text), len(text)]
self.slice_direction = 0
self.mouse_slice_start = 0
self.mouse_slice_end = 0
#create the char width list
self._update_char_widths()
#initial call to update_selection
self.selection_refresh = 1
self.just_activated = 0
self._active = 0 # internal active state to avoid confusion from parent active chain
#blinking cursor
self.time = time.time()
#double/triple click functionality
self.click_counter = 0
self.single_click_time = 0.0
self.double_click_time = 0.0
# On Enter callback
self._on_enter_key = None
@property
def text(self):
return self.label.text
@text.setter
def text(self, value):
#setter intended for external access, internal changes can just change self.label.text
self.label.text = value
self._update_char_widths()
self.slice = [0, 0]
self.update_selection()
@property
def prefix(self):
return self.text_prefix
@prefix.setter
def prefix(self, value):
self.fd = self.system.textlib.dimensions(self.label.fontid, value)[0] + fd[1] / 3.2
self.text_prefix = value
@property
def on_enter_key(self):
"""A callback for when the enter key is pressed while the TextInput has focus"""
return self._on_enter_key
@on_enter_key.setter
def on_enter_key(self, value):
self._on_enter_key = WeakMethod(value)
#utility functions
def _update_char_widths(self):
self.char_widths = []
for char in self.text:
self.char_widths.append(self.system.textlib.dimensions(self.label.fontid, char * 20)[0] / 20)
def select_all(self):
"""Change the selection to include all of the text"""
self.slice = [0, len(self.text)]
self.update_selection()
def select_none(self):
"""Change the selection to include none of the text"""
self.slice = [0, 0]
self.update_selection()
#Activation Code
	def activate(self):
		"""Give this input keyboard focus and switch to the active color set."""
		# Frozen widgets refuse focus entirely.
		if self.frozen:
			return
		self.system.focused_widget = self
		self.swapcolors(1)
		self.colormode = 1
		# Optionally pre-select the whole text when focus is gained.
		if self.input_options & BGUI_INPUT_SELECT_ALL:
			self.slice = [0, len(self.text)]
			self.slice_direction = -1
		# just_activated lets the mouse handler treat the focusing click specially.
		self.just_activated = 1
		self._active = 1
	def deactivate(self):
		"""Drop keyboard focus and restore the inactive color set."""
		# Hand focus back to the system (the root widget).
		self.system.focused_widget = self.system
		self.swapcolors(0)
		self.colormode = 0
		self.just_activated = 0
		self._active = 0
def swapcolors(self, state=0): # 0 inactive 1 active
self.frame.colors = [self.colors["frame"][state]] * 4
self.frame.border = self.border_size[state]
self.frame.border_color = self.colors["border"][state]
self.highlight.colors = [self.colors["highlight"][state]] * 4
self.label.color = self.colors["text"][state]
if state == 0:
self.cursor.colors = [[0.0, 0.0, 0.0, 0.0]] * 4
else:
self.cursor.colors = [self.colors["text"][state]] * 4
#Selection Code
	def update_selection(self):
		"""Reposition the highlight and cursor frames to match self.slice.

		Pixel offsets are measured from the rendered prefix width (self.fd)
		plus the width of the text up to each slice index.
		"""
		left = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[0]])[0]
		right = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[1]])[0]
		self.highlight.position = [left, 1]
		self.highlight.size = [right - left, self.frame.size[1] * .8]
		# Cursor sits on the "moving" end of the selection: the left edge when
		# the selection grew leftwards (or is empty), the right edge otherwise.
		if self.slice_direction in [0, -1]:
			self.cursor.position = [left, 1]
		else:
			self.cursor.position = [right, 1]
		self.cursor.size = [2, self.frame.size[1] * .8]
def find_mouse_slice(self, pos):
cmc = self.calc_mouse_cursor(pos)
mss = self.mouse_slice_start
self.mouse_slice_end = cmc
if cmc < mss:
self.slice_direction = -1
self.slice = [self.mouse_slice_end, self.mouse_slice_start]
elif cmc > mss:
self.slice_direction = 1
self.slice = [self.mouse_slice_start, self.mouse_slice_end]
else:
self.slice_direction = 0
self.slice = [self.mouse_slice_start, self.mouse_slice_start]
self.selection_refresh = 1
def calc_mouse_cursor(self, pos):
adj_pos = pos[0] - (self.position[0] + self.fd)
find_slice = 0
i = 0
for entry in self.char_widths:
if find_slice + entry > adj_pos:
if abs((find_slice + entry) - adj_pos) >= abs(adj_pos - find_slice):
return i
else:
return i + 1
else:
find_slice += entry
i += 1
self.time = time.time() - 0.501
return i
def _handle_mouse(self, pos, event):
"""Extend function's behaviour by providing focus to unfrozen inactive TextInput,
swapping out colors.
"""
if self.frozen:
return
if event == BGUI_MOUSE_CLICK:
self.mouse_slice_start = self.calc_mouse_cursor(pos)
if not self._active:
self.activate()
if not self.input_options & BGUI_INPUT_SELECT_ALL:
self.find_mouse_slice(pos)
elif event == BGUI_MOUSE_ACTIVE:
if not self.just_activated or self.just_activated and not self.input_options & BGUI_INPUT_SELECT_ALL:
self.find_mouse_slice(pos)
if event == BGUI_MOUSE_RELEASE:
self.selection_refresh = 1
if self.slice[0] == self.slice[1]:
self.slice_direction = 0
self.just_activated = 0
#work out single / double / triple clicks
if self.click_counter == 0:
self.single_click_time = time.time()
self.click_counter = 1
elif self.click_counter == 1:
if time.time() - self.single_click_time < .2:
self.click_counter = 2
self.double_click_time = time.time()
words = self.text.split(" ")
i = 0
for entry in words:
if self.slice[0] < i + len(entry):
self.slice = [i, i + len(entry) + 1]
break
i += len(entry) + 1
else:
self.click_counter = 1
self.single_click_time = time.time()
elif self.click_counter == 2:
if time.time() - self.double_click_time < .2:
self.click_counter = 3
self.slice = [0, len(self.text)]
self.slice_direction = -1
else:
self.click_counter = 1
self.single_click_time = time.time()
elif self.click_counter == 3:
self.single_click_time = time.time()
s |
}
# Athena query statement length limit
MAX_QUERY_LENGTH = 262144
def add_partition_statements(partitions, bucket, table_name):
    """Generate ALTER TABLE commands from existing partitions. It will yield Athena
    statement string(s); the length of each string will be less than the Athena query
    statement length limit, 262144 bytes.
    https://docs.aws.amazon.com/athena/latest/ug/service-limits.html
    Args:
        partitions (set): The unique set of partitions gathered from Athena
        bucket (str): The bucket name
        table_name (str): The name of the Athena table
    Yields:
        string: The ALTER TABLE statements to add the new partitions
    """
    # Each add partition statement starts with "ALTER TABLE"
    initial_statement = 'ALTER TABLE {} ADD IF NOT EXISTS'.format(table_name)
    initial_statement_len = len(initial_statement)
    # The statement is accumulated as a list of strings and joined on yield
    statement = [initial_statement]
    statement_len = initial_statement_len
    fmt_values = {
        'bucket': bucket,
        'table_name': table_name
    }
    for partition in sorted(partitions):
        parts = PARTITION_PARTS.match(partition)
        if not parts:
            # Skip entries that do not match the expected partition pattern
            continue
        fmt_values.update(parts.groupdict())
        partition_stmt = PARTITION_STMT.format(**fmt_values)
        partition_stmt_len = len(partition_stmt)
        # A space is added between substrings when joining the whole statement
        space_count = len(statement)
        # Monitor the length of the whole statement and make sure it won't exceed the limit
        if statement_len + partition_stmt_len + space_count >= MAX_QUERY_LENGTH:
            # If the length of the whole statement is about to exceed the limit, yield
            # the statement and reset it for the rest of the partitions
            yield ' '.join(statement)
            statement = [initial_statement]
            statement_len = initial_statement_len
        statement_len += partition_stmt_len
        statement.append(partition_stmt)
    yield ' '.join(statement)
def logs_schema_to_athena_schema(log_schema, ddl_statement=True):
    """Convert streamalert log schema to athena schema
    Args:
        log_schema (dict): StreamAlert log schema object.
        ddl_statement (bool): Indicate if the Athena table created by Athena
            DDL query or terraform aws_glue_catalog_table resource
    Returns:
        athena_schema (dict): Equivalent Athena schema used for generating create table statement
    """
    converted = {}
    for name, value_type in log_schema.items():
        # Backticks keep key names usable when issuing raw Athena DDL.
        column = '`{}`'.format(name) if ddl_statement else name
        if value_type == {}:
            # Empty dict placeholder
            converted[column] = SCHEMA_TYPE_MAPPING[dict]
        elif value_type == []:
            # Empty array placeholder
            converted[column] = SCHEMA_TYPE_MAPPING[list]
        elif isinstance(value_type, dict):
            # Nested schema: recurse with the same ddl flag
            converted[column] = logs_schema_to_athena_schema(value_type, ddl_statement)
        else:
            converted[column] = SCHEMA_TYPE_MAPPING[value_type]
    return converted
def unique_values_from_query(query_result):
    """Simplify Athena query results into a set of values.
    Useful for listing tables, partitions, databases, enable_metrics
    Args:
        query_result (dict): The result of run_athena_query
    Returns:
        set: Unique values from the query result
    """
    unique = set()
    for row in query_result['ResultSet']['Rows']:
        for datum in row['Data']:
            unique.update(datum.values())
    return unique
def format_schema_tf(schema):
    """Format schema for an Athena table for terraform.
    Args:
        schema (dict): Equivalent Athena schema used for generating create table statement
    Returns:
        formatted_schema (list(tuple))
    """
    formatted_schema = []
    for name in sorted(schema):
        value = schema[name]
        if isinstance(value, str):
            formatted_schema.append((name.lower(), value))
        elif isinstance(value, dict):
            # Nested structs are rendered as struct<key:type,...>
            inner = ','.join(
                '{0}:{1}'.format(sub.lower(), value[sub]) for sub in sorted(value)
            )
            formatted_schema.append((name.lower(), 'struct<{}>'.format(inner)))
    return formatted_schema
def generate_alerts_table_schema():
    """Generate the schema for alerts table in terraform by using a fake alert
    Returns:
        list(tuple): Athena schema in terraform format, as produced by format_schema_tf
    """
    # A placeholder alert is enough to obtain the full output structure
    alert = Alert('temp_rule_name', {}, {})
    output = alert.output_dict()
    schema = record_to_schema(output)
    # ddl_statement=False: keys are not backtick-quoted for terraform
    athena_schema = logs_schema_to_athena_schema(schema, False)
    return format_schema_tf(athena_schema)
def generate_data_table_schema(config, table, schema_override=None):
    """Generate the schema for data table in terraform
    Args:
        config (CLIConfig): Loaded StreamAlert config
        table (string): The name of data table
        schema_override (list): Optional 'column_name=column_type' strings that
            replace the type of existing columns in the generated schema
    Returns:
        list(tuple): Athena schema in terraform format, or None if the table
            is not enabled via Firehose
    """
    enabled_logs = FirehoseClient.load_enabled_log_sources(
        config['global']['infrastructure']['firehose'],
        config['logs']
    )
    # Convert special characters in schema name to underscores
    sanitized_table_name = FirehoseClient.sanitized_value(table)
    # Check that the log type is enabled via Firehose
    if sanitized_table_name not in enabled_logs:
        LOGGER.error('Table name %s missing from configuration or '
                     'is not enabled.', sanitized_table_name)
        return None
    log_info = config['logs'][enabled_logs.get(sanitized_table_name)]
    schema = dict(log_info['schema'])
    sanitized_schema = FirehoseClient.sanitize_keys(schema)
    # ddl_statement=False: keys are not backtick-quoted for terraform
    athena_schema = logs_schema_to_athena_schema(sanitized_schema, False)
    # Add envelope keys to Athena Schema; they are nested under a single column
    configuration_options = log_info.get('configuration')
    if configuration_options:
        envelope_keys = configuration_options.get('envelope_keys')
        if envelope_keys:
            sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
            athena_schema['streamalert:envelope_keys'] = logs_schema_to_athena_schema(
                sanitized_envelope_key_schema, False
            )
    # Handle Schema overrides
    # This is useful when an Athena schema needs to differ from the normal log schema
    if schema_override:
        for override in schema_override:
            column_name, column_type = override.split('=')
            # NOTE(review): this format call is a no-op -- the column name is
            # used as-is (keys above are not backtick-quoted either)
            column_name = '{}'.format(column_name)
            if column_name in athena_schema:
                athena_schema[column_name] = column_type
                LOGGER.info('Applied schema override: %s:%s', column_name, column_type)
            else:
                LOGGER.error(
                    'Schema override column %s not found in Athena Schema, skipping',
                    column_name
                )
    return format_schema_tf(athena_schema)
def generate_artifacts_table_schema():
"""Generate the schema for artifacts table in terraform by using a test artifact instance
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
artifact = artifact = Artifact(
normalized_type='test_normalized_type',
value='test_value',
source_type='test_source_type',
record_id='test_record_id',
function=None
)
schema = record_to_schema(artifact.artifact)
athena_schema = logs_schema_to_athena_schema(schema, Fal |
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Base parameter translation between module arguments and the F5 REST API."""
    # REST attribute name -> module option name
    api_map = {
        'routeDomain': 'route_domain',
        'answerDefaultZones': 'answer_default_zones',
        'forwardZones': 'forward_zones',
    }
    # Attributes sent to the REST API on create/update
    api_attributes = [
        'routeDomain',
        'answerDefaultZones',
        'forwardZones',
    ]
    # Values reported back to the user
    returnables = [
        'route_domain',
        'answer_default_zones',
        'forward_zones',
    ]
    # Values considered when computing a difference
    updatables = [
        'route_domain',
        'answer_default_zones',
        'forward_zones',
    ]
    @property
    def route_domain(self):
        # Fully-qualify the route domain name with the partition.
        if self._values['route_domain'] is None:
            return None
        return fq_name(self.partition, self._values['route_domain'])
    @property
    def answer_default_zones(self):
        # Normalize the module's boolean-ish input via flatten_boolean.
        return flatten_boolean(self._values['answer_default_zones'])
class ApiParameters(Parameters):
    @property
    def forward_zones(self):
        """Read forward zones from the device and sort each nameserver list."""
        zones = self._values['forward_zones']
        if zones is None:
            return None
        collected = []
        for zone in zones:
            entry = {
                'name': zone['name'],
                'nameservers': sorted(ns['name'] for ns in zone.get('nameservers', [])),
            }
            collected.append(entry)
        return collected
class ModuleParameters(Parameters):
    @property
    def forward_zones(self):
        """Normalize user-supplied forward zones to sorted 'address:port' entries."""
        zones = self._values['forward_zones']
        if zones is None:
            return None
        if zones in ['', 'none']:
            # Explicit request to clear the zone list.
            return ''
        normalized = []
        for zone in zones:
            if 'name' not in zone:
                raise F5ModuleError(
                    "A 'name' key must be provided when specifying a list of forward zones."
                )
            entry = {'name': zone['name'], 'nameservers': []}
            if 'nameservers' in zone:
                for server in zone['nameservers']:
                    if 'address' not in server:
                        raise F5ModuleError(
                            "An 'address' key must be provided when specifying a list of forward zone nameservers."
                        )
                    entry['nameservers'].append(
                        '{0}:{1}'.format(server['address'], server.get('port', 53))
                    )
                entry['nameservers'].sort()
            normalized.append(entry)
        return normalized
class Changes(Parameters):
    def to_return(self):
        """Collect returnable attributes into a dict for reporting.

        Best-effort: if any attribute access raises, the partially built
        (and then unfiltered) result collected so far is returned as-is.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    @property
    def forward_zones(self):
        """Transform zones into the nested dict shape the REST API expects."""
        zones = self._values['forward_zones']
        if zones is None:
            return None
        payload = []
        for zone in zones:
            entry = {'name': zone['name']}
            if 'nameservers' in zone:
                entry['nameservers'] = [{'name': ns} for ns in zone['nameservers']]
            payload.append(entry)
        return payload
class ReportableChanges(Changes):
    # No transformation is needed when reporting changes back to the user.
    pass
class Difference(object):
    """Compute the difference between desired ('want') and current ('have') parameters."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Prefer a specialized comparator property defined on this class;
        # fall back to the generic inequality check otherwise.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Generic comparison: report the wanted value only when it differs.
        # Falls through (implicit None) when the values already match.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1
    @property
    def forward_zones(self):
        # No zones requested: nothing to compare.
        if self.want.forward_zones is None:
            return None
        # Asked to clear zones and none exist: no change.
        if self.have.forward_zones is None and self.want.forward_zones in ['', 'none']:
            return None
        # Asked to clear zones that do exist: send an empty list.
        if self.have.forward_zones is not None and self.want.forward_zones in ['', 'none']:
            return []
        # Nothing configured yet: push the full wanted list.
        if self.have.forward_zones is None:
            return dict(
                forward_zones=self.want.forward_zones
            )
        # Compare zone names, then per-zone nameservers, order-insensitively.
        want = sorted(self.want.forward_zones, key=lambda x: x['name'])
        have = sorted(self.have.forward_zones, key=lambda x: x['name'])
        wnames = [x['name'] for x in want]
        hnames = [x['name'] for x in have]
        if set(wnames) != set(hnames):
            return dict(
                forward_zones=self.want.forward_zones
            )
        for idx, x in enumerate(want):
            wns = x.get('nameservers', [])
            hns = have[idx].get('nameservers', [])
            if set(wns) != set(hns):
                return dict(
                    forward_zones=self.want.forward_zones
                )
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=change | d)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.c | ompare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/dns/cache/resolver/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have |
import numpy as np
from ..filters import gaussian
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,
                 volume_fraction=0.5, seed=None):
    """
    Generate synthetic binary image with several rounded blob-like objects.
    Parameters
    ----------
    length : int, optional
        Linear size of output image.
    blob_size_fraction : float, optional
        Typical linear size of blob, as a fraction of ``length``, should be
        smaller than 1.
    n_dim : int, optional
        Number of dimensions of output image.
    volume_fraction : float, default 0.5
        Fraction of image pixels covered by the blobs (where the output is 1).
        Should be in [0, 1].
    seed : int, optional
        Seed to initialize the random number generator.
    Returns
    -------
    blobs : ndarray of bools
        Output binary image
    Examples
    --------
    >>> from skimage import data
    >>> data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)
    array([[ True, False, True, True, True],
           [ True, True, True, False, True],
           [False, True, False, True, True],
           [ True, False, False, True, True],
           [ True, False, False, False, True]], dtype=bool)
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
    >>> # Finer structures
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
    >>> # Blobs cover a smaller volume fraction of the image
    >>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
    """
    # RandomState gives reproducible output for a given seed
    rs = np.random.RandomState(seed)
    shape = tuple([length] * n_dim)
    mask = np.zeros(shape)
    # Number of seed points grows as blobs get smaller, so blobs tile the image
    n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)
    # np.int was removed in NumPy >= 1.24; the builtin int is the supported spelling
    points = (length * rs.rand(n_dim, n_pts)).astype(int)
    # Index with a tuple of per-axis index arrays; the legacy list-of-arrays
    # form is deprecated/removed in modern NumPy
    mask[tuple(points)] = 1
    # Smooth the seed points so thresholding produces rounded blobs
    mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
    # Choose the threshold that yields the requested volume fraction
    threshold = np.percentile(mask, 100 * (1 - volume_fraction))
    return np.logical_not(mask < threshold)
|
reate = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# Default: False
# c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# Default: ''
# c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# Default: ''
# c.BaseIPythonApplication.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.BaseIPythonApplication.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.BaseIPythonApplication.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.BaseIPythonApplication.log_level = 30
## Whether to overwrite existing config files when copying
# Default: False
# c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# Default: 'default'
# c.BaseIPythonApplication.profile = 'default'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.BaseIPythonApplication.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.BaseIPythonApplication.show_config_json = False
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# Default: False
# c.BaseIPythonApplication.verbose_crash = False
# ------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp) configuration
# ------------------------------------------------------------------------------
## Execute the given command string.
# See also: InteractiveShellApp.code_to_run
# c.TerminalIPythonApp.code_to_run = ''
## Whether to install the default config files into the profile dir.
# See also: BaseIPythonApplication.copy_config_files
# c.TerminalIPythonApp.copy_config_files = False
## Whether to display a banner upon starting IPython.
# Default: True
# c.TerminalIPythonApp.display_banner = True
## Run the file referenced by the PYTHONSTARTUP environment
# See also: InteractiveShellApp.exec_PYTHONSTARTUP
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# See also: InteractiveShellApp.exec_files
# c.TerminalIPythonApp.exec_files = []
## lines of code to run at IPython startup.
# See also: InteractiveShellApp.exec_lines
# c.TerminalIPythonApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
# See also: InteractiveShellApp.extensions
# c.TerminalIPythonApp.extensions = []
## Path to an extra config file to load.
# See also: BaseIPythonApplication.extra_config_file
# c.TerminalIPythonApp.extra_config_file = ''
## dotted module name of an IPython extension to load.
# See also: InteractiveShellApp.extra_extension
# c.TerminalIPythonApp.extra_extension = ''
## A file to be run
# See also: InteractiveShellApp.file_to_run
# c.TerminalIPythonApp.file_to_run = ''
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# Default: False
# c.TerminalIPythonApp.force_interact = False
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# See also: InteractiveShellApp.gui
# c.TerminalIPythonApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.)
# See also: InteractiveShellApp.hide_initial_ns
# c.TerminalIPythonApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path.
# See also: InteractiveShellApp.ignore_cwd
# c.Term | inalIPythonApp.ignore_cwd = False
## Cla | ss to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
# Default: 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
# c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
##
# See also: BaseIPythonApplication.ipython_dir
# c.TerminalIPythonApp.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.TerminalIPythonApp.log_level = 30
## Configure matplotlib for interactive use with
# See also: InteractiveShellApp.matplotlib
# c.TerminalIPythonApp.matplotlib = None
## Run the module as a script.
# See also: InteractiveShellApp.module_to_run
# c.TerminalIPythonApp.module_to_run = ''
## Whether to overwrite existing config files when copying
# See also: BaseIPythonApplication.overwrite
# c.TerminalIPythonApp.overwrite = False
## The IPython profile to use.
# See also: BaseIPythonApplication.profile
# c.TerminalIPythonApp.profile = 'default'
## Pre-load matplotlib and numpy for interactive use,
# See also: InteractiveShellApp.pylab
# c.TerminalIPythonApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc.
# See also: InteractiveShellApp.pylab_import_all
# c.TerminalIPythonApp.pylab_import_all = True
## Start IPython quickly by skipping the loading of config files.
# Default: False
# c.TerminalIPythonApp.quick = False
## Reraise exceptions encountered loading IPython extensions?
# See also: InteractiveShellApp.reraise_ipython_extension_failures
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.TerminalIPythonApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.TerminalIPythonApp.show_config_json = False
## Create a massive crash report when IPython encounters what may be an
# See also: BaseIPythonApplication.verbose_crash
# c.TerminalIPythonApp.verbose_crash = False
# ------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
# Choices: any of ['all', 'last', 'last_expr', 'none', 'last_expr_or_assign']
# Default: 'last_expr'
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# Default: []
# c.InteractiveShell.ast_transformers = []
## Automatically run await statement in the top level repl.
# Default: True
# c.InteractiveShell.autoawait = True
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are pres |
"""SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE | FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 2014/01/04 01:12:18 root"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
try:
    import zipfile
    internal_zip = 1
except ImportError:
    internal_zip = 0

if internal_zip:
    zipcompression = zipfile.ZIP_DEFLATED
    def zip(target, source, env):
        """Python zip action: archive every source node into target[0],
        recursing into directory sources, honoring $ZIPCOMPRESSION."""
        compression = env.get('ZIPCOMPRESSION', 0)
        zf = zipfile.ZipFile(str(target[0]), 'w', compression)
        try:
            for s in source:
                if s.isdir():
                    # Add every regular file below the directory.
                    for dirpath, dirnames, filenames in os.walk(str(s)):
                        for fname in filenames:
                            path = os.path.join(dirpath, fname)
                            if os.path.isfile(path):
                                zf.write(path)
                else:
                    zf.write(str(s))
        finally:
            # Close the archive even if a write fails, so the file
            # handle is not leaked on error.
            zf.close()
else:
    # No zipfile module available: fall back to an external zip command.
    zipcompression = 0
    zip = "$ZIP $ZIPFLAGS ${TARGET.abspath} $SOURCES"

zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
                                   source_factory = SCons.Node.FS.Entry,
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$ZIPSUFFIX',
                                   multi = 1)
def generate(env):
    """Add Builders and construction variables for zip to an Environment."""
    # Install the Zip builder only if one is not already present.
    try:
        env['BUILDERS']['Zip']
    except KeyError:
        env['BUILDERS']['Zip'] = ZipBuilder
    env['ZIP'] = 'zip'
    env['ZIPFLAGS'] = SCons.Util.CLVar('')
    env['ZIPCOM'] = zipAction
    env['ZIPCOMPRESSION'] = zipcompression
    env['ZIPSUFFIX'] = '.zip'
def exists(env):
    """Report whether zip support is available (internal zipfile or a zip tool)."""
    if internal_zip:
        return internal_zip
    return env.Detect('zip')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) XXN, 2017
#
from __future__ import absolute_import, unicode_literals
import os, re, sys, time
import pywikibot
def main():
    """Add missing video-game descriptions to a fixed list of Wikidata items.

    For every item in the hard-coded work list:

    * if it has a publication date (P577) and is an instance (P31) of
      video game (Q7889), year-specific descriptions ("2004 video game",
      ...) are added, provided the date has at least year precision;
    * otherwise, generic descriptions ("video game", ...) are added.

    Only languages that do not already have a description are touched.
    """
    # Descriptions used when the publication year is known; ~YEAR~ is
    # replaced with the actual year.
    dated_translations = {
        'bg': 'видеоигра от ~YEAR~ година',
        'bs': 'videoigra',
        'ca': 'videojoc de ~YEAR~',
        'cs': 'videohra z roku ~YEAR~',
        'da': 'computerspil fra ~YEAR~',
        'de': 'Videospiel',
        'en': '~YEAR~ video game',
        'en-ca': '~YEAR~ video game',
        'en-gb': '~YEAR~ video game',
        'el': 'βιντεοπαιχνίδι του ~YEAR~',
        'es': 'videojuego de ~YEAR~',
        'fi': '~YEAR~ videopeli',
        'fr': 'jeu vidéo de ~YEAR~',
        'hr': 'videoigra',
        'hu': 'videojáték',
        'it': 'videogioco del ~YEAR~',
        'lv': 'videospēle',
        'mk': 'видеоигра од ~YEAR~ година',
        'nb': 'videospill fra ~YEAR~',
        'nl': 'computerspel uit ~YEAR~',
        'nn': 'dataspel frå ~YEAR~',
        'pl': 'gra wideo z ~YEAR~ roku',
        'pt': 'vídeojogo de ~YEAR~',
        'pt-br': 'jogo eletrônico de ~YEAR~',
        'ro': 'joc video din ~YEAR~',
        'ru': 'видеоигра ~YEAR~ года',
        'sco': 'video gemme',
        'sh': 'videoigra',
        'sk': 'počítačová hra z ~YEAR~',
        'sl': 'videoigra iz leta ~YEAR~',
        'sr': 'видео-игра',
        'sv': 'datorspel från ~YEAR~',
        'tr': '~YEAR~ video oyunu',
        'uk': 'відеогра ~YEAR~ року',
    }
    # Descriptions used when no usable publication date is available.
    generic_translations = {
        'bg': 'видеоигра',
        'bs': 'videoigra',
        'ca': 'videojoc',
        'cs': 'videohra',
        'da': 'computerspil',
        'de': 'Videospiel',
        'en': 'video game',
        'en-ca': 'video game',
        'en-gb': 'video game',
        'el': 'βιντεοπαιχνίδι',
        'es': 'videojuego',
        'fi': 'videopeli',
        'fr': 'jeu vidéo',
        'hr': 'videoigra',
        'hu': 'videojáték',
        'it': 'videogioco',
        'lv': 'videospēle',
        'mk': 'видеоигра',
        'nb': 'videospill',
        'nn': 'dataspel',
        'nl': 'computerspel',
        'pl': 'gra wideo',
        'pt': 'vídeojogo',
        'pt-br': 'jogo eletrônico',
        'ro': 'joc video',
        'ru': 'видеоигра',
        'sco': 'video gemme',
        'sh': 'videoigra',
        'sk': 'počítačová hra',
        'sl': 'videoigra',
        'sr': 'видео-игра',
        'sv': 'datorspel',
        'tr': 'video oyunu',
        'uk': 'відеогра',
    }
    site = pywikibot.Site('wikidata', 'wikidata')
    repo = site.data_repository()
    mylist = [
        u"Q3001778",
        u"Q37115",
        u"Q55246",
        u"Q55563",
        u"Q55611",
    ]
    for q in mylist:
        item = pywikibot.ItemPage(repo, q)
        item.get()
        claims = item.claims
        # BUGFIX: also check that P31 exists before indexing it -- the
        # original raised an uncaught KeyError for items lacking P31.
        # (The per-language loop that rebuilt the same translation table
        # ~34 times per item was also removed; one pass is sufficient.)
        if ('P577' in claims and 'P31' in claims
                and claims['P31'][0].getTarget().title() == 'Q7889'):
            try:
                published = claims['P577'][0].getTarget()
                if published.toWikibase()['precision'] < 9:
                    # Date known less precisely than the year: leave the
                    # item untouched (matches original behavior).
                    continue
                year = str(int(published.toTimestr()[8:12]))
            except Exception:
                # Malformed/unreadable date claim: skip this item.
                continue
            translations = {lang: text.replace('~YEAR~', year)
                            for lang, text in dated_translations.items()}
        else:
            translations = generic_translations
        descriptions = item.descriptions
        addedlangs = sorted(lang for lang in translations
                            if lang not in descriptions)
        if not addedlangs:
            continue
        for lang in addedlangs:
            descriptions[lang] = translations[lang]
        summary = 'Bot: Adding descriptions (%s languages): %s' % (
            len(addedlangs), ', '.join(addedlangs))
        try:
            item.editEntity({'descriptions': descriptions}, summary=summary)
            pywikibot.output(u'{} - \03{{lightgreen}}{}\03{{default}}'.format(
                q, translations['en']))
        except Exception:
            pywikibot.output('Error while saving {}'.format(q))
            continue
if __name__ == "__main__":
main()
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = [
'Neema Kotonya (neemak@google.com)',
'Gun Pinyo (gunpinyo@google.com)'
]
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
from models import services
from modules.math import messages
MATH_MODULE_URI = '/modules/math'
RESOURCES_URI = MATH_MODULE_URI + '/resources'
MATHJAX_URI = MATH_MODULE_URI + '/MathJax'
class MathTag(tags.ContextAwareTag):
    """Custom tag rendering mathematical notation through MathJax."""

    binding_name = 'gcb-math'

    @classmethod
    def name(cls):
        return 'Mathematical Formula'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def render(self, node, context):
        """Emit a <script> element that MathJax typesets client-side."""
        script = cElementTree.XML('<script/>')
        # The formula is "text" type in the schema, so it arrives in the
        # tag's body.
        script.text = node.text
        is_mml = node.attrib.get('input_type') == 'MML'
        script.set('type', 'math/mml' if is_mml else 'math/tex')
        return script

    def rollup_header_footer(self, context):
        """Include MathJax library only when a math tag is present."""
        header = tags.html_string_to_element_tree("""
            <script src="%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
            </script>""" % MATHJAX_URI)
        footer = tags.html_string_to_element_tree('')
        return (header, footer)

    def get_icon_url(self):
        return RESOURCES_URI + '/math.png'

    def get_schema(self, unused_handler):
        """Build the editor schema: input type selector plus formula body."""
        registry = schema_fields.FieldRegistry(MathTag.name())
        registry.add_property(
            schema_fields.SchemaField(
                'input_type', 'Type', 'string', i18n=False,
                optional=True,
                select_data=[('TeX', 'TeX'), ('MML', 'MathML')],
                extra_schema_dict_values={'value': 'TeX'},
                description=services.help_urls.make_learn_more_message(
                    messages.RTE_MATH_TYPE, 'math:math:input_type')))
        registry.add_property(
            schema_fields.SchemaField(
                'formula', 'Mathematical Formula', 'text',
                optional=True,
                description=messages.RTE_MATH_MATHEMATICAL_FORMULA))
        return registry
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_disable():
        tags.Registry.remove_tag_binding(MathTag.binding_name)

    def on_module_enable():
        tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)

    # Routes served outside course namespaces: tag resources plus the
    # MathJax library unpacked straight out of the bundled zip files.
    global_routes = [
        (RESOURCES_URI + '/.*', tags.ResourcesHandler),
        (MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-fonts-2.3.0.zip'))),
        (MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-2.3.0.zip'))),
    ]
    namespaced_routes = []

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Mathematical Formula Display',
        'Provides a custom tag to embed mathematical formulas using TeX or MML.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable)
    return custom_module
|
"""
ICS Ops Common Library
"""
import os
from os.path import dirname
from os.path import realpath
from os.path import join as pathjoin
import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    """Load AWS credentials from an ini file and wire them into boto/botocore.

    :param filepath: path to the credential file; when None or nonexistent,
        falls back to the bundled ``opslib.ini`` next to this module
    :param enable_boto: initialize ``boto.config`` from the file
    :param enable_botocore: monkey-patch ``botocore.credentials.get_credentials``
        to return the loaded keys
    :returns: ``(access_key, secret_key)`` when both were found, else ``None``
    """
    # Default credential file will be located at current folder
    if filepath is None or not os.path.exists(filepath):
        pwdpath = dirname(realpath(__file__))
        filepath = pathjoin(pwdpath, CONFIG)
    # BUGFIX: these were previously assigned only inside the enable_boto
    # branch, so calling with enable_boto=False raised NameError below.
    access_key = None
    secret_key = None
    if enable_boto:
        # Initialize credentials for boto
        from boto.pyami.config import Config
        boto.config = Config(filepath)
        access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
        secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
        # FIXME: a trick when the value is empty
        if not access_key or not secret_key:
            boto.config.remove_section('Credentials')
    if enable_botocore:
        # Initialize credentials for botocore
        import botocore.credentials
        if access_key and secret_key:
            def get_credentials(session, metadata=None):
                return botocore.credentials.Credentials(access_key, secret_key)
            botocore.credentials.get_credentials = get_credentials
    if access_key and secret_key:
        return access_key, secret_key
def init_logging(name=LOG_NAME, logfile=None,
                 console=False, loglevel="INFO",
                 enable_boto_log=False):
    """Create and return the shared opslib logger.

    The logger is stored in the module-global ``logger``; when
    *enable_boto_log* is set, boto's log output is redirected to it too.
    """
    from opslib.icslog import IcsLog
    global logger
    logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
    if enable_boto_log:
        boto.log = logger
    return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
logger.error(l)
logger.error(ptraj.dtype)
logger.error(l.dtype)
d = metric.one_to_all(ptraj, pmedoids, l)
argmin = np.argmin(d)
new_assignments[l] = new_medoids[argmin]
new_distances[l] = d[argmin]
new_cost = np.sum(new_distances)
if new_cost < current_cost:
logger.info('Accept')
medoids = new_medoids
assignments = new_assignments
distance_to_current = new_distances
current_cost = new_cost
j = 0
else:
j += 1
logger.info('Reject')
if current_cost < min_cost:
min_cost = current_cost
optimal_medoids = medoids.copy()
optimal_assignments = assignments.copy()
optimal_distances = distance_to_current.copy()
return optimal_medoids, optimal_assignments, optimal_distances
def _clarans_helper(args):
    """Unpack a single argument tuple and run ``_clarans`` with it.

    Thin shim so ``_clarans`` can be driven by APIs that pass one
    positional argument per task (presumably a ``Pool.map``-style
    caller -- confirm with the call sites).
    """
    return _clarans(*args)
def _hybrid_kmedoids(metric, ptraj, k=None, distance_cutoff=None, num_iters=10, local_swap=True, norm_exponent=2.0, too_close_cutoff=0.0001, ignore_max_objective=False, initial_medoids='kcenters', initial_assignments=None, initial_distance=None):
    """Run the hybrid kmedoids clustering algorithm to cluster a trajectory

    References
    ----------
    .. [1] Beauchamp, K. MSMBuilder2

    Parameters
    ----------
    metric : msmbuilder.metrics.AbstractDistanceMetric
        A metric capable of handling `ptraj`
    ptraj : prepared trajectory
        ptraj return by the action of the preceding metric on a msmbuilder trajectory
    k : int
        number of desired clusters
    distance_cutoff : float, optional
        cutoff passed to the kcenters seeding step (used only when
        `initial_medoids` is 'kcenters')
    num_iters : int
        number of swaps to attempt per medoid
    local_swap : boolean, optional
        If true, proposed swaps will be between a medoid and a data point
        currently assigned to that medoid. If false, the data point for the
        proposed swap is selected randomly.
    norm_exponent : float, optional
        exponent to use in pnorm of the distance to generate objective function
    too_close_cutoff : float, optional
        Summarily reject proposed swaps if the distance of the medoid to the trial
        medoid is less than this value
    ignore_max_objective : boolean, optional
        Ignore changes to the distance of the worst classified point, and only
        reject or accept swaps based on changes to the p norm of all the data
        points.
    initial_medoids : {'kcenters', 'random', ndarray}
        If 'kcenters', run kcenters clustering first to get the initial medoids,
        and then run the swaps to improve it. If 'random', select the medoids at
        random. Otherwise, initial_medoids should be a numpy array of the
        indices of the medoids.
    initial_assignments : {None, ndarray}, optional
        If None, initial_assignments will be computed based on the
        initial_medoids. If you pass in your own initial_medoids, you can also
        pass in initial_assignments to avoid recomputing them.
    initial_distance : {None, ndarray}, optional
        If None, initial_distance will be computed based on the initial_medoids.
        If you pass in your own initial_medoids, you can also pass in
        initial_distance to avoid recomputing them.

    Returns
    -------
    (medoids, assignments, distance_to_current) : medoid frame indices, the
        medoid each frame is assigned to, and each frame's distance to its
        assigned medoid.
    """
    # --- validate the cutoff criteria ---
    if k is None and distance_cutoff is None:
        raise ValueError("I need some cutoff criterion! both k and distance_cutoff can't both be none")
    if k is None and distance_cutoff <= 0:
        raise ValueError("With k=None you need to supply a legit distance_cutoff")
    if distance_cutoff is None:
        # set it below anything that can ever be reached
        distance_cutoff = -1
    num_frames = len(ptraj)
    # --- build the initial clustering (seed medoids + assignments) ---
    if initial_medoids == 'kcenters':
        initial_medoids, initial_assignments, initial_distance = _kcenters(metric, ptraj, k, distance_cutoff)
    elif initial_medoids == 'random':
        if k is None:
            raise ValueError('You need to supply the number of clusters, k, you want')
        initial_medoids = np.random.permutation(np.arange(num_frames))[0:k]
        initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)
    else:
        if not isinstance(initial_medoids, np.ndarray):
            raise ValueError('Initial medoids should be a numpy array')
        if initial_assignments is None or initial_distance is None:
            initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)
    assignments = initial_assignments
    distance_to_current = initial_distance
    medoids = initial_medoids
    pgens = ptraj[medoids]  # NOTE(review): computed but never used below -- confirm it can be dropped
    k = len(initial_medoids)
    # Objective: p-norm of all frame-to-medoid distances; the worst-case
    # (max) distance is tracked separately as a secondary criterion.
    obj_func = p_norm(distance_to_current, p=norm_exponent)
    max_norm = p_norm(distance_to_current, p='max')
    # Sanity checks on the initial state (distinct medoids, consistent
    # assignments).
    if not np.all(np.unique(medoids) == np.sort(medoids)):
        raise ValueError('Initial medoids must be distinct')
    if not np.all(np.unique(assignments) == sorted(medoids)):
        raise ValueError('Initial assignments dont match initial medoids')
    for iteration in xrange(num_iters):
        for medoid_i in xrange(k):
            if not np.all(np.unique(assignments) == sorted(medoids)):
                raise ValueError('Loop invariant lost')
            # Pick the swap candidate: any frame, or (local_swap) one that
            # is currently assigned to the medoid being replaced.
            if local_swap is False:
                trial_medoid = np.random.randint(num_frames)
            else:
                trial_medoid = random.choice(np.where(assignments == medoids[medoid_i])[0])
            old_medoid = medoids[medoid_i]
            if old_medoid == trial_medoid:
                continue
            new_medoids = medoids.copy()
            new_medoids[medoid_i] = trial_medoid
            pmedoids = ptraj[new_medoids]
            new_distances = distance_to_current.copy()
            new_assignments = assignments.copy()
            logger.info('Sweep %d, swapping medoid %d (conf %d) for conf %d...', iteration, medoid_i, old_medoid, trial_medoid)
            distance_to_trial = metric.one_to_all(ptraj, ptraj, trial_medoid)
            if not np.all(np.isfinite(distance_to_trial)):
                raise ValueError('distance metric returned nonfinite distances')
            # Reject swaps between near-duplicate conformations outright.
            if distance_to_trial[old_medoid] < too_close_cutoff:
                logger.info('Too close')
                continue
            # Frames strictly closer to the trial medoid simply move over.
            assigned_to_trial = np.where(distance_to_trial < distance_to_current)[0]
            new_assignments[assigned_to_trial] = trial_medoid
            new_distances[assigned_to_trial] = distance_to_trial[assigned_to_trial]
            # Frames that belonged to the replaced medoid but are not closer
            # to the trial one must be reassigned to their nearest medoid.
            ambiguous = np.where((new_assignments == old_medoid) & \
                        (distance_to_trial >= distance_to_current))[0]
            for l in ambiguous:
                d = metric.one_to_all(ptraj, pmedoids, l)
                if not np.all(np.isfinite(d)):
                    raise ValueError('distance metric returned nonfinite distances')
                argmin = np.argmin(d)
                new_assignments[l] = new_medoids[argmin]
                new_distances[l] = d[argmin]
            # Accept only if the p-norm improves and (unless ignored) the
            # worst-case distance does not get worse.
            new_obj_func = p_norm(new_distances, p=norm_exponent)
            new_max_norm = p_norm(new_distances, p='max')
            if new_obj_func < obj_func and (new_max_norm <= max_norm or ignore_max_objective is True):
                logger.info("Accept. New f = %f, Old f = %f", new_obj_func, obj_func)
                medoids = new_medoids
                assignments = new_assignments
                distance_to_current = new_distances
                obj_func = new_obj_func
                max_norm = new_max_norm
            else:
                logger.info("Reject. New f = %f, Old f = %f", new_obj_func, obj_func)
    return medoids, assignments, distance_to_current
#####################################################################
# #
# End Clustering Functions #
# Begin Clustering Classes #
# #
########################################################### |
# Copyright 2014 | Cloudbase Solutions Srl
#
# | Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Hyper-V WMI job constants (values defined by the CIM_ConcreteJob /
# Msvm_* WMI schema).
WMI_JOB_STATUS_STARTED = 4096  # method return code: async job was started
WMI_JOB_STATE_RUNNING = 4      # JobState: job still executing
WMI_JOB_STATE_COMPLETED = 7    # JobState: job finished successfully
# Virtual hard disk type values.
VHD_TYPE_FIXED = 2             # fixed-size VHD
VHD_TYPE_DYNAMIC = 3           # dynamically expanding VHD
|
"""timezone | at metavj level
Revision ID: 224621d9edde
Revises: 14346346596e
Create Date: 2015-12-21 16:52:30.275508
"""
# revision identifiers, used by Alembic.
revision = '224621d9edde'
down_revision = '5a590ae95255'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
def upgrade():
    """Create navitia.timezone/tz_dst and move the timezone onto meta_vj."""
    op.create_table('timezone',
    sa.Column('id', sa.BIGINT(), nullable=False),
    sa.Column('name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    schema='navitia'
    )
    # Daylight-saving periods: each row gives the UTC offset applied to a
    # timezone between beginning_date and end_date.
    op.create_table('tz_dst',
    sa.Column('id', sa.BIGINT(), nullable=False),
    sa.Column('tz_id', sa.BIGINT(), nullable=False),
    sa.Column('beginning_date', sa.DATE(), nullable=False),
    sa.Column('end_date', sa.DATE(), nullable=False),
    sa.Column('utc_offset', sa.INTEGER(), nullable=False),
    sa.ForeignKeyConstraint(['tz_id'], [u'navitia.timezone.id'], name=u'associated_tz_dst_fkey'),
    sa.PrimaryKeyConstraint('id'),
    schema='navitia'
    )
    # The timezone now lives on meta_vj; the per-vehicle_journey UTC offset
    # becomes redundant and is dropped.
    op.add_column(u'meta_vj', sa.Column('timezone', sa.BIGINT(), nullable=True), schema=u'navitia')
    op.drop_column(u'vehicle_journey', 'utc_to_local_offset', schema=u'navitia')
def downgrade():
    """Revert upgrade(): restore the per-vehicle_journey offset, drop tz tables."""
    op.drop_column(u'meta_vj', 'timezone', schema=u'navitia')
    op.drop_table('tz_dst', schema='navitia')
    op.drop_table('timezone', schema='navitia')
    op.add_column(u'vehicle_journey', sa.Column('utc_to_local_offset', sa.BIGINT(), nullable=True), schema=u'navitia')
|
__author__ = 'mk'
import matplotlib.pyplot as plt
import sys
import math
import numpy as np
da | taDir = sys.argv[1]
resDir = sys.argv[2]
plt.figure(figsize=(8,4))
algLabel=['naive','cou','zigzag','pingpong','MK','LL']
for i in range(0,6,1):
filePath = dataDir + str(i) + '_overhead.dat'
file = open(filePath)
x = []
y = []
for eachLine in file.readlines():
xStr,yStr = eachLine.split()
x.append(int(xStr))
y.append(math.log(float(yStr)) | )
file.close()
plt.plot(x,y,label=algLabel[i],linewidth=1)
plt.xlabel("Data Size")
plt.ylabel("Overhead")
plt.title("Overhead Per Checkpoint")
plt.legend()
plt.savefig(resDir + "OverheadPerCheckpoint.pdf")
|
fro | m django.utils.translation import ugettext_lazy as _
# Legend Position
def get_legend_class(position):
    """Return the CSS class name for a legend *position*, e.g. 'legend-top'."""
    return 'legend-%s' % (position,)
class LEGEND_POSITIONS:
    """Legend placements plus their (css_class, label) choice tuples."""
    # NOTE(review): get_legend_class() str()-ifies the lazy translations at
    # import time, so the CSS class text depends on the language active when
    # this module is first imported -- confirm this is intended.
    BOTTOM = _('bottom')
    TOP = _('top')
    LEFT = _('left')
    RIGHT = _('right')
    get_choices = ((get_legend_class(BOTTOM), BOTTOM),
                   (get_legend_class(TOP), TOP),
                   (get_legend_class(LEFT), LEFT),
                   (get_legend_class(RIGHT), RIGHT),)
def get_chart_position_class(position):
    """Return the CSS class name for a chart *position*, e.g. 'chart-left'."""
    return 'chart-%s' % (position,)
class CHART_POSITIONS:
    """Chart placements plus their (css_class, label) choice tuples."""
    # NOTE(review): as with LEGEND_POSITIONS, the lazy translations are
    # evaluated at import time by get_chart_position_class() -- confirm.
    CENTER = _('center')
    LEFT = _('left')
    RIGHT = _('right')
    get_choices = ((get_chart_position_class(CENTER), CENTER),
                   (get_chart_position_class(LEFT), LEFT),
                   (get_chart_position_class(RIGHT), RIGHT),)
import asyncio
from collections import OrderedDict
from functools import partial
import logging
from quamash import QtGui, QtCore
from pyqtgraph import dockarea
from pyqtgraph import LayoutWidget
from artiq.protocols.sync_struct import Subscriber
from artiq.tools import short_format
from artiq.gui.tools import DictSyncModel
from artiq.gui.displays import *
try:
QSortFilterProxyModel = QtCore.QSortFilterProxyModel
except AttributeError:
QSortFilterProxyModel = QtGui.QSortFilterProxyModel
logger = logging.getLogger(__name__)
class DatasetsModel(DictSyncModel):
    """Table model mapping each dataset key to (persistent-flag, value)."""

    def __init__(self, parent, init):
        DictSyncModel.__init__(self, ["Dataset", "Persistent", "Value"],
                               parent, init)

    def sort_key(self, k, v):
        # Rows are ordered by dataset name.
        return k

    def convert(self, k, v, column):
        """Render column 0/1/2 as name, persistence flag, formatted value."""
        if column == 0:
            return k
        if column == 1:
            return "Y" if v[0] else "N"
        if column == 2:
            return short_format(v[1])
        raise ValueError
def _get_display_type_name(display_cls):
    """Reverse lookup: map a display class to its registered name (or None)."""
    matches = (name for name, (_, cls) in display_types.items()
               if cls is display_cls)
    return next(matches, None)
class DatasetsDock(dockarea.Dock):
    """Dock listing ARTIQ datasets with a search filter and pluggable displays."""

    def __init__(self, dialog_parent, dock_area):
        dockarea.Dock.__init__(self, "Datasets", size=(1500, 500))
        self.dialog_parent = dialog_parent
        self.dock_area = dock_area
        grid = LayoutWidget()
        self.addWidget(grid)
        # Search box filtering the dataset table below.
        self.search = QtGui.QLineEdit()
        self.search.setPlaceholderText("search...")
        self.search.editingFinished.connect(self._search_datasets)
        # NOTE(review): the column argument is omitted here; presumably
        # (0, 0) was intended -- confirm against LayoutWidget.addWidget.
        grid.addWidget(self.search, 0, )
        self.table = QtGui.QTableView()
        self.table.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        self.table.horizontalHeader().setResizeMode(
            QtGui.QHeaderView.ResizeToContents)
        grid.addWidget(self.table, 1, 0)
        add_display_box = QtGui.QGroupBox("Add display")
        grid.addWidget(add_display_box, 1, 1)
        display_grid = QtGui.QGridLayout()
        add_display_box.setLayout(display_grid)
        # One button per registered display type.
        for n, name in enumerate(display_types.keys()):
            btn = QtGui.QPushButton(name)
            display_grid.addWidget(btn, n, 0)
            btn.clicked.connect(partial(self.create_dialog, name))
        self.displays = dict()

    def _search_datasets(self):
        # Apply the entered text as a fixed-string filter on the proxy model.
        self.table_model_filter.setFilterFixedString(self.search.displayText())

    def get_dataset(self, key):
        """Return the current value of dataset *key*."""
        return self.table_model.backing_store[key][1]

    async def sub_connect(self, host, port):
        """Subscribe to the master's "datasets" notifier."""
        self.subscriber = Subscriber("datasets", self.init_datasets_model,
                                     self.on_mod)
        await self.subscriber.connect(host, port)

    async def sub_close(self):
        await self.subscriber.close()

    def init_datasets_model(self, init):
        # Wrap the synced model in a sort/filter proxy so searching does not
        # disturb the backing store.
        self.table_model = DatasetsModel(self.table, init)
        self.table_model_filter = QSortFilterProxyModel()
        self.table_model_filter.setSourceModel(self.table_model)
        self.table.setModel(self.table_model_filter)
        return self.table_model

    def update_display_data(self, dsp):
        """Push the current values of the datasets *dsp* uses into it."""
        filtered_data = {k: self.table_model.backing_store[k][1]
                         for k in dsp.data_sources()
                         if k in self.table_model.backing_store}
        dsp.update_data(filtered_data)

    def on_mod(self, mod):
        """Propagate a sync_struct modification to the affected displays."""
        if mod["action"] == "init":
            for display in self.displays.values():
                display.update_data(self.table_model.backing_store)
            return
        if mod["path"]:
            source = mod["path"][0]
        elif mod["action"] == "setitem":
            source = mod["key"]
        else:
            return
        for display in self.displays.values():
            if source in display.data_sources():
                self.update_display_data(display)

    def create_dialog(self, ty):
        """Open the settings dialog for creating a new display of type *ty*."""
        dlg_class = display_types[ty][0]
        dlg = dlg_class(self.dialog_parent, None, dict(),
                        sorted(self.table_model.backing_store.keys()),
                        partial(self.create_display, ty, None))
        dlg.open()

    def create_display(self, ty, prev_name, name, settings):
        """Create, register, and float a display; returns the new display."""
        if prev_name is not None and prev_name in self.displays:
            # Renaming an existing display is not supported yet.
            raise NotImplementedError
        dsp_class = display_types[ty][1]
        dsp = dsp_class(name, settings)
        self.displays[name] = dsp
        self.update_display_data(dsp)

        def on_close():
            del self.displays[name]
        dsp.sigClosed.connect(on_close)
        self.dock_area.addDock(dsp)
        self.dock_area.floatDock(dsp)
        return dsp

    def save_state(self):
        """Serialize every open display as {name: {ty, settings, state}}."""
        r = dict()
        for name, display in self.displays.items():
            r[name] = {
                "ty": _get_display_type_name(type(display)),
                "settings": display.settings,
                "state": display.save_state()
            }
        return r

    def restore_state(self, state):
        """Recreate displays saved by save_state(); failures are logged, not fatal."""
        for name, desc in state.items():
            try:
                dsp = self.create_display(desc["ty"], None, name,
                                          desc["settings"])
            except Exception:
                logger.warning("Failed to create display '%s'", name,
                               exc_info=True)
                # BUGFIX: skip to the next entry -- previously execution fell
                # through and called restore_state on an unbound (or stale)
                # dsp from the prior iteration.
                continue
            try:
                dsp.restore_state(desc["state"])
            except Exception:
                logger.warning("Failed to restore display state of '%s'",
                               name, exc_info=True)
|
#!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "Extract and Upload IRIS Models to S3"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-u', '--url', help='URL to Download', dest='url')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
data_dir = str(os.getenv("ENV_DATA_DST_DIR", "/opt/work/data/dst"))
if not os.path.exists(data_dir):
os.mkdir(data_dir, 0777)
ds_name = "iris_classifier"
cur_date_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
s3_bucket = "unique-bucket-name-for-datasets"
s3_key = "dataset_" + core.to_upper(ds_name) + ".cache.pickle.zlib"
s3_loc = ""
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
#
# End Arg Processing
#
#####################################################################
s3_loc = str(s3_bucket) + ":" + str(s3_key)
lg("-------------------------------------------------", 6)
lg("Extracting and Uploading Models from CACHE to S3Loc(" + str(s3_loc) + ")", 6)
lg("", 6)
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
| "DSName" : str(ds_name), # | Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6)
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
sys.exit(1)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
sys.exit(0)
|
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.NetworkMethods import NetworkMethod, NetworkMethods
from PLC.Auth import Auth
class AddNetworkMethod(Method):
    """
    Adds a new network method.
    Returns 1 if successful, faults otherwise.
    """
    # Docstring above doubles as the API help text; keep it accurate.

    roles = ['admin']

    accepts = [
        Auth(),
        NetworkMethod.fields['method']
    ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, name):
        # Create and immediately persist the new network-method row.
        record = NetworkMethod(self.api)
        record['method'] = name
        record.sync(insert = True)
        return 1
|
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.colorchooser as colorchooser
Spinbox = ttk.Spinbox if hasattr(ttk, "Spinbox") else tk.Spinbox
if __name__ == "__main__": # For stand-alone testing with parallel TkUtil
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..")))
import TkUtil
import TkUtil.Dock
from Globals import *
class Dock(TkUtil.Dock.Window):
    """Dockable "Display" options panel: word wrap, block cursor, line spacing.

    Each option is held in a Tk variable; writing one fires the matching
    virtual event (<<WordWrapChanged>>, <<BlockCursorChanged>>,
    <<LineSpacingChanged>>) so the owning editor can apply the change.
    """

    def create_variables(self):
        self.title = "Display"
        self.__wordWrap = tk.StringVar()
        self.__wordWrap.set("Word")
        self.__wordWrap.trace("w", self.__set_word_wrap)
        self.__blockCursor = tk.IntVar()
        self.__blockCursor.set(False)
        self.__blockCursor.trace("w", self.__set_block_cursor)
        self.__lineSpacing = tk.StringVar()
        self.__lineSpacing.set(0)
        self.__lineSpacing.trace("w", self.__set_line_spacing)

    def create_widgets(self):
        self.wordWrapLabel = ttk.Label(self, text="Wrap:")
        self.wordWrapCombobox = ttk.Combobox(self, state="readonly",
                values=["None", "Character", "Word"],
                textvariable=self.__wordWrap, width=10)
        self.blockCursorCheckbutton = ttk.Checkbutton(self,
                text="Block Cursor", variable=self.__blockCursor)
        self.lineSpacingLabel = ttk.Label(self, text="Line Spacing:")
        # BUGFIX: use the module-level Spinbox alias (ttk.Spinbox when
        # available) instead of always falling back to tk.Spinbox.
        self.lineSpacingSpinbox = Spinbox(self, from_=0, to=32,
                width=3, validate="all", justify=tk.RIGHT,
                textvariable=self.__lineSpacing)
        self.lineSpacingSpinbox.config(validatecommand=(
            self.lineSpacingSpinbox.register(self.__validate_int),
            "lineSpacingSpinbox", "%P"))

    def create_layout(self):
        pad = dict(padx=PAD, pady=PAD)
        padW = dict(sticky=tk.W, **pad)
        padWE = dict(sticky=(tk.W, tk.E), **pad)
        self.wordWrapLabel.grid(row=1, column=0, **padW)
        self.wordWrapCombobox.grid(row=1, column=1, columnspan=2, **padWE)
        self.blockCursorCheckbutton.grid(row=2, column=0, columnspan=3,
                **padWE)
        self.lineSpacingLabel.grid(row=3, column=0, columnspan=2, **padW)
        # BUGFIX: the grid option is "sticky"; the original "stick=" relied
        # on Tcl's option-abbreviation matching.
        self.lineSpacingSpinbox.grid(row=3, column=2, sticky=tk.E, **pad)

    def __set_word_wrap(self, *args):
        self.event_generate("<<WordWrapChanged>>")

    def __set_block_cursor(self, *args):
        self.event_generate("<<BlockCursorChanged>>")

    def __set_line_spacing(self, *args):
        self.event_generate("<<LineSpacingChanged>>")

    def __validate_int(self, spinbox, number):
        # The first argument arrives as the attribute name registered with
        # Tk; resolve it back to the actual widget.
        spinbox = getattr(self, spinbox)
        return TkUtil.validate_spinbox_int(spinbox, number)

    @property
    def word_wrap(self):
        """Current wrap mode as a tkinter Text -wrap value: none/char/word."""
        wrap = self.__wordWrap.get().lower()
        if wrap == "character":
            wrap = "char"
        return wrap

    @word_wrap.setter
    def word_wrap(self, value):
        if value.lower() == "char":
            value = "character"
        self.__wordWrap.set(value.title())

    @property
    def block_cursor(self):
        return bool(self.__blockCursor.get())

    @block_cursor.setter
    def block_cursor(self, value):
        self.__blockCursor.set(value)

    @property
    def line_spacing(self):
        return int(self.__lineSpacing.get())

    @line_spacing.setter
    def line_spacing(self, value):
        self.__lineSpacing.set(value)
if __name__ == "__main__":
if sys.stdout.isatty():
application = tk.Tk()
application.title("Display")
dock = Dock(application, None)
dock.pack(fill=tk.BOTH, expand=True)
dock.bind("<Escape>", lambda *args: application.quit())
application.bind("<Escape>", lambda *args: application.quit())
application.mainloop()
else:
print("Loaded OK")
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
"""Test migration operations supported by admin user"""
    @classmethod
    def setup_clients(cls):
        # Migration listing is an admin-only API; use the admin client.
        super(MigrationsAdminTest, cls).setup_clients()
        cls.client = cls.os_admin.migrations_client
    @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
    def test_list_migrations(self):
        """Test admin user can get the migrations list"""
        # Smoke test: the call must simply succeed for an admin user.
        self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
"""Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.resize_server(server_id, self.flavor_ref_alt)
body = self.client.list_migrations()['migrations']
instance_uuids = [x['instance_uuid'] for x in body]
self.assertIn(server_id, instance_uuids)
def _flavor_clean_up(self, flavor_id):
try:
self.admin_flavors_client.delete_fla | vor(flavor_id)
self.admin_flavors_client.wait_for_resource_deletion(flavor_id)
except exceptions.NotFound:
pass
@decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
@testtools.skipU | nless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
"""Test reverting resized server with original flavor deleted
Tests that we can revert the resize on an instance whose original
flavor has been deleted.
"""
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
flavor = self.admin_flavors_client.show_flavor(
self.flavor_ref)['flavor']
flavor = self.admin_flavors_client.create_flavor(
name=data_utils.rand_name('test_resize_flavor_'),
ram=flavor['ram'],
disk=flavor['disk'],
vcpus=flavor['vcpus']
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
# Set extra specs same as self.flavor_ref for the created flavor,
# because the environment may need some special extra specs to
# create server which should have been contained in
# self.flavor_ref.
extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor_ref)['extra_specs']
if extra_spec_keys:
self.admin_flavors_client.set_flavor_extra_spec(
flavor['id'], **extra_spec_keys)
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
server = self.servers_client.show_server(server['id'])['server']
# If 'id' not in server['flavor'], we can only compare the flavor
# details, so here we should save the to-be-deleted flavor's details,
# for the flavor comparison after the server resizing.
if not server['flavor'].get('id'):
pre_flavor = {}
body = self.flavors_client.show_flavor(flavor['id'])['flavor']
for key in ['name', 'ram', 'vcpus', 'disk']:
pre_flavor[key] = body[key]
# Delete the flavor we used to boot the instance.
self._flavor_clean_up(flavor['id'])
# Now resize the server and wait for it to go into verify state.
self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
# Now revert the resize, it should be OK even though the original
# flavor used to boot the server was deleted.
self.servers_client.revert_resize_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
server = self.servers_client.show_server(server['id'])['server']
if server['flavor'].get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor['id'], server['flavor']['id'], msg)
else:
self.assertEqual(pre_flavor['name'],
server['flavor']['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
def _test_cold_migrate_server(self, revert=False):
if CONF.compute.min_compute_nodes < 2:
msg = "Less than 2 compute nodes, skipping multinode tests."
raise self.skipException(msg)
server = self.create_test_server(wait_until="ACTIVE")
src_host = self.get_host_for_server(server['id'])
self.admin_servers_client.migrate_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'], 'VERIFY_RESIZE')
if revert:
self.servers_client.revert_resize_server(server['id'])
assert_func = self.assertEqual
else:
self.servers_client.confirm_resize_server(server['id'])
assert_func = self.assertNotEqual
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
dst_host = self.get_host_for_server(server['id'])
assert_func(src_host, dst_host)
@decorators.idempotent_id('4bf0be52-3b6f-4746-9a27-3143636fe30d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
"""Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
"""Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
|
# This module should be imported from REPL, not run from command line.
import socket
import uos
import network
import uwebsocket
import websocket_helper
import _webrepl
listen_s = None
client_s = None
def setup_conn(port, accept_handler):
    """Create the WebREPL listening socket on *port* and return it.

    If *accept_handler* is given it is registered as the socket's
    incoming-connection callback; otherwise the caller is expected to
    accept connections itself (see start_foreground).
    """
    global listen_s
    listen_s = socket.socket()
    listen_s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    ai = socket.getaddrinfo("0.0.0.0", port)
    addr = ai[0][4]
    listen_s.bind(addr)
    listen_s.listen(1)
    if accept_handler:
        # NOTE(review): option 20 looks like the MicroPython-specific
        # "register accept callback" socket option — confirm against the
        # port's modusocket implementation.
        listen_s.setsockopt(socket.SOL_SOCKET, 20, accept_handler)
    # Announce the endpoint on every active WLAN interface (AP and STA).
    for i in (network.AP_IF, network.STA_IF):
        iface = network.WLAN(i)
        if iface.active():
            print("WebREPL daemon started on ws://%s:%d" % (iface.ifconfig()[0], port))
    return listen_s
def accept_conn(listen_sock):
    """Accept one incoming WebREPL connection and attach it to the REPL.

    Only a single concurrent session is allowed; a second connection is
    closed immediately.
    """
    global client_s
    cl, remote_addr = listen_sock.accept()
    # Peek at the current dupterm slot by swapping None in and restoring
    # the previous value: non-None means a session is already attached.
    prev = uos.dupterm(None)
    uos.dupterm(prev)
    if prev:
        print("\nConcurrent WebREPL connection from", remote_addr, "rejected")
        cl.close()
        return
    print("\nWebREPL connection from:", remote_addr)
    client_s = cl
    websocket_helper.server_handshake(cl)
    ws = uwebsocket.websocket(cl, True)
    # Wrap the raw websocket in the WebREPL protocol layer.
    ws = _webrepl._webrepl(ws)
    cl.setblocking(False)
    # notify REPL on socket incoming data (ESP32/ESP8266-only)
    if hasattr(uos, 'dupterm_notify'):
        cl.setsockopt(socket.SOL_SOCKET, 20, uos.dupterm_notify)
    uos.dupterm(ws)
def stop():
    """Tear down WebREPL: detach the REPL from any session and close sockets."""
    global listen_s, client_s
    uos.dupterm(None)
    # Close the client connection first, then the listener, skipping
    # whichever was never opened.
    for sock in (client_s, listen_s):
        if sock:
            sock.close()
def start(port=8266, password=None):
    """(Re)start the WebREPL daemon on *port*.

    When *password* is None the password is read from webrepl_cfg.PASS; if
    that configuration cannot be loaded the daemon is not started.  With an
    explicit *password* the config file is bypassed entirely.
    """
    stop()
    if password is None:
        try:
            import webrepl_cfg
            _webrepl.password(webrepl_cfg.PASS)
        except Exception:
            # Fix: the original bare `except:` also wrapped setup_conn(),
            # so a socket error (e.g. port in use) was misreported as a
            # missing configuration, and KeyboardInterrupt/SystemExit were
            # swallowed.  Only the config load is guarded now.
            print("WebREPL is not configured, run 'import webrepl_setup'")
            return
        setup_conn(port, accept_conn)
        print("Started webrepl in normal mode")
    else:
        _webrepl.password(password)
        setup_conn(port, accept_conn)
        print("Started webrepl in manual override mode")
def start_foreground(port=8266):
    """Run WebREPL synchronously: listen on *port* and block until a client
    connects, then attach that session to the REPL."""
    stop()
    listener = setup_conn(port, None)
    accept_conn(listener)
|
from .game import Board

# Smoke-test the Board loader: fetch the full board list ten times,
# echoing the iteration number as progress.
for iteration in range(10):
    Board.all()
    print(iteration)
|
identifiers
from PyQt4.QtGui import QPushButton
# Probe each external command wrapper from the `sh` package individually so
# that a missing tool only disables the feature that needs it.
# Fix: the original bare `except:` clauses are narrowed to ImportError so
# KeyboardInterrupt/SystemExit and real programming errors are not swallowed.
try:
    from sh import inxi
except ImportError:
    print(" 'inxi' not found, install it to get this info")
try:
    from sh import mhwd
except ImportError:
    print(" 'mhwd' not found, this is not Manjaro?")
try:
    from sh import hwinfo
except ImportError:
    print(" 'hwinfo' not found")
try:
    from sh import free
except ImportError:
    print(" 'free' not found")
try:
    from sh import lsblk
except ImportError:
    print(" 'lsblk' not found")
try:
    from sh import df
except ImportError:
    print(" 'df' not found")
try:
    from sh import blockdev
except ImportError:
    print(" 'blockdev' not found")
try:
    from sh import test
except ImportError:
    print(" 'test' not found")
try:
    from sh import parted
except ImportError:
    print(" 'parted' not found")
# Output file that collects every selected log/diagnostic section.
TMP_FILE = "/tmp/mlogsout.txt"
# Section banner; filled with (title, description) via str.format.
HEADER = '''
===================
|{:^17}| {}
===================
'''
# Labels for the selection checkboxes, in display order.
# NOTE(review): to_computer() indexes self.checks up to [16] while this list
# has only 15 entries — the index mapping looks shifted by two; verify
# against the full file before relying on the later checkboxes.
checkbuttons = [
    'Inxi',
    'Installed g. drivers',
    'List all g. drivers',
    'Graphic Card Info',
    'Memory Info',
    'Partitions',
    'Free Disk Space',
    'Xorg.0',
    'Xorg.1',
    'pacman.log',
    'journalctl - Emergency',
    'journalctl - Alert',
    'journalctl - Critical',
    'journalctl - Failed',
    'Open&Rc - rc.log',
]
def look_in_file(file_name, kws):
    """Return the lines of *file_name* containing at least one keyword in *kws*.

    Matching lines are concatenated (newlines preserved) into one string.
    """
    hits = []
    with open(file_name) as handle:
        for line in handle:
            if any(keyword in line for keyword in kws):
                hits.append(line)
    return "".join(hits)
class Window(QtGui.QWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.checks = [False]*len(checkbuttons) # initialize all buttons to False
# creates a vertical box layout for the window
vlayout = QtGui.QVBoxLayout()
# creates the checkboxes
for idx, text in enumerate(checkbuttons):
checkbox = QtGui.QCheckBox(text)
# connects the 'stateChanged()' signal with the 'checkbox_state_changed()' slot
checkbox.stateChanged.connect(partial(self.checkbox_state_changed, idx))
vlayout.addWidget(checkbox) # adds the checkbox to the layout
btn = QPushButton("&Show Info ({})".format(TMP_FILE), self)
btn.clicked.connect(self.to_computer)
btn.clicked.connect(self.to_editor)
vlayout.addWidget(btn)
vlayout.addStretch()
self.setLayout(vlayout) # sets the window layout
def checkbox_state_changed(self, idx, state):
self.checks[idx] = state == Qt.Checked
def to_computer(self, text):
f = open(TMP_FILE, 'w') # write mode clears any previous content from the file if it exists
if self.checks[0]:
print("Saving: inxi to file")
f.write(HEADER.format("Inxi -Fxzc0", "Listing computer information"))
try:
| f.write(str(inxi('-Fxxxzc0')))
except:
" 'inxi' not found, install it to get this info"
f.write('\n')
if self.checks[1]:
print("Getting info about installed graphical driver")
f.write(HEADER.format("Installed drivers", "Shows which graphic driver is installed"))
try:
f.write(str(mhwd('-li')))
except:
| print(" 'mhwd' not found, this is not Manjaro?")
f.write('\n')
if self.checks[2]:
print("Getting list of all drivers supported on detected gpu's")
f.write(HEADER.format("Available drivers", "list of all drivers supported on detected gpu's"))
try:
f.write(str(mhwd('-l')))
except:
print(" 'mhwd' not found, this is not Manjaro?")
# f.write('\n')
if self.checks[3]:
print('hwinfo -graphic card')
# os.system('hwinfo --gfxcard')
f.write(HEADER.format("hwinfo --gfxcard", "Show Graphic Card info"))
try:
f.write(str(hwinfo('--gfxcard')))
except:
print('hwinfo graphic card info error')
f.write('hwinfo graphic card info error')
f.write('\n')
if self.checks[4]:
print('memory info')
# os.system('free -h')
f.write(HEADER.format("Memory Info", "Info about Memory and Swap"))
try:
f.write(str(free(' -h')))
except:
print('memory info error')
f.write('memory info error')
f.write('\n')
if self.checks[5]:
print('disk info')
# os.system('lsblk')
f.write(HEADER.format("Disk Info", "Disks and Partitions"))
try:
f.write(str(lsblk()))
except:
print('lsblk error')
f.write('lsblk error')
f.write('\n')
if self.checks[6]:
print('free disk space')
# os.system('df')
f.write(HEADER.format("Free Disk Space", "Free space per pertition"))
try:
f.write(str(df()))
except:
print('free disk space error')
f.write('free disk space error')
f.write('\n')
if self.checks[9]:
print("Saving: Xorg.0.log to file")
f.write(HEADER.format("Xorg.0.log", "searching for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.0.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.0.log not found!")
f.write("Xorg.0.log not found!")
f.write('\n')
if self.checks[10]:
print("Saving: Xorg.1.log to file")
f.write(HEADER.format("Xorg.1.log", "searching for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.1.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.1.log not found!")
f.write("Xorg.1.log not found!")
f.write('\n')
if self.checks[11]:
print("Saving: pacman.log to file")
f.write(HEADER.format("pacman.log", "searching for: pacsave, pacnew, pacorig keywords"))
try:
f.write(look_in_file('/var/log/pacman.log', ['pacsave', 'pacnew', 'pacorig']))
except FileNotFoundError:
print("/var/log/pacman.log not found, this is not Manjaro or Arch based Linux?")
f.write("pacman.log not found! Not Arch based OS?")
f.write('\n')
if self.checks[12]:
print("Saving: journalctl (emergency) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Emergency keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['emergency', 'Emergency', 'EMERGENCY']))
f.write('\n')
if self.checks[13]:
print("Saving: journalctl (alert) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Alert keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['alert', 'Alert', 'ALERT']))
f.write('\n')
if self.checks[14]:
print("Saving: journalctl (critical) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Critical keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['critical', 'Critical', 'CRITICAL']))
f.write('\n')
if self.checks[15]:
print("Saving: journalctl (failed) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Failed keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['failed', 'Failed', 'FAILED']))
f.write('\n')
if self.checks[16]:
print("Saving: rc.log to file")
f.write(HEADER.format("rc.log", "OpenRc only! searching for: WARNING: keywords"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_browserstep
----------------------------------
Tests for `browserstep` module.
"""
import sys
import | unittest
from browserstep import browserstep
class TestBrowserstep(unittest.TestCase):
    """Placeholder test case for the ``browserstep`` module."""

    def setUp(self):
        """No fixtures required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def test_000_something(self):
        """Placeholder test; always passes."""
        pass
# Allow running this test module directly; the process exit status mirrors
# the unittest result.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
assert z.is_even is True
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_one():
    """Integer(1): positive odd unit; neither prime nor composite."""
    z = Integer(1)
    for name in ('is_commutative', 'is_integer', 'is_rational', 'is_algebraic',
                 'is_real', 'is_complex', 'is_positive', 'is_nonnegative',
                 'is_odd', 'is_finite', 'is_comparable', 'is_number'):
        assert getattr(z, name) is True
    # is_composite is False for 1 (issue 8807).
    for name in ('is_transcendental', 'is_noninteger', 'is_irrational',
                 'is_imaginary', 'is_negative', 'is_nonpositive', 'is_even',
                 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(z, name) is False
def test_negativeone():
    """Integer(-1): negative odd unit; neither prime nor composite."""
    z = Integer(-1)
    for name in ('is_commutative', 'is_integer', 'is_rational', 'is_algebraic',
                 'is_real', 'is_complex', 'is_negative', 'is_nonpositive',
                 'is_odd', 'is_finite', 'is_comparable', 'is_number'):
        assert getattr(z, name) is True
    for name in ('is_transcendental', 'is_noninteger', 'is_irrational',
                 'is_imaginary', 'is_positive', 'is_nonnegative', 'is_even',
                 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(z, name) is False
def test_infinity():
    """S.Infinity: positive, infinite, comparable; most integer-ish
    predicates are undecided (None)."""
    oo = S.Infinity
    for name in ('is_commutative', 'is_real', 'is_complex', 'is_positive',
                 'is_nonnegative', 'is_infinite', 'is_comparable',
                 'is_number'):
        assert getattr(oo, name) is True
    for name in ('is_imaginary', 'is_negative', 'is_nonpositive',
                 'is_finite', 'is_prime'):
        assert getattr(oo, name) is False
    for name in ('is_integer', 'is_rational', 'is_algebraic',
                 'is_transcendental', 'is_noninteger', 'is_irrational',
                 'is_even', 'is_odd', 'is_composite'):
        assert getattr(oo, name) is None
def test_neg_infinity():
    """S.NegativeInfinity: negative, infinite, comparable; note is_composite
    is False here (unlike +oo where it is None)."""
    mm = S.NegativeInfinity
    for name in ('is_commutative', 'is_real', 'is_complex', 'is_negative',
                 'is_nonpositive', 'is_infinite', 'is_comparable',
                 'is_number'):
        assert getattr(mm, name) is True
    for name in ('is_imaginary', 'is_positive', 'is_nonnegative',
                 'is_finite', 'is_prime', 'is_composite'):
        assert getattr(mm, name) is False
    for name in ('is_integer', 'is_rational', 'is_algebraic',
                 'is_transcendental', 'is_noninteger', 'is_irrational',
                 'is_even', 'is_odd'):
        assert getattr(mm, name) is None
def test_nan():
    """S.NaN: a non-comparable number with almost every predicate undecided."""
    nan = S.NaN
    for name in ('is_commutative', 'is_number'):
        assert getattr(nan, name) is True
    assert nan.is_comparable is False
    for name in ('is_integer', 'is_rational', 'is_algebraic',
                 'is_transcendental', 'is_real', 'is_complex',
                 'is_noninteger', 'is_irrational', 'is_imaginary',
                 'is_positive', 'is_negative', 'is_nonpositive',
                 'is_nonnegative', 'is_even', 'is_odd', 'is_finite',
                 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(nan, name) is None
def test_pos_rational():
    """Positive non-integer rationals: full predicate sweep for 3/4, then
    sign predicates for a few more positive rationals."""
    r = Rational(3, 4)
    for name in ('is_commutative', 'is_rational', 'is_algebraic', 'is_real',
                 'is_complex', 'is_noninteger', 'is_positive',
                 'is_nonnegative', 'is_finite', 'is_comparable'):
        assert getattr(r, name) is True
    for name in ('is_integer', 'is_transcendental', 'is_irrational',
                 'is_imaginary', 'is_negative', 'is_nonpositive', 'is_even',
                 'is_odd', 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(r, name) is False
    # Every positive rational shares the same sign predicates.
    for r in (Rational(1, 4), Rational(5, 4), Rational(5, 3)):
        assert r.is_positive is True
        assert r.is_negative is False
        assert r.is_nonpositive is False
        assert r.is_nonnegative is True
def test_neg_rational():
    """Negative rationals all share the same sign predicates."""
    for r in (Rational(-3, 4), Rational(-1, 4), Rational(-5, 4),
              Rational(-5, 3)):
        assert r.is_positive is False
        assert r.is_nonpositive is True
        assert r.is_negative is True
        assert r.is_nonnegative is False
def test_pi():
    """S.Pi: positive transcendental irrational."""
    z = S.Pi
    for name in ('is_commutative', 'is_transcendental', 'is_real',
                 'is_complex', 'is_noninteger', 'is_irrational',
                 'is_positive', 'is_nonnegative', 'is_finite',
                 'is_comparable'):
        assert getattr(z, name) is True
    for name in ('is_integer', 'is_rational', 'is_algebraic', 'is_imaginary',
                 'is_negative', 'is_nonpositive', 'is_even', 'is_odd',
                 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(z, name) is False
def test_E():
    """S.Exp1 (Euler's number): positive transcendental irrational,
    same predicate profile as pi."""
    z = S.Exp1
    for name in ('is_commutative', 'is_transcendental', 'is_real',
                 'is_complex', 'is_noninteger', 'is_irrational',
                 'is_positive', 'is_nonnegative', 'is_finite',
                 'is_comparable'):
        assert getattr(z, name) is True
    for name in ('is_integer', 'is_rational', 'is_algebraic', 'is_imaginary',
                 'is_negative', 'is_nonpositive', 'is_even', 'is_odd',
                 'is_infinite', 'is_prime', 'is_composite'):
        assert getattr(z, name) is False
def test_I():
z = S.ImaginaryUnit
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is False
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is True
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is Fal |
recording = '''jtfxgqec
zxoeuddn
anlfufma
dxuuyxkg
ttnewhlw
sjoyeiry
rgfwwdhw
qymxsllk
forftdvy
rzmnmewh
hogawihi
mtsyexba
mrjzqqfk
ypmkexpg
pjuyopgv
rtqquvaj
evubmlrq
bqlrtuce
ndidnbps
vqukosam
mzdyfkcd
rrbwdimb
uhnvxgly
aaimxpcv
acxvinqj
muaeikzy
lhzbosjd
fflqqiit
unfhzfrs
gmwoyvob
cculubmy
zqbugcwa
ijouicwt
bildjjww
ugksmnps
ivawibvu
igzteede
foehssxo
pkeevvlt
xumuixyw
okhhtycj
xhblffye
iqapgjqe
lkhpntum
wuzxgwow
bkkpfguu
bnqctsdi
cwncjrwn
eivhabsi
bwdicgfm
kowiourk
dhbzuztx
gibitfxo
wmrlhenb
wfzmjvwh
zddjirfg
fafhmiwf
ddhvufhg
qdwnlzqp
nhsnngut
uacmfgop
morcixux
sfdxrgqy
tezzvctv
dnnmtkfp
dygdzcib
efurreri
npvpklix
svpbdgyw
mcntltzd
inwkhxlx
sajfgeoi
nwkqrspt
qtzqsksv
mtncajjk
etarsvxr
eyaeeauy
gqnctylg
uerywmma
hjrxhtjb
zdsdyfzp
zhgrrhvd
yvxqyalf
rlgwftff
xczvgpzq
yydydclu
rzltbrro
jforpzau
zskadlfz
dqbqdsgv
bcwjltvc
byfoamgd
cpefdmso
ocuetyke
vlqrfnpp
ggikwydh
eakpyuov
osaguhlz
ylmrfvee
nvdvqpzm
pudbbuhh
bwmqdpyv
proscvgy
cetkcpjw
sbhcqeya
fgnyltmf
qcspgopp
bdhnemmy
tczkhihl
yduxunvr
dtxerncl
xnxeaayt
rvlcbgts
vpavzjqs
oueloufw
mubbhyna
nptmeppg
ojjfbuzz
lusboycs
gurmmorr
kefddaka
cpvpszit
bfvthzpm
owgcvdjo
simxphmv
rxedvjyw
hmeieuxr
vgqhcapz
vwtvbain
aobnhdsx
hkpshsjs
jxgegczu
xbsfxesk
pqhifeaj
triurorr
rnkufaxl
hmrqfoaw
veghzoxa
zbvgbpcm
rqrnbylj
txaawlta
uuksnfel
jqvycrvw
cdttmdpc
wojvbrzp
qvnuinon
gnpguyvh
cgbkpzbu
pdaqhlan
muiykslt
prvzlunm
whhcrchz
cahjhrkl
zifdgfpq
wanlienf
sfrnozvi
mwmykvyh
fbdfzgut
wfrviilb
ucaopfgo
fjhuikma
hdmizjdj
xngpfwvn
rueojtjg
xvtssxtx
vvcgzidf
xtehcxki
xksbfbso
osnzpqmy
isrnjkxh
utleakmz
dthmtbdt
plregxuh
amoeprsy
tmyhzhqd
csxqavbe
jmojlysw
slebxnbl
ldzryqmj
ajejyudk
ynhgnjhw
mdibxxxw
rvtcmesd
jmnwqddq
hppfoplc
nrcbjynz
kcqnjzue
mthvgjxm
ykztdbcv
etqqnhuz
tezkopgq
fwhwkqmz
fozpkzfy
hbbtlcog
hdvjqwyh
xuljsrvz
abskreoo
aedeydgc
dcyigvqf
ntpcvvgk
iiwgzkhl
zofhlqlx
veumtlae
qibdapwq
xpgpwirt
wvnnautq
wfhlgmdg
yqcrvdgx
srdufrbu
vycrvkpx
flwxzkim
enxayqxm
dgpntiaj
qedfutmp
vfdovine
dgrvjfjt
dqxxjahk
hnxpblyp
nnadwbsc
krmqqgwf
efykkzeb
lkrmrwqw
vfzayrwt
chopbnyf
vbydrtln
azmlestl
sqcyddvi
zdcubjok
afshwptc
sjgpuoch
bnfylydl
rsyxsbzi
psyuvyzx
npngqypd
xejayhdk
aqfmvjfi
tpffksph
uekwkjnj
ljsjimwm
hbgzjlig
ngssshxx
icitlosb
unxryqyt
nzpujfti
lupxnzhe
kxglfnic
ecewosbs
htlqxpiq
clqgnyfd
yyiozvar
mbvjgmyc
srhwhlin
casmlryr
ebuzskkp
iewhdqtr
oyidcobe
avptvltf
mfheqaxl
shqnezrq
xrpkzuvb
soxdjwba
aitmzlds
rpmpozpd
ccgxauky
gsstsjyx
bzeolqal
vfhddmuc
wfbbmqfv
pumxmnhj
qumdxkns
xymraott
uthlccig
ezpalags
giftxymr
ujjacleo
cgwgmktp
istetgdl
azedmaao
bnlfwyoq
orcwhbek
amswhkum
yxupesxu
mlzvqsrg
solkxzby
tbaxnjdu
xwbsiquk
hsftntsn
ajraaorz
mwmycrff
ymnbrbpj
uyfscatq
kzkgmbeh
libgpgnr
kxlgthxc
vzjbobyx
isqessab
ehursvof
guwrjnbi
xivkphwn
rurrmdmi
nqijeuzq
jambocej
qrtidktb
sbzvehmq
aikgzrsq
lgydnujf
twafyzry
nxhtklba
xhyaqyqe
xgvdfcrf
wdieppsd
iabrfmdm
doijaavc
oxydttkg
qsqiofwv
titrvjym
mwojqcku
tewiyhjx
jlqbksqd
knycvoks
tmcbnvhv
ekksoxmz
mgvommal
hrosnzeu
fzeymbek
evqxcukn
ilkpvdvl
rclpjbkb
tdpitlei
zvvzuucc
pzdgwnfz
mralxxlz
wywkawzh
hmazaakd
llltvbex
ihsmefpz
rzzgkjyz
srjqpeoq
jrczcdna
uuyskwop
yeuiaepa
vzppcwnn
oqhxixdo
xkwpfsij
cmsoiogl
ngbmaeue
lmqttyrj
yhgjxfmx
lwfgjnyp
ibbkjgra
gaxsotzr
paugisvs
pcqqauqi
pweuwnqs
jcbrscrj
ovtsgcnh
oscsgtqn
hkpwmhwk
pmdgwclk
owmskdhh
qutyussr
atdkvmzl
oqslriwe
wafjwfxp
ipcqlsxv
kzurbnoh
lfhfzwqo
ucybqwrj
tgnblzgm
lhwlniea
tlxymfbu
bcyvlkvt
glpacpjk
rjagzpnu
fyjpvhaq
cjtzwtdu
dkaqawts
pjoovtlv
xsnwqixw
swcftfed
cadigksp
fnsmxccx
cbxmdxvb
hpyqnpjq
jzpvphmo
kdkpubul
kiajwwta
uyeuctbe
yetyzqxw
fgeemnbl
brprbvgj
xszwwlea
ygunyguo
jwplrcbq
fejndxnx
oxsmkcqm
ldwkbpsk
cmzuxrst
jaoadiiu
oxcpkgbc
nyulhuci
bdwfqtkv
ehxvnzyd
cizuemsb
lbqyqduk
kqweswcd
tqnicuzh
utyaiaeu
osjdgvtj
qmrxcaoa
qiltxgvv
qklfgyss
lpjebmuo
bvebkous
yifrmeoa
jzgntlep
wadcknde
kaikclag
tucuhehr
bvwhuwzn
uvlecxgy
rzyxjhmo
dyyfwjgv
vocjkohi
ylyflktq
raltxpqg
eitypruw
pfbmopgm
qerushjt
xykophcv
amjhrlhi
uqkjhdhn
kkohprfw
hvsmtnfd
uxgiqmqc
npxwplcj
ltchgces
exiyyief
ysmvbqso
zpyvuhqz
lkvwronk
vxilskkl
cxfypwcd
jhrczkmf
rdedtejq
gmxcrlzi
jumwfmnn
gkynzdtd
dfdkxggc
yldclxhz
fsxvbwyj
ioiupzio
lxyqvncv
rsgsviny
osgcimej
tecqrgkq
tozohtwt
kmlowfrf
hhpiukqe
xlxlkjwf
ntvtoexx
zzvsvdow
yluidajg
vumkynvp
vaxipwwg
pqymmoif
sgjzogut
jppwszzn |
gvvaibqu
lwjotuil
srflotab
ibnblmjm
kvcsdivb
wqrpzmvr
gcmqdezs
vrizdyfo
vtqnsjbf
jwocjmvb
fjkiiowl
ctjhmmrq
pcckqfki
wqolxgfg
gbsdyrbc
giqmfqwb
fodfpvyl
nxdzwvzz
hpnatltw
adjjyhjd
aoguhvmv
yyeanoir
baojaygs
ovkebbjb
pmykvfex
zeooykoa
uuozuxjb
kxxvbh | br
jxbchjlr
qhiwdonk
dnvfwwfh
kjfrlslh
wionbrdf
qgkjarob
kwplsxso
txgelygh
vlmziqwf
wbetqqkp
qfkocear
wrvonhyr
sbiqrcri
lnwzitce
bctyrwph
kallfwzc
zfqwanet
bevnljjr
kwqsktan
gjviqwlu
zflsnpig
wzaufqvr
uvxhutav
diejbica
ojciaexn
zyjoxrwi
djkodeiz
gsinkcqk
jkonssuq
eychyabp
fkcogwnr
kkioyrnn
inqxlztu
cqnbxxks
ipwmpdmm
moozfajm
irjaimrw
ojihmanb
hzoszxzc
ajjvxqqi
ohkfkijd
nlsahrpv
zizxtmxa
gjtnrurd
pyqghfuj
fltnnyfe
goxagvfp
nplhpkiy
dlwgyvby
fzrfhcgh
zaiuostp
jdjojfkw
thksqbjh
qopcwnht
ewkljwho
qguaeaac
wxzzxgcc
nlnuuhdu
ihtzrqay
nmtdbkhp
yasxhulm
drzjobfy
qpgcjdxn
aegbxmjb
bbuxsffr
zevjcgzn
pgbqezxk
qdlepjko
zbtzvicm
ssjdcggg
ugrtxalo
tsbvnppt
rboleppu
gywfqiwz
skgzeqhu
hzuggbcf
dkegaxap
zijcjrkm
jtfkeoog
fyvtrvig
gophbeoj
ieatnihe
vlaauxgz
mxnheqkz
mftwybny
ebawojuj
dyrvecbs
lrrcwang
qswijdeu
wkuszdax
ecaokzfc
pmbznspx
tjqrztdv
mwdxruge
whutfdqy
zpfwqvox
fkqapoid
bodleqbn
kpxiuodk
johmsncc
enhamlol
yhtydoss'''.split("\n")
from collections import Counter
# Error-corrected message: the true character of each column is the LEAST
# frequent one across all recorded lines.  zip(*recording) transposes the
# equal-length lines into per-column character tuples; most_common() sorts
# by descending count, so its last entry is the rarest character.
code = "".join(
    Counter(column).most_common()[-1][0]
    for column in zip(*recording)
)
print("Code: ", code)
|
ension;
``in_parts=(PartitionSpec(2,2), PartitionSpec(4,1), None)`` means the
first argument should be partitioned over four devices by splitting both
of its dimensions in half, the second argument should be partitioned over
the four devices across the first dimension, and the third argument is
replicated across the four devices.
All PartitionSpecs in a given ``sharded_jit`` call must correspond to the
same total number of partitions, i.e. the product of all PartitionSpecs
must be equal, and the number of dimensions in the PartitionSpec
corresponding to an array ``a`` should equal ``a.ndim``. Arguments marked
as static using ``static_argnums`` (see below) do not require a
PartitionSpec.
out_parts: The output partitions, i.e. how each output of ``fun`` should be
partitioned or replicated. This follows the same convention as
``in_parts``.
num_partitions: Optional. If set, explicitly specifies the number of devices
``fun`` should partitioned across (rather than inferring it from
``in_parts``, ``out_parts``, and/or any ``with_sharding_constraint``
calls). Setting this should usually be unnecessary, but can be used to
maintain device persistence across multiple sharded_jit calls when some of
those calls only involve replicated values.
local_in_parts: Optional. This should be set when partitioning across
multiple processes, and says how each process's worth of data should be
partitioned (vs. in_parts which is the "global" partitioning across all
processes). This API is likely to change in the future.
local_out_parts: Optional. This should be set when partitioning across
multiple processes, and says how each process's worth of data should be
partitioned (vs. out_parts which is the "global" partitioning across all
processes). This API is likely to change in the future.
local_num_partitions: Optional. Explicitly specifies the numbers of local
devices to partitions across in a multi-process setting. This API is
likely to change in the future.
static_argnums: An int or collection of ints specifying which positional
arguments to treat as static (compile-time constant). Operations that only
depend on static arguments will be constant-folded. Calling the jitted
function with different values for these constants will trigger
recompilation. If the jitted function is called with fewer positional
arguments than indicated by ``static_argnums`` then an error is raised.
Each of the static arguments will be broadcasted to all devices, and
cannot be partitioned - these arguments will be removed from the *args
list before matching each remaining argument with its corresponding
PartitionSpec. Arguments that are not arrays or containers thereof must
be marked as static. Defaults to ``()``.
Returns:
A version of ``fun`` that will be distributed across multiple devices.
"""
if num_partitions is not None:
nparts = num_partitions
else:
nparts = pxla.get_num_partitions(in_parts, out_parts)
if local_num_partitions is not None:
local_nparts = local_num_partitions
else:
local_nparts = pxla.get_num_partitions(local_in_parts, local_out_parts)
static_argnums = _ensure_index_tuple(static_argnums)
@wraps(fun)
def wrapped(*args, **kwargs):
if kwargs:
raise NotImplementedError("sharded_jit over kwargs not yet supported")
f = lu.wrap_init(fun)
if static_argnums:
if max(static_argnums) >= len(args):
raise ValueError(
f"jitted function has static_argnums={static_argnums}"
f" but was called with only {len(args)} positional "
f"argument{'s' if len(args) > 1 else ''}. "
"All static broadcasted arguments must be passed positionally.")
dyn_argnums = [i for i in range(len(args)) if i not in static_argnums]
f, args = argnums_partial(f, dyn_argnums, args)
args_flat, in_tree = tree_flatten((args, kwargs))
in_parts_flat = tuple(flatten_axes("sharded_jit in_parts",
in_tree.children()[0], in_parts))
if local_in_parts is not None:
local_in_parts_flat = tuple(flatten_axes("sharded_jit local_in_parts",
in_tree.children()[0], local_in_parts))
else:
local_in_parts_flat = None
flat_fun, out_tree = flatten_fun(f, in_tree)
# TODO(skye): having a function-typed param in a primitive seems dicey, is
# there a better way?
out_parts_thunk = HashableFunction(
lambda: tuple(flatten_axes("sharded_jit out_parts", out_tree(), out_parts)),
closure=out_parts)
if local_out_parts:
local_out_parts_thunk = HashableFunction(
lambda: tuple(flatten_axes("sharded_jit local_out_parts",
out_tree(), local_out_parts)),
closure=local_out_parts)
else:
local_out_parts_thunk = HashableFunction(lambda: None, closure=None)
out = sharded_call(
flat_fun,
*args_flat,
nparts=nparts,
in_parts=in_parts_flat,
out_parts_thunk=out_parts_thunk,
local_in_parts=local_in_parts_flat,
local_out_parts_thunk=local_out_parts_thunk,
local_nparts=local_nparts,
name=flat_fun.__name__)
return tree_unflatten(out_tree(), | out)
return wrapped
def _sharding_constraint_impl(x, partitions):
# TODO(skye): can we also prevent this from being called in other
# non-sharded_jit contexts? (e.g. pmap, control flow)
raise NotImplementedError(
"with_sharding_constraint() should only be called inside sharded_jit()")
def _sharding_constraint_translation_rule(ctx, avals_in, avals_out, x_node,
                                          partitions):
    """XLA translation: attach the ``partitions`` sharding to ``x_node``."""
    annotated = xla.set_sharding(ctx.builder, x_node, partitions)
    return [annotated]
# Primitive representing a sharding-constraint annotation on a value.
sharding_constraint_p = core.Primitive("sharding_constraint")
sharding_constraint_p.def_impl(_sharding_constraint_impl)
# Abstractly an identity: the constraint does not change the aval.
sharding_constraint_p.def_abstract_eval(lambda x, partitions: x)
# The op is linear; its transpose applies the same constraint to the
# cotangent (with_sharding_constraint is resolved at call time).
ad.deflinear2(sharding_constraint_p,
              lambda ct, _, partitions: (with_sharding_constraint(ct, partitions),))
xla.register_translation(sharding_constraint_p,
                         _sharding_constraint_translation_rule)
def _sharding_constraint_lowering(ctx, x_node, partitions):
    """MLIR lowering: wrap ``x_node`` in a sharding op carrying ``partitions``."""
    sharding_proto = xla.sharding_to_proto(partitions)
    return [mlir.wrap_with_sharding_op(x_node, sharding_proto)]
mlir.register_lowering(sharding_constraint_p, _sharding_constraint_lowering)
def with_sharding_constraint(x, partitions: Optional[PartitionSpec]):
"""Identity-like function that specifies how ``x`` should be sharded.
WARNING: this feature is still under active development! It may not work well,
and may change without warning!
This should only be called inside a function transformed by ``sharded_jit``.
It constrains how the function is sharded: regardless of any other specified
partitions, the compiler will make sure that ``x`` is sharded according to
``partitions``. Note that a ``with_sharding_constraint`` call doesn't
necessarily correspond to a reshard, since the compiler is free to achieve
this sharding as long as the constraint is met, e.g. it might insert a reshard
earlier in the computation. Another way to think of this is that the
``with_sharding_constraint`` call may flow "up" the function to preceding
operations as well as "down" to subsequent ones.
``partitions`` must correspond to the same number of total partitions dictated
by the outer ``sharded_jit`` and any other ``with_sharding_constraint`` calls.
In the case where only replication has been specified, any ``partitions`` are
valid.
Example usage:
@partial(sharded_jit, in_parts=None, out_parts=None, num_shards=2
def f(x):
y = x + 1
y = with_sharding_constraint(y, PartitionSpec(2,1))
return y * 2
In this example, the inputs and outputs of ``f`` will be replicated, but the
inner value of ``y`` will be partitioned in half. ``f`` will run on two
devices due to the with_sharding_constr |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
cl | ass ContactFormConfig(AppConfig):
"""The default AppConfig for admin which do | es autodiscovery."""
name = 'django_contact'
verbose_name = _("Contact") |
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def _future_unary_unary(channel, group, method, timeout, protocol_options,
                        metadata, metadata_transformer, request,
                        request_serializer, response_deserializer):
    """Invoke a unary-unary RPC asynchronously and wrap it in a _Rendezvous."""
    qualified_method = _common.fully_qualified_method(group, method)
    callable_ = channel.unary_unary(
        qualified_method,
        request_serializer=request_serializer,
        response_deserializer=response_deserializer)
    future = callable_.future(
        request,
        timeout=timeout,
        metadata=_effective_metadata(metadata, metadata_transformer),
        credentials=_credentials(protocol_options))
    return _Rendezvous(future, None, future)
def _unary_stream(channel, group, method, timeout, protocol_options, metadata,
                  metadata_transformer, request, request_serializer,
                  response_deserializer):
    """Invoke a unary-stream RPC and wrap its response iterator."""
    callable_ = channel.unary_stream(
        _common.fully_qualified_method(group, method),
        request_serializer=request_serializer,
        response_deserializer=response_deserializer)
    responses = callable_(
        request,
        timeout=timeout,
        metadata=_effective_metadata(metadata, metadata_transformer),
        credentials=_credentials(protocol_options))
    return _Rendezvous(None, responses, responses)
def _blocking_stream_unary(channel, group, method, timeout, with_call,
                           protocol_options, metadata, metadata_transformer,
                           request_iterator, request_serializer,
                           response_deserializer):
    """Invoke a stream-unary RPC synchronously.

    Returns the response, or a (response, _Rendezvous) pair when
    ``with_call`` is true.  grpc.RpcError is translated to an abortion
    error, matching the beta API contract.
    """
    try:
        callable_ = channel.stream_unary(
            _common.fully_qualified_method(group, method),
            request_serializer=request_serializer,
            response_deserializer=response_deserializer)
        effective = _effective_metadata(metadata, metadata_transformer)
        creds = _credentials(protocol_options)
        if not with_call:
            return callable_(
                request_iterator,
                timeout=timeout,
                metadata=effective,
                credentials=creds)
        response, call = callable_.with_call(
            request_iterator,
            timeout=timeout,
            metadata=effective,
            credentials=creds)
        return response, _Rendezvous(None, None, call)
    except grpc.RpcError as rpc_error_call:
        raise _abortion_error(rpc_error_call)
def _future_stream_unary(channel, group, method, timeout, protocol_options,
                         metadata, metadata_transformer, request_iterator,
                         request_serializer, response_deserializer):
    """Invoke a stream-unary RPC asynchronously, returning a _Rendezvous."""
    qualified_method = _common.fully_qualified_method(group, method)
    callable_ = channel.stream_unary(
        qualified_method,
        request_serializer=request_serializer,
        response_deserializer=response_deserializer)
    future = callable_.future(
        request_iterator,
        timeout=timeout,
        metadata=_effective_metadata(metadata, metadata_transformer),
        credentials=_credentials(protocol_options))
    return _Rendezvous(future, None, future)
def _stream_stream(channel, group, method, timeout, protocol_options, metadata,
                   metadata_transformer, request_iterator, request_serializer,
                   response_deserializer):
    """Invoke a stream-stream RPC and wrap its response iterator."""
    callable_ = channel.stream_stream(
        _common.fully_qualified_method(group, method),
        request_serializer=request_serializer,
        response_deserializer=response_deserializer)
    responses = callable_(
        request_iterator,
        timeout=timeout,
        metadata=_effective_metadata(metadata, metadata_transformer),
        credentials=_credentials(protocol_options))
    return _Rendezvous(None, responses, responses)
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
    """Adapts a grpc.Channel unary-unary invocation to the beta face API."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        # Per-method configuration; applied on every invocation by the
        # module-level helper functions.
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request,
                 timeout,
                 metadata=None,
                 with_call=False,
                 protocol_options=None):
        """Synchronously invoke the RPC, blocking for the response."""
        return _blocking_unary_unary(
            self._channel, self._group, self._method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer, request,
            self._request_serializer, self._response_deserializer)

    def future(self, request, timeout, metadata=None, protocol_options=None):
        """Asynchronously invoke the RPC, returning a future-like _Rendezvous."""
        return _future_unary_unary(
            self._channel, self._group, self._method, timeout, protocol_options,
            metadata, self._metadata_transformer, request,
            self._request_serializer, self._response_deserializer)

    def event(self,
              request,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation was never implemented in this adapter.
        raise NotImplementedError()
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
    """Adapts a grpc.Channel unary-stream invocation to the beta face API."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        # Per-method configuration; applied on every invocation.
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request, timeout, metadata=None, protocol_options=None):
        """Invoke the RPC, returning an iterator over the responses."""
        return _unary_stream(
            self._channel, self._group, self._method, timeout, protocol_options,
            metadata, self._metadata_transformer, request,
            self._request_serializer, self._response_deserializer)

    def event(self,
              request,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation was never implemented in this adapter.
        raise NotImplementedError()
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        # Keep the channel and per-method configuration for use by each call.
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
    def __call__(self,
                 request_iterator,
                 timeout,
                 metadata=None,
                 with_call=False,
                 protocol_options=None):
        """Synchronously invoke the stream-unary RPC."""
        return _blocking_stream_unary(
            self._channel, self._group, self._method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)
    def future(self,
               request_iterator,
               timeout,
               metadata=None,
               protocol_options=None):
        """Asynchronously invoke the RPC, returning a future-like _Rendezvous."""
        return _future_stream_unary(
            self._channel, self._group, self._method, timeout, protocol_options,
            metadata, self._metadata_transformer, request_iterator,
            self._request_serializer, self._response_deserializer)
def event(self,
receiver,
abor |
import datetime
class Historico:
    """Transaction history of an account: opening date plus a log of events."""

    def __init__(self):
        # The opening timestamp is captured at construction time.
        self.data_abertura = datetime.datetime.today()
        self.transacoes = []

    def imprime(self):
        """Print the opening date followed by every recorded transaction."""
        print('data abertura: {}'.format(self.data_abertura))
        print('transações: ')
        for t in self.transacoes:
            print('-', t)


class Cliente:
    """A bank customer identified by first name, surname and CPF."""

    def __init__(self, nome, sobrenome, cpf):
        self.nome = nome
        self.sobrenome = sobrenome
        self.cpf = cpf


class Conta:
    """A bank account with number, holder, balance, limit and history."""

    def __init__(self, numero, cliente, saldo, limite=1000.0):
        print('inicializando uma conta')
        self._numero = numero
        self._titular = cliente
        self._saldo = saldo
        # NOTE(review): _limite is stored but never consulted by saca();
        # confirm whether withdrawals should honour the limit.
        self._limite = limite
        self._historico = Historico()

    def deposita(self, valor):
        """Add *valor* to the balance and record the deposit."""
        self._saldo += valor
        self._historico.transacoes.append('depósito de {}'.format(valor))

    def saca(self, valor):
        """Withdraw *valor*; returns False when the balance is insufficient."""
        if (self._saldo < valor):
            return False
        else:
            self._saldo -= valor
            self._historico.transacoes.append('saque de {}'.format(valor))
            return True

    def transfere_para(self, destino, valor):
        """Transfer *valor* to account *destino*; returns True on success."""
        retirou = self.saca(valor)
        if (retirou):
            destino.deposita(valor)
            # Bug fix: the original referenced the non-existent attribute
            # ``destino.numero`` (the attribute is ``_numero``), raising
            # AttributeError on every transfer, and the format string was
            # missing the placeholder for the destination account number.
            self._historico.transacoes.append(
                'transferencia de {} para conta {}'.format(valor, destino._numero))
            return True
        else:
            return False

    def extrato(self):
        """Print number and balance, and log the statement request."""
        print('numero: {} \nsaldo: {}'.format(self._numero, self._saldo))
        self._historico.transacoes.append('tirou extrato - saldo de {}'.format(self._saldo))
|
'''
Created on 03.05.2015
@author: vvladych
'''
from gi.repository import Gtk
from forecastmgmt.model.organisation import Organisation
from masterdata_abstract_window import AbstractAddMask
class OrganisationAddMask(AbstractAddMask):
    """GTK add/edit mask for Organisation master data."""

    def __init__(self, main_window, reset_callback):
        super(OrganisationAddMask, self).__init__(main_window, reset_callback)

    def create_layout(self):
        """Build the grid: UUID row, common-name row, then Save/Back buttons."""
        self.set_column_spacing(5)
        self.set_row_spacing(3)
        placeholder_label = Gtk.Label("")
        placeholder_label.set_size_request(1, 40)
        self.attach(placeholder_label, 0, -1, 1, 1)
        row = 0
        # Row 0: organisation uuid
        self.add_uuid_row("Organisation UUID", row)
        row += 1
        # Row 1: common name
        self.add_common_name_row("Common Name", row)
        row += 1
        # Last row: action buttons.
        save_button = Gtk.Button("Save", Gtk.STOCK_SAVE)
        save_button.connect("clicked", self.save_current_object)
        self.attach(save_button, 1, row, 1, 1)
        back_button = Gtk.Button("Back", Gtk.STOCK_GO_BACK)
        back_button.connect("clicked", self.parent_callback_func, self.reset_callback)
        self.attach(back_button, 2, row, 1, 1)

    def fill_mask_from_current_object(self):
        """Populate the entries from current_object, or clear them."""
        if self.current_object is not None:
            self.uuid_text_entry.set_text(self.current_object.uuid)
            self.common_name_text_entry.set_text(self.current_object.common_name)
        else:
            self.uuid_text_entry.set_text("")
            self.common_name_text_entry.set_text("")

    def create_object_from_mask(self):
        """Validate the form and build a new Organisation, or show an error.

        Returns the new Organisation, or None when validation fails.
        """
        common_name = self.common_name_text_entry.get_text()
        # Bug fix: Gtk.Entry.get_text() returns "" (never None), so the
        # original `is None` check could never fire; treat an empty name
        # as invalid, as the error message intends.
        if not common_name:
            self.show_error_dialog("common name cannot be null")
            return
        organisation = Organisation(None, common_name)
        return organisation
|
import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.hashcompat import md5_constructor
class SessionManager(models.Manager):
    def encode(self, session_dict):
        """
        Returns the given session dictionary pickled and encoded as a string.
        """
        # The md5 of pickled-payload + SECRET_KEY is appended as a
        # tamper-detection signature; Session.get_decoded() verifies it.
        pickled = pickle.dumps(session_dict)
        pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
        return base64.encodestring(pickled + pickled_md5)

    def save(self, session_key, session_dict, expire_date):
        """Persist *session_dict* under *session_key*; empty data deletes it."""
        s = self.model(session_key, self.encode(session_dict), expire_date)
        if session_dict:
            s.save()
        else:
            s.delete()  # Clear sessions with no data.
        return s
class Session(models.Model):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django website).
    """
    session_key = models.CharField(_('session key'), max_length=40,
                                   primary_key=True)
    session_data = models.TextField(_('session data'))
    expire_date = models.DateTimeField(_('expire date'))
    objects = SessionManager()

    class Meta:
        db_table = 'django_session'
        verbose_name = _('session')
        verbose_name_plural = _('sessions')

    def get_decoded(self):
        """Decode, verify and unpickle session_data.

        Raises SuspiciousOperation when the md5 tamper signature does not
        match; returns an empty dict when the verified payload cannot be
        unpickled.
        """
        encoded_data = base64.decodestring(self.session_data)
        # Layout produced by SessionManager.encode(): pickled payload
        # followed by a 32-character md5 hex digest.
        pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
        if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
            from django.core.exceptions import SuspiciousOperation
            raise SuspiciousOperation("User tampered with session cookie.")
        try:
            # Unpickling is only acceptable here because the signature above
            # proves the payload was produced with SECRET_KEY.
            return pickle.loads(pickled)
        # Unpickling can cause a variety of exceptions. If something happens,
        # just return an empty dictionary (an empty session).
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.
            return {}
|
ort sys
import tempfile
import datetime
from xml.etree import ElementTree as etree
from convert import read_messages, read_calls
import yaffs
def read_chunk(fd, size):
    """Read exactly *size* bytes from *fd*.

    Returns the empty string/bytes at a clean end of file; raises IOError
    when the file ends part-way through a chunk (a truncated image).
    """
    data = fd.read(size)
    if data and len(data) != size:
        raise IOError("Broken image file")
    return data
def read_segment(fd):
    """Read one (chunk, packed-tags) pair from the image.

    Returns (None, None) at a clean end of file; raises IOError when only
    one half of the pair is present.
    """
    chunk = read_chunk(fd, yaffs.CHUNK_SIZE)
    spare = read_chunk(fd, yaffs.SPARE_SIZE)
    if not chunk and not spare:
        return None, None
    if not chunk or not spare:
        raise IOError("Broken image file")
    return chunk, yaffs.PackedTags2.from_buffer_copy(spare)
def extract(filename):
    """Extract every object from the YAFFS image into the current directory.

    Walks (chunk, tags) segments; a tags byteCount of 0xffff marks an
    object header, and for files the data chunks immediately follow it.
    (Python 2 code.)
    """
    fd = open(filename, "rb")
    # Map YAFFS object id -> path on disk; the root object maps to ".".
    yaffs_objects = {yaffs.OBJECTID_ROOT: "."}
    while True:
        chunk_data, tags = read_segment(fd)
        if chunk_data is None:
            break
        if tags.t.byteCount == 0xffff:
            header = yaffs.ObjectHeader.from_buffer_copy(chunk_data)
            full_path_name = os.path.join(yaffs_objects[header.parentObjectId], header.name)
            yaffs_objects[tags.t.objectId] = full_path_name
            if header.type == yaffs.FILE:
                remaining = header.fileSize
                out = open(full_path_name, "wb")
                try:
                    # Best effort: restore the original file mode.
                    os.fchmod(out.fileno(), header.yst_mode)
                except:
                    pass
                while remaining > 0:
                    chunk_data, tags = read_segment(fd)
                    # The final chunk may contain padding past fileSize.
                    if remaining < tags.t.byteCount:
                        s = chunk_data[:remaining]
                    else:
                        s = chunk_data[:tags.t.byteCount]
                    out.write(s)
                    remaining -= len(s)
                print "wrote", full_path_name
            elif header.type == yaffs.SYMLINK:
                os.symlink(header.alias, full_path_name)
                print "symlink %s -> %s" % (header.alias, full_path_name)
            elif header.type == yaffs.DIRECTORY:
                try:
                    os.mkdir(full_path_name, 0777)
                    print "created directory %s" % full_path_name
                except OSError, exc:
                    # An already-existing directory is fine; anything else
                    # is re-raised.
                    if "exists" in str(exc):
                        pass
                    else:
                        print str(exc)
                        raise
            elif header.type == yaffs.HARDLINK:
                os.link(yaffs_objects[header.equivalentObjectId], full_path_name)
                print "hardlink %s -> %s" % (yaffs_objects[header.equivalentObjectId], full_path_name)
            else:
                print "skipping unknown object"
def get_files(filename, filenames, callback=None):
    """Return {path: contents} for files in the image whose base name is
    listed in *filenames*.

    *callback*, when given, is invoked with each object header seen (used
    for progress reporting).  Files not in *filenames* are skipped by
    seeking past their data chunks.  (Python 2 code.)
    """
    fd = open(filename, "rb")
    # Map YAFFS object id -> path; the root object maps to ".".
    yaffs_objects = {yaffs.OBJECTID_ROOT: "."}
    rc = {}
    while True:
        chunk_data, tags = read_segment(fd)
        if chunk_data is None:
            break
        if tags.t.byteCount == 0xffff:
            header = yaffs.ObjectHeader.from_buffer_copy(chunk_data)
            full_path_name = os.path.join(yaffs_objects[header.parentObjectId], header.name)
            yaffs_objects[tags.t.objectId] = full_path_name
            if callback is not None:
                callback(header)
            if header.type == yaffs.FILE:
                remaining = header.fileSize
                contents = ""
                if header.name in filenames:
                    # Wanted file: accumulate its data chunks.
                    while remaining > 0:
                        chunk_data, tags = read_segment(fd)
                        if remaining < tags.t.byteCount:
                            s = chunk_data[:remaining]
                        else:
                            s = chunk_data[:tags.t.byteCount]
                        contents += s
                        remaining -= len(s)
                    rc[full_path_name] = contents
                else:
                    # Unwanted file: seek past its (chunk + spare) segments.
                    blocks = (remaining + yaffs.CHUNK_SIZE - 1) / yaffs.CHUNK_SIZE
                    fd.seek(blocks*(yaffs.CHUNK_SIZE+yaffs.SPARE_SIZE), 1)
    return rc
def dotty(header):
    """Progress callback: print '+' for database files, '.' otherwise."""
    marker = "+" if header.name.endswith(".db") else "."
    sys.stdout.write(marker)
    sys.stdout.flush()
def get_save_filename(filename=""):
    """Prompt for an output filename, confirming before overwriting.

    *filename* is offered as the default; loops until the user supplies a
    name that either does not exist yet or that they agree to overwrite.
    (Python 2 code.)
    """
    while True:
        if filename == "":
            new_filename = raw_input("Save as: ")
        else:
            new_filename = raw_input("Save as (empty=%s): " % filename)
        # No default available and nothing typed: ask again.
        if new_filename == "" and filename == "":
            continue
        if new_filename != "":
            filename = new_filename
        try:
            os.stat(filename)
            ans = raw_input("Warning: %s already exists, overwrite (y/n)? " % filename)
            if ans.lower().startswith("y"):
                break
        except OSError:
            # The file does not exist, so the name is safe to use.
            break
    return filename
def save(filename, content):
    """Write *content* to a user-confirmed destination file.

    Bug fix: the original never closed the file object, leaving the flush
    and close to the garbage collector; a context manager closes it
    deterministically.
    """
    with open(get_save_filename(filename), "wb") as out:
        out.write(content)
def extract_sms(content):
    """Convert a raw mmssms.db blob to an XML file of SMS messages.

    Writes *content* to a temporary file, parses it with read_messages(),
    and saves the result under a timestamped name chosen via
    get_save_filename().  (Python 2 code.)
    """
    fd_n, name = tempfile.mkstemp()
    fd = os.fdopen(fd_n, "wb")
    try:
        fd.write(content)
        fd.close()
        messages = read_messages(name)
        print "Read %s messages" % str(messages.attrib["count"])
        # Name the output after the newest message (epoch millis -> local time).
        newest = datetime.datetime.fromtimestamp(int(messages.getchildren()[0].attrib["date"])/1000)
        output = newest.strftime("sms-%Y%m%d%H%M%S.xml")
        etree.ElementTree(messages).write(get_save_filename(output),
                                          encoding="utf-8",
                                          xml_declaration=True)
    except Exception, exc:
        # Best effort: report and continue rather than abort the session.
        print "Failed to extract messages: %s" % exc
        print repr(exc)
    finally:
        try:
            os.unlink(name)
        except:
            print "Warning: failed to remove temporary file %s" % name
def extract_calls(content):
    """Convert a raw contacts2.db blob to an XML file of call logs.

    Mirrors extract_sms(): stages *content* in a temporary file, parses it
    with read_calls(), and saves a timestamped XML file.  (Python 2 code.)
    """
    fd, name = tempfile.mkstemp()
    # Rebind the raw descriptor as a file object for writing.
    fd = os.fdopen(fd, "wb")
    try:
        fd.write(content)
        fd.close()
        calls = read_calls(name)
        print "Read %s calls" % str(calls.attrib["count"])
        # Name the output after the newest call (epoch millis -> local time).
        newest = datetime.datetime.fromtimestamp(int(calls.getchildren()[0].attrib["date"])/1000)
        output = newest.strftime("calls-%Y%m%d%H%M%S.xml")
        etree.ElementTree(calls).write(get_save_filename(output),
                                       encoding="utf-8",
                                       xml_declaration=True)
    except Exception, exc:
        print "Failed to extract calls: %s" % exc
    finally:
        try:
            os.unlink(name)
        except:
            print "Warning: failed to remove temporary file %s" % name
def interactive(filename):
    """Interactive console loop: pick a found database file and act on it.

    Scans the image for mmssms.db / contacts2.db, then repeatedly offers
    save / extract-SMS / extract-calls actions until the user quits.
    (Python 2 code.)
    """
    print "Scanning and reading image (this may take some time)"
    r = get_files(filename, ["mmssms.db", "contacts2.db"], dotty)
    print ""
    while True:
        print
        print "Found files:"
        names = r.keys()
        for i, n in enumerate(names):
            print "[%d] %s" % (i+1, n)
        # Menu is 1-based; 0 (or any out-of-range number) exits the loop.
        n = int(raw_input("Enter file number to extract (0 to quit): ")) - 1
        if n < 0 or n >= len(names):
            break
        name = names[n]
        print "File %s selected." % name
        print "Possible actions:"
        print "[f] save file"
        print "[s] extract SMS messages from file"
        print "[c] extract Call logs from file"
        t = raw_input("Please choose action: ")
        t = t.lower()
        if t.startswith("f"):
            save(os.path.basename(names[n]), r[names[n]])
        elif t.startswith("s"):
            extract_sms(r[names[n]])
        elif t.startswith("c"):
            extract_calls(r[names[n]])
def main():
parser = optparse.OptionParser(usage="%prog [options...] data.img")
parser.add_option("-x", "--extract", action="store_true",
help="Don't search for required databases, just extract the filesystem")
parser.add_option("-s", "--sms", action="store_true",
help="Input file is mmssms.db, just convert SMS messages to XML format.")
parser.add_option("-c", "--calls", action="store_true",
help="Input file is contacts2.db, just convert Call Logs to XML format.")
parser.description = "This prog |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import atexit
import uuid
from ..utils.multi_dict import MultiDict
class WebSocketManager(object):
    """Tracks websocket connections and their topic subscriptions.

    Maintains a bidirectional topic<->clientid mapping plus a
    clientid->socket table, and fans messages out to every socket
    subscribed to a topic.
    """

    def __init__(self):
        self.sockets = {}
        self.topic_clientid_map = MultiDict()
        self.clientid_topic_map = MultiDict()
        # authtype name -> callable(authtoken, topic) returning a truthy value
        self.auth_functions = {}
        atexit.register(self._atexit)

    def _atexit(self):
        # Diagnostic only: all sockets should have been removed by now.
        if len(self.sockets) != 0:
            log.warning("Not all websocket connections were closed properly")

    def remove_clientid(self, clientid):
        """Drop *clientid* from every topic it is subscribed to."""
        topics = self.clientid_topic_map.get(clientid, [])
        for topic in topics:
            self.topic_clientid_map.remove_val(topic, clientid)

    def remove_topic(self, topic):
        """Drop *topic* from every client subscribed to it."""
        # Bug fix: use a default like remove_clientid does, so removing an
        # unknown topic does not attempt to iterate over None.
        clientids = self.topic_clientid_map.get(topic, [])
        for clientid in clientids:
            self.clientid_topic_map.remove_val(clientid, topic)

    def subscribe_socket(self, socket, topic, clientid=None):
        """Subscribe *socket* to *topic*, generating a clientid if needed."""
        if clientid is None:
            clientid = str(uuid.uuid4())
        self.subscribe(clientid, topic)
        self.add_socket(socket, clientid)

    def can_subscribe(self, clientid, topic):
        # Authorization hook; currently permits everything.
        return True

    def register_auth(self, authtype, func):
        """Register *func* as the auth check for topics of type *authtype*."""
        self.auth_functions[authtype] = func

    def auth(self, authtoken, topic):
        """Check *authtoken* against the auth function for *topic*.

        Topics have the syntax ``type:value``; the type selects the auth
        function.  Topic types without a registered function are allowed.
        """
        authtype, topic = topic.split(":", 1)
        if self.auth_functions.get(authtype):
            return self.auth_functions[authtype](authtoken, topic)
        else:
            return True

    def subscribe(self, clientid, topic):
        """Record the topic<->clientid subscription (if permitted)."""
        if self.can_subscribe(clientid, topic):
            log.debug("subscribe %s, %s", topic, clientid)
            self.topic_clientid_map.add(topic, clientid)
            self.clientid_topic_map.add(clientid, topic)

    def add_socket(self, socket, clientid):
        log.debug("add socket %s", clientid)
        self.sockets[clientid] = socket

    def remove_socket(self, clientid):
        log.debug("remove socket %s", clientid)
        self.sockets.pop(clientid, None)

    def send(self, topic, msg, exclude=None):
        """Send ``topic:msg`` to every subscribed socket except *exclude*.

        Sockets whose write fails (e.g. already-disconnected clients) are
        dropped from all bookkeeping.
        """
        if exclude is None:
            exclude = set()
        log.debug("sending to %s", self.topic_clientid_map.get(topic, []))
        # Copy to a tuple: a failed write mutates the mapping mid-iteration.
        for clientid in tuple(self.topic_clientid_map.get(topic, [])):
            socket = self.sockets.get(clientid, None)
            if not socket:
                continue
            if clientid in exclude:
                continue
            try:
                socket.write_message(topic + ":" + msg)
            except Exception as e:  # e.g. the client already disconnected
                log.exception(e)
                self.remove_socket(clientid)
                self.remove_clientid(clientid)
|
coder_rnn_dropout_inputs = args.rnn_dropout_inputs
_, decoder_rnn_dropout_states = args.rnn_dropout_states
_, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
config_decoder = decoder.RecurrentDecoderConfig(
max_seq_len_source=max_seq_len_source,
rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
num_hidden=args.rnn_num_hidden,
num_layers=decoder_num_layers,
dropout_inputs=decoder_rnn_dropout_inputs,
dropout_states=decoder_rnn_dropout_states,
dropout_recurrent=decoder_rnn_dropout_recurrent,
residual=args.rnn_residual_connections,
first_residual_layer=args.rnn_first_residual_layer,
forget_bias=args.rnn_forget_bias,
lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc)),
attention_config=config_attention,
hidden_dropout=args.rnn_decoder_hidden_dropout,
state_init=args.rnn_decoder_state_init,
context_gating=args.rnn_context_gating,
layer_normalization=args.layer_normalization,
attention_in_upper_layers=args.rnn_attention_in_upper_layers,
state_init_lhuc=args.lhuc is not None and (C.LHUC_STATE_INIT in args.lhuc or C.LHUC_ALL in args.lhuc),
enc_last_hidden_concat_to_embedding=args.rnn_enc_last_hidden_concat_to_embedding)
return config_decoder
def check_encoder_decoder_args(args) -> None:
    """
    Check possible encoder-decoder argument conflicts.

    Warns when stacked dropout layers are configured and enforces that
    recurrent dropout is only used with LSTM cells.

    :param args: Arguments as returned by argparse.
    """
    encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
    encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
    encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
    if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
        logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
        # Bug fix: this warning previously repeated the encoder/source text.
        logger.warning("Setting decoder RNN AND target embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
    if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
        check_condition(args.rnn_cell_type == C.LSTM_TYPE,
                        "Recurrent dropout without memory loss only supported for LSTMs right now.")
def create_model_config(args: argparse.Namespace,
                        source_vocab_sizes: List[int],
                        target_vocab_size: int,
                        max_seq_len_source: int,
                        max_seq_len_target: int,
                        config_data: data_io.DataConfig) -> model.ModelConfig:
    """
    Create a ModelConfig from the argument given in the command line.

    :param args: Arguments as returned by argparse.
    :param source_vocab_sizes: The size of the source vocabulary (and source factors).
    :param target_vocab_size: The size of the target vocabulary.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :param config_data: Data config.
    :return: The model configuration.
    """
    num_embed_source, num_embed_target = args.num_embed
    embed_dropout_source, embed_dropout_target = args.embed_dropout
    # First entry is the primary source vocabulary; the rest are factors.
    source_vocab_size, *source_factor_vocab_sizes = source_vocab_sizes
    check_encoder_decoder_args(args)
    config_conv = None
    if args.encoder == C.RNN_WITH_CONV_EMBED_NAME:
        config_conv = encoder.ConvolutionalEmbeddingConfig(num_embed=num_embed_source,
                                                           max_filter_width=args.conv_embed_max_filter_width,
                                                           num_filters=args.conv_embed_num_filters,
                                                           pool_stride=args.conv_embed_pool_stride,
                                                           num_highway_layers=args.conv_embed_num_highway_layers,
                                                           dropout=args.conv_embed_dropout)
    if args.encoder == C.TRANSFORMER_WITH_CONV_EMBED_TYPE:
        # Same as above, but additionally pins output_dim to the embedding size.
        config_conv = encoder.ConvolutionalEmbeddingConfig(num_embed=num_embed_source,
                                                           output_dim=num_embed_source,
                                                           max_filter_width=args.conv_embed_max_filter_width,
                                                           num_filters=args.conv_embed_num_filters,
                                                           pool_stride=args.conv_embed_pool_stride,
                                                           num_highway_layers=args.conv_embed_num_highway_layers,
                                                           dropout=args.conv_embed_dropout)
    config_encoder, encoder_num_hidden = create_encoder_config(args, max_seq_len_source, max_seq_len_target,
                                                               config_conv)
    config_decoder = create_decoder_config(args, encoder_num_hidden, max_seq_len_source, max_seq_len_target)
    # One FactorConfig per additional source factor vocabulary.
    source_factor_configs = None
    if len(source_vocab_sizes) > 1:
        source_factor_configs = [encoder.FactorConfig(size, dim) for size, dim in zip(source_factor_vocab_sizes,
                                                                                     args.source_factors_num_embed)]

    config_embed_source = encoder.EmbeddingConfig(vocab_size=source_vocab_size,
                                                  num_embed=num_embed_source,
                                                  dropout=embed_dropout_source,
                                                  factor_configs=source_factor_configs)
    config_embed_target = encoder.EmbeddingConfig(vocab_size=target_vocab_size,
                                                  num_embed=num_embed_target,
                                                  dropout=embed_dropout_target)
    config_loss = loss.LossConfig(name=args.loss,
                                  vocab_size=target_vocab_size,
                                  normalization_type=args.loss_normalization_type,
                                  label_smoothing=args.label_smoothing)
    model_config = model.ModelConfig(config_data=config_data,
                                     vocab_source_size=source_vocab_size,
                                     vocab_target_size=target_vocab_size,
                                     config_embed_source=config_embed_source,
                                     config_embed_target=config_embed_target,
                                     config_encoder=config_encoder,
                                     config_decoder=config_decoder,
                                     config_loss=config_loss,
                                     weight_tying=args.weight_tying,
                                     weight_tying_type=args.weight_tying_type if args.weight_tying else None,
                                     weight_normalization=args.weight_normalization,
                                     lhuc=args.lhuc is not None)
    return model_config
def create_training_model(config: model.ModelConfig,
context: List[mx.Context],
output_dir: str,
train_iter: data_io.BaseParallelSampleIter,
args: argparse.Namespace) -> training.TrainingModel:
"""
Create a training model and load the parameters from disk if needed.
:param config: The configuration for the model.
:param context: The context |
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import pickling as m
try:
import cPickle as pickle # Use cPickle on Python 2.7
exc | ept ImportError:
import pickle
@pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"])
def test_roundtrip(cls_name):
    """Pickling then unpickling preserves value and both extras."""
    pickleable_cls = getattr(m, cls_name)
    original = pickleable_cls("test_value")
    original.setExtra1(15)
    original.setExtra2(48)
    restored = pickle.loads(pickle.dumps(original, 2))  # Must use pickle protocol >= 2
    assert restored.value() == original.value()
    assert restored.extra1() == original.extra1()
    assert restored.extra2() == original.extra2()
@pytest.mark.xfail("env.PYPY")
@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"])
def test_roundtrip_with_dict(cls_name):
    """A round-trip also restores dynamic attributes stored in __dict__."""
    dict_cls = getattr(m, cls_name)
    original = dict_cls("test_value")
    original.extra = 15
    original.dynamic = "Attribute"
    restored = pickle.loads(pickle.dumps(original, pickle.HIGHEST_PROTOCOL))
    assert restored.value == original.value
    assert restored.extra == original.extra
    assert restored.dynamic == original.dynamic
def test_enum_pickle():
    """Bound enum values survive a pickle round-trip with protocol 2."""
    from pybind11_tests import enums as e
    roundtripped = pickle.loads(pickle.dumps(e.EOne, 2))
    assert e.EOne == roundtripped
|
# dialogs - provide common dialogs
#
# Copyright (c) 2006 FSF Europe
#
# Authors:
# Sebastian Heinlein <glatzor@ubuntu.com>
# Michael Vogt <mvo@canonical.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHA | NTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, | MA 02111-1307
# USA
from gi.repository import Gtk
def show_error_dialog(parent, primary, secondary):
    """Run a modal error dialog: bold *primary* markup, plain *secondary* text."""
    dialog = Gtk.MessageDialog(parent, Gtk.DialogFlags.MODAL,
                               Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE, "")
    markup = "<span weight=\"bold\" size=\"larger\">%s</span>" % primary
    dialog.set_markup(markup)
    dialog.format_secondary_text(secondary)
    dialog.run()
    dialog.hide()
|
License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. if run on July 4th, it generates usages for
July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
import datetime
import sys
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n # noqa
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.volume_utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
    """Apply CLI start/end overrides and validate the audit window.

    Returns the (begin, end) pair as UTC-aware datetimes; exits the
    process with status -1 when end does not come after begin.
    """
    time_format = "%Y-%m-%d %H:%M:%S"
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time, time_format)
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time, time_format)
    # Normalize both bounds to UTC so the comparison below is meaningful.
    begin = begin.replace(tzinfo=iso8601.UTC)
    end = end.replace(tzinfo=iso8601.UTC)
    if end <= begin:
        LOG.error(_("The end time (%(end)s) must be after the start "
                    "time (%(start)s).") % {'start': begin, 'end': end})
        sys.exit(-1)
    return begin, end
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
    """Emit an 'exists' usage notification for a single volume.

    Any failure is logged and swallowed so one bad record cannot abort
    the whole audit run.
    """
    try:
        LOG.debug("Send exists notification for <volume_id: "
                  "%(volume_id)s> <project_id %(project_id)s> "
                  "<%(extra_info)s>",
                  {'volume_id': volume_ref.id,
                   'project_id': volume_ref.project_id,
                   'extra_info': extra_info})
        cinder.volume.volume_utils.notify_about_volume_usage(
            admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
    except Exception as exc_msg:
        LOG.error("Exists volume notification failed: %s",
                  exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
    """Emit an 'exists' usage notification for a single snapshot.

    Any failure is logged and swallowed so one bad record cannot abort
    the whole audit run.
    """
    try:
        LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                  "<project_id %(project_id)s> <%(extra_info)s>",
                  {'snapshot_id': snapshot_ref.id,
                   'project_id': snapshot_ref.project_id,
                   'extra_info': extra_info})
        cinder.volume.volume_utils.notify_about_snapshot_usage(
            admin_context, snapshot_ref, 'exists', extra_info)
    except Exception as exc_msg:
        LOG.error("Exists snapshot notification failed: %s",
                  exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
"""backup_ref notify usage"""
try:
cinder.volume.volume_utils.notify_about_backup_usage(
admin_context, backup_ref, 'exists', extra_info)
LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'backup_id': backup_ref.id,
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
                    begin, end, notify_about_usage, type_id_str, type_name):
    """Emit the 'exists' notification for obj_ref and, when --send_actions
    is set, replay its create/delete events that fall inside [begin, end].
    """
    _notify_usage(LOG, obj_ref, extra_info, admin_context)
    if CONF.send_actions:
        # Only re-send create/delete events that occurred within the window.
        if begin < obj_ref.created_at < end:
            _create_action(obj_ref, admin_context, LOG,
                           notify_about_usage, type_id_str, type_name)
        if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
            _delete_action(obj_ref, admin_context, LOG,
                           notify_about_usage, type_id_str, type_name)
def main():
objects.register_all()
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
begin, end = _time_error(LOG, begin, end)
LOG.info("Starting volume usage audit")
LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
{"begi |
import graphene
from ...core.permissions import DiscountPermissions
from ...discount import models
from ..core.mutations import ModelBulkDeleteMutation
class SaleBulkDelete(ModelBulkDeleteMutation):
    # GraphQL bulk mutation: deletes many Sale objects in one request.
    # Requires the MANAGE_DISCOUNTS permission.
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of sale IDs to delete."
        )
    class Meta:
        description = "Deletes sales."
        model = models.Sale
        permissions = (DiscountPermissions.MANAGE_DISCOUNTS,)
class VoucherBulkDelete(ModelBulkDeleteMutation):
    # GraphQL bulk mutation: deletes many Voucher objects in one request.
    # Requires the MANAGE_DISCOUNTS permission.
    class Arguments:
        ids = graphene.List(
            graphene.ID, required=True, description="List of voucher IDs to delete."
        )
    class Meta:
        description = "Deletes vouchers."
        model = models.Voucher
        permissions = (DiscountPermissions.MANAGE_DISCOUNTS,)
|
== exp_version
    @classmethod
    def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
        """Defer a stats-update task for the given exploration version.

        The payload is dropped (with an error log) when it references an
        'undefined' state name; the task is only deferred when
        cls._is_latest_version(...) reports this version as current.
        """
        if 'undefined' in aggregated_stats['state_stats_mapping']:
            logging.error(
                'Aggregated stats contains an undefined state name: %s'
                % list(aggregated_stats['state_stats_mapping'].keys()))
            return
        if cls._is_latest_version(exploration_id, exp_version):
            taskqueue_services.defer(
                taskqueue_services.FUNCTION_ID_UPDATE_STATS,
                taskqueue_services.QUEUE_NAME_STATS,
                exploration_id,
                exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
    """Event handler for recording answer submissions."""
    EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
    @classmethod
    def _handle_event(
            cls, exploration_id, exploration_version, state_name,
            interaction_id, answer_group_index, rule_spec_index,
            classification_categorization, session_id, time_spent_in_secs,
            params, normalized_answer):
        """Records an event when an answer triggers a rule. The answer recorded
        here is a Python-representation of the actual answer submitted by the
        user.
        """
        # TODO(sll): Escape these args?
        # Persist the full answer payload for later analysis.
        stats_services.record_answer(
            exploration_id, exploration_version, state_name, interaction_id,
            stats_domain.SubmittedAnswer(
                normalized_answer, interaction_id, answer_group_index,
                rule_spec_index, classification_categorization, params,
                session_id, time_spent_in_secs))
        # Answers that fell through to the default outcome are flagged as
        # not having received useful (tailored) feedback.
        feedback_is_useful = (
            classification_categorization != (
                exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
        stats_models.AnswerSubmittedEventLogEntryModel.create(
            exploration_id, exploration_version, state_name, session_id,
            time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
    """Event handler for recording exploration actual start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.ExplorationActualStartEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
    """Event handler for recording solution hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.SolutionHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, params,
            play_type):
        # Log the start event, then bump the owners' total-plays counters.
        stats_models.StartExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, params,
            play_type)
        handle_exploration_start(exp_id)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration leave events."""
    EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration completion events."""
    EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.CompleteExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration rating events."""
    EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
    @classmethod
    def _handle_event(cls, exp_id, user_id, rating, old_rating):
        # Log the rating change, then refresh the owners' average ratings.
        stats_models.RateExplorationEventLogEntryModel.create(
            exp_id, user_id, rating, old_rating)
        handle_exploration_rating(exp_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
    """Event handler for recording state hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
    # TODO(sll): Remove params before sending this event to the jobs taskqueue.
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            params, play_type):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.StateHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
    """Event handler for recording state complete events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.StateCompleteEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class LeaveForRefresherExpEventHandler(BaseEventHandler):
    """Event handler for recording "leave for refresher exploration" events."""
    EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP
    @classmethod
    def _handle_event(
            cls, exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        # Append a raw log-entry row; no aggregation happens here.
        stats_models.LeaveForRefresherExplorationEventLogEntryModel.create(
            exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class FeedbackThreadCreatedEventHandler(BaseEventHandler):
    """Event handler for recording new feedback thread creation events."""
    EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED
    @classmethod
    def _handle_event(cls, exp_id):
        # Delegates straight to the feedback service; no log entry here.
        feedback_services.handle_new_thread_created(exp_id)
class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
    """Event handler for recording reopening feedback thread events."""
    EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED
    @classmethod
    def _handle_event(cls, exp_id, old_status, new_status):
        # Delegates straight to the feedback service; no log entry here.
        feedback_services.handle_thread_status_changed(
            exp_id, old_status, new_status)
def handle_exploration_start(exp_id):
    """Handles a user's start of an exploration.

    Args:
        exp_id: str. The exploration which has been started.
    """
    summary = exp_fetchers.get_exploration_summary_by_id(exp_id)
    if not summary:
        return
    # Credit a play to every owner of the exploration.
    for owner_id in summary.owner_ids:
        _increment_total_plays_count_transactional(owner_id)
def handle_exploration_rating(exp_id, rating, old_rating):
    """Handles a new rating for an exploration.

    Args:
        exp_id: str. The exploration which has been rated.
        rating: int. The new rating of the exploration.
        old_rating: int. The previous rating, before this change.
    """
    summary = exp_fetchers.get_exploration_summary_by_id(exp_id)
    if not summary:
        return
    # Refresh the aggregate rating for every owner of the exploration.
    for owner_id in summary.owner_ids:
        _refresh_average_ratings_transactional(owner_id, rating, old_rating)
@transaction_services.run_in_transaction_wrapper
def _refresh_average_ratings_transactional(user_id, new_rating, old_rating):
"""Refreshes the average rating for a user.
Args:
user_id: str. The id of the user.
|
'''
'''
from __future__ import absolute_import
from ...exceptions import ProtocolError
# Maps (msgtype, revision) -> Message class for the protocol.
index = {}


def register(cls):
    """Decorator: add a Message class (keyed by msgtype and revision) to
    the Protocol index. Registering the same key twice is an error.
    """
    key = (cls.msgtype, cls.revision)
    if key not in index:
        index[key] = cls
        return cls
    raise ProtocolError("Duplicate message specification encountered: %r" % key)
from .ack import *
from .ok import *
from .patch_doc import *
from .pull_doc_req import *
from .pull_doc_reply import *
from .push_doc import *
from .error import *
from .server_info_reply import *
from .server_info_req import *
from .workin | g import *
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for adding and editing sheriff rotations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import json
from google.appengine.api import users
from google.appengine.ext import ndb
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import xsrf
from dashboard.models import table_config
class CreateHealthReportHandler(request_handler.RequestHandler):
  """Serves the health-report UI and CRUD operations on TableConfig."""
  def get(self):
    """Renders the UI with the form fields."""
    self.RenderStaticHtml('create_health_report.html')
  def post(self):
    """POSTS the data to the datastore."""
    # Auth failures are reported as JSON payloads, not HTTP error codes.
    user = users.get_current_user()
    if not user:
      self.response.out.write(json.dumps({'error': 'User not logged in.'}))
      return
    if not utils.IsInternalUser():
      self.response.out.write(json.dumps(
          {'error':
           'Unauthorized access, please use chromium account to login.'}))
      return
    # One endpoint multiplexes four operations, selected by form fields.
    get_token = self.request.get('getToken')
    get_table_config_list = self.request.get('getTableConfigList')
    get_table_config_details = self.request.get('getTableConfigDetails')
    if get_token == 'true':
      values = {}
      self.GetDynamicVariables(values)
      self.response.out.write(json.dumps({
          'xsrf_token': values['xsrf_token'],
      }))
    elif get_table_config_list:
      self._GetTableConfigList()
    elif get_table_config_details:
      self._GetTableConfigDetails(get_table_config_details)
    else:
      self._CreateTableConfig()
  def _GetTableConfigList(self):
    """Writes the ids of all stored TableConfig entities as JSON."""
    query = table_config.TableConfig.query()
    table_config_list = query.fetch(keys_only=True)
    return_list = []
    for config in table_config_list:
      return_list.append(config.id())
    self.response.out.write(json.dumps({
        'table_config_list': return_list,
    }))
  def _GetTableConfigDetails(self, config_name):
    """Writes the bots/tests/layout of one TableConfig as JSON."""
    config_entity = ndb.Key('TableConfig', config_name).get()
    if config_entity:
      master_bot_list = []
      for bot in config_entity.bots:
        # Bots are stored as child keys; render as "master/bot".
        master_bot_list.append(bot.parent().string_id() + '/' + bot.string_id())
      self.response.out.write(json.dumps({
          'table_name': config_name,
          'table_bots': master_bot_list,
          'table_tests': config_entity.tests,
          'table_layout': config_entity.table_layout
      }))
    else:
      self.response.out.write(json.dumps({
          'error': 'Invalid config name.'
      }))
  def _CreateTableConfig(self):
    """Creates a table config. Writes a valid name or an error message."""
    self._ValidateToken()
    name = self.request.get('tableName')
    master_bot = self.request.get('tableBots').splitlines()
    tests = self.request.get('tableTests').splitlines()
    table_layout = self.request.get('tableLayout')
    override = int(self.request.get('override'))
    user = users.get_current_user()
    if not name or not master_bot or not tests or not table_layout or not user:
      self.response.out.write(json.dumps({
          'error': 'Please fill out the form entirely.'
      }))
      return
    try:
      created_table = table_config.CreateTableConfig(
          name=name, bots=master_bot, tests=tests, layout=table_layout,
          username=user.email(), override=override)
    except table_config.BadRequestError as error:
      # NOTE(review): ``error.message`` is Python 2-only; presumably fine
      # under the App Engine py2 runtime — confirm before porting to py3.
      self.response.out.write(json.dumps({
          'error': error.message,
      }))
      logging.error('BadRequestError: %r', error.message)
      return
    if created_table:
      self.response.out.write(json.dumps({
          'name': name,
      }))
    else:
      self.response.out.write(json.dumps({
          'error': 'Could not create table.',
      }))
      logging.error('Could not create table.')
  def _ValidateToken(self):
    """Aborts the request with 403 unless the XSRF token is valid."""
    user = users.get_current_user()
    token = str(self.request.get('xsrf_token'))
    if not user or not xsrf._ValidateToken(token, user):
      self.abort(403)
|
""" progressbar2 related utils"""
from codekit.codetools import warn
from public import public
from time import sleep
import progressbar
import functools
@public
def setup_logging(verbosity=0):
    """Configure progressbar sys.stderr wrapper which is required to play nice
    with logging and not have strange formatting artifacts.
    """
    # NOTE(review): ``verbosity`` is currently unused here — confirm whether
    # it is meant to configure log levels or is kept for API symmetry.
    progressbar.streams.wrap_stderr()
@public
def countdown_timer(seconds=10):
    """Show a simple countdown progress bar.

    Parameters
    ----------
    seconds
        Period of time the progress bar takes to reach zero.
    """
    tick = 0.1  # seconds per bar update
    total_ticks = int(seconds / tick)
    bar = progressbar.ProgressBar(
        widgets=['Pause for panic: ', progressbar.ETA(), ' ',
                 progressbar.Bar()],
        max_value=total_ticks,
    ).start()
    for step in range(total_ticks):
        bar.update(step)
        sleep(tick)
    bar.finish()
@public
def wait_for_user_panic(**kwargs):
    """Display a scary message and a countdown progress bar so an interactive
    user has a chance to panic and kill the program.
    Parameters
    ----------
    kwargs
        Passed verbatim to countdown_timer()
    """
    warn('Now is the time to panic and Ctrl-C')
    countdown_timer(**kwargs)
@public
@functools.lru_cache()
def wait_for_user_panic_once(**kwargs):
    """Same functionality as wait_for_user_panic() but will only display a
    countdown once, regardless of how many times it is called.

    Note: the "once" behavior comes from lru_cache, which keys on the
    kwargs — calling again with *different* kwargs shows the countdown again.
    Parameters
    ----------
    kwargs
        Passed verbatim to wait_for_user_panic()
    """
    wait_for_user_panic(**kwargs)
@public
def eta_bar(msg, max_value):
    """Build and return an adaptive-ETA progress bar prefixed with a message.

    Parameters
    ----------
    msg: str
        Message to prefix countdown bar line with
    max_value: max_value
        The max number of progress bar steps/updates
    """
    prefix = "{msg}:".format(msg=msg)
    widgets = [prefix, progressbar.Bar(), ' ', progressbar.AdaptiveETA()]
    return progressbar.ProgressBar(widgets=widgets, max_value=max_value)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.scala.goals.tailor import classify_source_files
from pants.backend.scala.target_types imp | ort (
ScalaJunitTestsGeneratorTarget,
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
)
def test_classify_source_files() -> None:
    """Files ending in Spec -> scalatest, Test -> junit, others -> sources."""
    scalatest_files = {
        "foo/bar/BazSpec.scala",
    }
    junit_files = {
        "foo/bar/BazTest.scala",
    }
    lib_files = {"foo/bar/Baz.scala"}
    assert {
        ScalatestTestsGeneratorTarget: scalatest_files,
        ScalaJunitTestsGeneratorTarget: junit_files,
        ScalaSourcesGeneratorTarget: lib_files,
    } == classify_source_files(junit_files | lib_files | scalatest_files)
|
#=====
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPL | ARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF AD | VISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=====
# Subd Render
#
# This cookbook example demonstrates how to load & render a mesh primitive and
# render it as a subdivision surface.
#
#=====
from IECore import *
class subdRender(ParameterisedProcedural) :
    """Procedural that loads a mesh from 'path' and renders it as a
    Catmull-Clark subdivision surface."""
    def __init__(self) :
        ParameterisedProcedural.__init__( self, "Renders a mesh as a subd." )
        path = PathParameter( "path", "Path", "" )
        self.parameters().addParameter( path )
    def doBound(self, args) :
        # Bound comes from a fresh read of the mesh at 'path'.
        geo = Reader.create( args['path'].value ).read()
        return geo.bound()
    def doRenderState(self, renderer, args) :
        pass
    def doRender(self, renderer, args) :
        geo = Reader.create( args['path'].value ).read()
        # Switching interpolation makes the renderer treat the mesh as a subd.
        geo.interpolation = "catmullClark"
        geo.render( renderer )
# register
registerRunTimeTyped( subdRender )
|
import unittest
import dbt.exceptions
import dbt.utils
from dbt.parser.schema_renderer import SchemaYamlRenderer
class TestYamlRendering(unittest.TestCase):
    """Checks which schema-yaml fields SchemaYamlRenderer renders with jinja
    and which (descriptions) it deliberately leaves untouched."""
    def test__models(self):
        """Model attributes are rendered; descriptions are not."""
        context = {
            "test_var": "1234",
            "alt_var": "replaced",
        }
        renderer = SchemaYamlRenderer(context, 'models')
        # Verify description is not rendered and misc attribute is rendered
        dct = {
            "name": "my_model",
            "description": "{{ test_var }}",
            "attribute": "{{ test_var }}",
        }
        expected = {
            "name": "my_model",
            "description": "{{ test_var }}",
            "attribute": "1234",
        }
        dct = renderer.render_data(dct)
        self.assertEqual(expected, dct)
        # Verify description in columns is not rendered
        dct = {
            'name': 'my_test',
            'attribute': "{{ test_var }}",
            'columns': [
                {'description': "{{ test_var }}", 'name': 'id'},
            ]
        }
        expected = {
            'name': 'my_test',
            'attribute': "1234",
            'columns': [
                {'description': "{{ test_var }}", 'name': 'id'},
            ]
        }
        dct = renderer.render_data(dct)
        self.assertEqual(expected, dct)
    def test__sources(self):
        """Source dicts only carry descriptions, so nothing is rendered."""
        context = {
            "test_var": "1234",
            "alt_var": "replaced",
        }
        renderer = SchemaYamlRenderer(context, 'sources')
        # Only descriptions have jinja, none should be rendered
        dct = {
            "name": "my_source",
            "description": "{{ alt_var }}",
            "tables": [
                {
                    "name": "my_table",
                    "description": "{{ alt_var }}",
                    "columns": [
                        {
                            "name": "id",
                            "description": "{{ alt_var }}",
                        }
                    ]
                }
            ]
        }
        rendered = renderer.render_data(dct)
        self.assertEqual(dct, rendered)
    def test__macros(self):
        """Macro argument attrs are rendered; argument descriptions are not."""
        context = {
            "test_var": "1234",
            "alt_var": "replaced",
        }
        renderer = SchemaYamlRenderer(context, 'macros')
        # Look for description in arguments
        dct = {
            "name": "my_macro",
            "arguments": [
                {"name": "my_arg", "attr": "{{ alt_var }}"},
                {"name": "an_arg", "description": "{{ alt_var}}"}
            ]
        }
        expected = {
            "name": "my_macro",
            "arguments": [
                {"name": "my_arg", "attr": "replaced"},
                {"name": "an_arg", "description": "{{ alt_var}}"}
            ]
        }
        dct = renderer.render_data(dct)
        self.assertEqual(dct, expected)
|
import urllib.parse
import sys
from ccs import core
from ccs import constants
from . import response
def ticker():
    """Issue the GET request named after this function to this module's
    service (the service name is the second segment of ``__name__``)."""
    service = __name__.split(".")[1]
    # The request key must match this function's own name.
    request_name = sys._getframe().f_code.co_name
    # complete request
    full_request = core.request(service, request_name)
    return core.get(core.hostname(service), full_request, core.header(service),
                    core.compression(service), core.timeout(service))
def trades():
    """Issue the GET request named after this function to this module's
    service (the service name is the second segment of ``__name__``)."""
    service = __name__.split(".")[1]
    # The request key must match this function's own name.
    request_name = sys._getframe().f_code.co_name
    # complete request
    full_request = core.request(service, request_name)
    return core.get(core.hostname(service), full_request, core.header(service),
                    core.compression(service), core.timeout(service))
# NOTE: some problem with the encoding (original note: "nejaky problem s kodovanim")
# def trades_chart():
# s = __name__.split(".")[1]
# r = sys._getframe().f_code.co_name
#
# # complete request
# cr = core.request(s, r)
#
# return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s) | )
def orderbook():
    """Issue the GET request named after this function to this module's
    service (the service name is the second segment of ``__name__``)."""
    service = __name__.split(".")[1]
    # The request key must match this function's own name.
    request_name = sys._getframe().f_code.co_name
    # complete request
    full_request = core.request(service, request_name)
    return core.get(core.hostname(service), full_request, core.header(service),
                    core.compression(service), core.timeout(service))
T",
__DFU_STATE_DFU_MANIFEST_WAIT_RESET: "STATE_DFU_MANIFEST_WAIT_RESET",
__DFU_STATE_DFU_UPLOAD_IDLE: "STATE_DFU_UPLOAD_IDLE",
__DFU_STATE_DFU_ERROR: "STATE_DFU_ERROR",
}
# USB device handle (set by init())
__dev = None
# Configuration descriptor of the device
__cfg_descr = None
# Verbosity flag; None until configured elsewhere.
__verbose = None
# USB DFU interface
__DFU_INTERFACE = 0
# Python 3 deprecated getargspec in favour of getfullargspec, but
# Python 2 doesn't have the latter, so detect which one to use
getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
if "length" in getargspec(usb.util.get_string).args:
    # PyUSB 1.0.0.b1 has the length argument
    def get_string(dev, index):
        """Read USB string descriptor *index* from *dev*."""
        return usb.util.get_string(dev, 255, index)
else:
    # PyUSB 1.0.0.b2 dropped the length argument
    def get_string(dev, index):
        """Read USB string descriptor *index* from *dev*."""
        return usb.util.get_string(dev, index)
def find_dfu_cfg_descr(descr):
    """Parse a DFU functional descriptor from raw descriptor bytes.

    Returns a ``CfgDescr`` namedtuple when *descr* is a 9-byte descriptor
    of the DFU functional descriptor type, otherwise None.
    """
    is_dfu_functional = (
        len(descr) == 9
        and descr[0] == 9
        and descr[1] == _DFU_DESCRIPTOR_TYPE
    )
    if not is_dfu_functional:
        return None
    CfgDescr = collections.namedtuple(
        "CfgDescr",
        ["bLength", "bDescriptorType", "bmAttributes",
         "wDetachTimeOut", "wTransferSize", "bcdDFUVersion"],
    )
    return CfgDescr(*struct.unpack("<BBBHHH", bytearray(descr)))
def init(**kwargs):
    """Initializes the found DFU device so that we can program it.

    kwargs are forwarded to get_dfu_devices() as device filters. Raises
    ValueError unless exactly one DFU device is found. Leaves the module
    globals __dev/__cfg_descr pointing at the claimed device.
    """
    global __dev, __cfg_descr
    devices = get_dfu_devices(**kwargs)
    if not devices:
        raise ValueError("No DFU device found")
    if len(devices) > 1:
        raise ValueError("Multiple DFU devices found")
    __dev = devices[0]
    __dev.set_configuration()
    # Claim DFU interface
    usb.util.claim_interface(__dev, __DFU_INTERFACE)
    # Find the DFU configuration descriptor, either in the device or interfaces
    __cfg_descr = None
    for cfg in __dev.configurations():
        __cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)
        if __cfg_descr:
            break
        for itf in cfg.interfaces():
            __cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)
            if __cfg_descr:
                break
        # Bug fix: previously a descriptor found on an interface was
        # overwritten on the next configuration iteration; stop searching
        # once one has been found.
        if __cfg_descr:
            break
    # Get device into idle state
    for attempt in range(4):
        status = get_status()
        if status == __DFU_STATE_DFU_IDLE:
            break
        elif status == __DFU_STATE_DFU_DOWNLOAD_IDLE or status == __DFU_STATE_DFU_UPLOAD_IDLE:
            abort_request()
        else:
            clr_status()
def abort_request():
    """Sends an abort request."""
    # Control transfer: host-to-device, class request, to the DFU interface.
    __dev.ctrl_transfer(0x21, __DFU_ABORT, 0, __DFU_INTERFACE, None, __TIMEOUT)
def clr_status():
    """Clears any error status (perhaps left over from a previous session)."""
    # Control transfer: host-to-device, class request, to the DFU interface.
    __dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE, None, __TIMEOUT)
def get_status():
    """Get the status of the last operation.

    Returns the device state byte (bState, offset 4 of the GETSTATUS
    payload); prints any optional device-provided error string first.
    """
    # 20 s timeout (not __TIMEOUT) — presumably to tolerate slow operations
    # such as erases; confirm against the device's bwPollTimeout behavior.
    stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE, 6, 20000)
    # firmware can provide an optional string for any error
    if stat[5]:
        message = get_string(__dev, stat[5])
        if message:
            print(message)
    return stat[4]
def check_status(stage, expected):
    """Assert that the device is in *expected* state; exit otherwise."""
    actual = get_status()
    if actual == expected:
        return
    raise SystemExit("DFU: %s failed (%s)" % (stage, __DFU_STATUS_STR.get(actual, actual)))
def mass_erase():
    """Performs a MASS erase (i.e. erases the entire device)."""
    # Send DNLOAD with first byte=0x41 (ST DfuSe "erase" command, no address)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT)
    # Execute last command
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def page_erase(addr):
    """Erases a single page.

    addr: start address of the page to erase.
    """
    if __verbose:
        print("Erasing page: 0x%x..." % (addr))
    # Send DNLOAD with first byte=0x41 and page address (little-endian u32)
    buf = struct.pack("<BI", 0x41, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def set_address(addr):
    """Sets the address for the next operation.

    addr: target address (little-endian u32 in the command payload).
    """
    # Send DNLOAD with first byte=0x21 and page address
    buf = struct.pack("<BI", 0x21, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("set address", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("set address", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
    """Writes a buffer into memory. This routine assumes that memory has
    already been erased.

    addr: start address to write to.
    buf: bytes to write.
    progress: optional callback(progress_addr, bytes_done, progress_size)
        invoked every other chunk.
    """
    xfer_count = 0
    xfer_bytes = 0
    xfer_total = len(buf)
    xfer_base = addr
    # Transfer in wTransferSize-sized chunks, setting the address each time.
    while xfer_bytes < xfer_total:
        if __verbose and xfer_count % 512 == 0:
            print(
                "Addr 0x%x %dKBs/%dKBs..."
                % (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)
            )
        if progress and xfer_count % 2 == 0:
            progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size)
        # Set mem write address
        set_address(xfer_base + xfer_bytes)
        # Send DNLOAD with fw data
        chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
        __dev.ctrl_transfer(
            0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes : xfer_bytes + chunk], __TIMEOUT
        )
        # Execute last command
        check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
        # Check command state
        check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
        xfer_count += 1
        xfer_bytes += chunk
def write_page(buf, xfer_offset):
    """Writes a single page. This routine assumes that memory has already
    been erased.

    buf: page data to write.
    xfer_offset: byte offset from the 0x08000000 flash base.
    """
    xfer_base = 0x08000000
    # Set mem write address
    set_address(xfer_base + xfer_offset)
    # Send DNLOAD with fw data
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
    if __verbose:
        print("Write: 0x%x " % (xfer_base + xfer_offset))
def exit_dfu():
    """Exit DFU mode, and start running the program."""
    # Set jump address
    set_address(0x08000000)
    # Send DNLOAD with 0 length to exit DFU
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT)
    try:
        # Execute last command
        if get_status() != __DFU_STATE_DFU_MANIFEST:
            print("Failed to reset device")
        # Release device
        usb.util.dispose_resources(__dev)
    except Exception:
        # Narrowed from a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit. USB errors are expected here —
        # the device may drop off the bus right after manifest.
        pass
def named(values, names):
    """Builds a dict mapping the whitespace-separated *names* onto *values*."""
    field_names = names.split()
    return {field: value for field, value in zip(field_names, values)}
def consume(fmt, data, names):
    """Parses the struct defined by *fmt* off the front of *data*.

    Returns (fields, rest): *fields* is a dict built by named() from the
    unpacked values, *rest* is the remaining data after the struct.
    """
    size = struct.calcsize(fmt)
    fields = named(struct.unpack(fmt, data[:size]), names)
    rest = data[size:]
    return fields, rest
def cstring(string):
    """Extracts the text before the first NUL from a UTF-8 byte array."""
    decoded = string.decode("utf-8")
    head, _sep, _tail = decoded.partition("\0")
    return head
def compute_crc(data):
    """Computes the DFU-suffix CRC32 of *data*.

    This is the bitwise complement of zlib's CRC32, masked to 32 bits
    (the original expression ``-crc - 1`` equals ``~crc``).
    """
    return ~zlib.crc32(data) & 0xFFFFFFFF
def read_dfu_file(filename):
"""Reads a DFU file, and parses the individual elements from the file.
Returns an array of elements. Each element is a dictionary with the
following keys:
num - The element index.
address - The address that the element data should be written to.
size - The size of the element data.
data - The element data.
If an error occurs while parsing the file, then None is returned.
"""
print("File: {}".for |
import argparse
import configparser
from codepunks.config import (Config, INISource, XMLSource, JSONSource,
YAMLSource, ArgParserSource)
# Shared argparse.Namespace fixture used by the ArgParserSource tests below.
ARGS1 = argparse.Namespace()
ARGS1.apkey1 = "apval1"
ARGS1.apkey2 = "apval2"
ARGS1.apkey3 = "apval3"
def testEmptyCfg():
Config()
def testINISource():
c = Config(INISource("tests/config/config.ini"))
c.load()
def testINISourcePreBuilt():
fname = "tests/config/config.ini"
parser = configparser.ConfigParser()
parser.read(fname)
c = Config(INISource(fname, cfgparser=parser))
c.load()
def testINISource2():
c = Config([INISource("tests/config/config.ini"),
INISource("tests/c | onfig/config2.ini")])
c.load()
def testXMLSource():
c = Config(XMLSource("tests/config/config.xml"))
c.load()
def testJSONSource():
    """A Config backed by a JSON source loads without error."""
    cfg = Config(JSONSource("tests/config/config.json"))
    cfg.load()
def testYAMLSource():
    """A Config backed by a YAML source loads without error."""
    cfg = Config(YAMLSource("tests/config/config.yml"))
    cfg.load()
def testArgParserSource():
    """A Config backed by an argparse.Namespace loads without error."""
    cfg = Config(ArgParserSource(ARGS1))
    cfg.load()
def testAllSources():
    """Every supported source type can be combined into one Config."""
    sources = [INISource("tests/config/config.ini"),
               XMLSource("tests/config/config.xml"),
               JSONSource("tests/config/config.json"),
               YAMLSource("tests/config/config.yml"),
               ArgParserSource(ARGS1)]
    cfg = Config(sources)
    cfg.load()
|
class Dudle(object):
    """Builds poll-creation URLs for the dudle.inf.tu-dresden.de service."""

    def __init__(self):
        # Endpoint that creates a new poll; query parameters are appended.
        self.baseurl = 'https://dudle.inf.tu-dresden.de/?create_poll='

    def getDudle(self, name, type='time', url=''):
        """Return the poll-creation URL for *name* with the given poll type and url."""
        query = '&poll_type=' + type + '&poll_url=' + url
        return self.baseurl + name + query
|
from django.apps import AppConfig
|
class UniversityCreditsConfig(AppConfig):
    """Django application configuration for the university_credits app."""
    name = 'university_credits'
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Pooja <pooja@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado import main
from avocado.utils import archive, build, distro, process
from avocado.utils.software_manager import SoftwareManager
class Libunwind(Test):

    """
    Builds the libunwind library from source and runs its regression
    suite via ``make check``, failing on any reported FAIL/XFAIL/ERROR.
    """

    def setUp(self):
        '''
        Build Libunwind library
        Source:
        https://github.com/pathscale/libunwind/archive/vanilla_pathscale.zip
        '''
        dist = distro.detect()
        smm = SoftwareManager()
        # Base build dependencies; distro-specific extras are added below.
        deps = ['gcc', 'libtool', 'autoconf', 'automake', 'make']
        if dist.name == 'Ubuntu':
            deps.extend(['dh-autoreconf', 'dh-dist-zilla', 'g++',
                         'texlive-extra-utils'])
        elif dist.name in ['SuSE', 'rhel', 'fedora', 'redhat']:
            deps.extend(['gcc-c++'])
        else:
            self.cancel('Test not supported in %s' % dist.name)
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                # BUG FIX: the two fragments previously joined as
                # "...needed forthe test..." (missing space).
                self.cancel("Failed to install %s, which is needed for "
                            "the test to be run" % package)
        tarball = self.fetch_asset('vanilla_pathscale.zip', locations=[
            'https://github.com/pathscale/libunwind/archive/'
            'vanilla_pathscale.zip'], expire='7d')
        archive.extract(tarball, self.srcdir)
        self.sourcedir = os.path.join(self.srcdir, 'libunwind-vanilla_pathscale')
        os.chdir(self.sourcedir)
        process.run('./autogen.sh', shell=True)
        '''
        For configure options on different architecture please refer
        https://github.com/pathscale/libunwind
        '''
        # NOTE(review): if the test parameter is unset, the literal string
        # 'configure_option' is passed to ./configure -- confirm an empty
        # string default was not intended.
        configure_option = self.params.get('configure_option',
                                           default='configure_option')
        process.run('./configure %s' % configure_option, shell=True)
        build.make(self.sourcedir)
        build.make(self.sourcedir, extra_args='install')

    def test(self):
        '''
        Execute regression tests for libunwind library
        '''
        results = build.run_make(self.sourcedir, extra_args='check',
                                 ignore_status=True).stdout
        fail_list = ['FAIL', 'XFAIL', 'ERROR']
        failures = []
        for failure in fail_list:
            # BUG FIX: the original indexed findall(...)[0] unconditionally,
            # raising IndexError whenever the "# FAIL:"-style summary line
            # was absent from the make output; guard the lookup instead.
            matches = re.findall(r"# %s:(.*)" % failure, results)
            if not matches:
                continue
            num_fails = matches[0].strip()
            # Only count well-formed, non-zero failure counts.
            if num_fails.isdigit() and int(num_fails):
                failures.append({failure: num_fails})
        if failures:
            self.fail('Test failed with following:%s' % failures)
# Allow invoking this test module directly via the Avocado job runner.
if __name__ == "__main__":
    main()
|
"""
pyfire.contact
~~~~~~~~~~
Handles Contact ("roster item") interpretation as per RFC-6121
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from sqlalchemy import Table, Column, Boolean, Integer, String, Enum, ForeignKey
from sqlalchemy.orm import relationship, backref
from pyfire.jid import JID
from pyfire.storage import Base, JIDString
# Association table implementing the many-to-many relation between
# contacts and groups (a contact may appear in several roster groups).
contacts_groups = Table('contacts_groups', Base.metadata,
    Column('contact_id', Integer, ForeignKey('contacts.id')),
    Column('group_id', Integer, ForeignKey('groups.id'))
)
class Roster(Base):
    """List of contacts for a given jid"""
    __tablename__ = 'rosters'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # JID of the roster owner.
    jid = Column(JIDString, nullable=False)
    def __init__(self, jid):
        # Normalize the supplied string into a JID instance.
        self.jid = JID(jid)
class Group(Base):
    """Simple group, only providing a name for now"""
    __tablename__ = 'groups'
    id = Column(Integer, primary_key=True)
    # Display name of the group.
    name = Column(String(255))
class Contact(Base):
    """Jabber Contact, aka roster item. It has some really strict attribute
    setting mechanism as it leads to all kinds of fantastic crashes with
    clients which should be avoided in any case.
    """
    __tablename__ = 'contacts'
    id = Column(Integer, primary_key=True)
    # Pre-approved subscription flag (RFC 6121).
    approved = Column(Boolean)
    # Pending subscription request, if any.
    ask = Column(Enum('subscribe'))
    jid = Column(JIDString, nullable=False)
    name = Column(String(255))
    subscription = Column(Enum("none", "from", "to", "remove", "both"))
    groups = relationship(Group, secondary=contacts_groups)
    roster = relationship(Roster, backref=backref('contacts'))
    roster_id = Column(Integer, ForeignKey('rosters.id'), nullable=False)
    def __init__(self, jid, **kwds):
        super(Contact, self).__init__()
        # required
        if isinstance(jid, basestring):
            self.jid = JID(jid)
        elif isinstance(jid, JID):
            self.jid = jid
            self.jid.validate(raise_error=True)
        else:
            raise AttributeError("Needs valid jid either as string or JID instance")
        # optional
        self.approved = False
        self.ask = None
        self.name = None
        self.subscription = "none"
        self.groups = []
        # Any remaining keyword arguments become attributes.
        for k, v in kwds.iteritems():
            setattr(self, k, v)
    def to_element(self):
        """Formats contact as `class`:ET.Element object"""
        element = ET.Element("item")
        if self.approved is not None:
            element.set("approved", 'true' if self.approved else 'false')
        if self.ask is not None:
            element.set("ask", self.ask)
        element.set("jid", str(self.jid))
        if self.name is not None:
            element.set("name", self.name)
        if self.subscription is not None:
            element.set("subscription", self.subscription)
        for group in self.groups:
            group_element = ET.SubElement(element, "group")
            group_element.text = group
        return element
    @staticmethod
    def from_element(element):
        """Creates contact instance from `class`:ET.Element"""
        if element.tag != "item":
            raise ValueError("Invalid element with tag %s" % element.tag)
        cont = Contact(element.get('jid'))
        cont.ask = element.get('ask')
        # BUG FIX: also restore "name" -- to_element() serializes it, but it
        # was previously dropped on parse, losing the name on round-trip.
        cont.name = element.get('name')
        cont.subscription = element.get('subscription')
        approved = element.get('approved')
        if approved == 'true':
            cont.approved = True
        elif approved == 'false':
            cont.approved = False
        else:
            # Absent attribute stays None (unspecified).
            cont.approved = approved
        for group in list(element):
            if group.tag == "group":
                cont.groups.append(group.text)
        return cont
|
)
asts = set(_[1] for _ in ar)
ar2 = set(_[:2] for _ in ar)
aam, sam = missing(aabs, sabs)
asm, ssm = missing(asts, ssts)
ar2m, sr2m = missing(ar2, set(sr))
print('OK to skip')
print(sorted(aam))
print('Need to be created')
print(sorted(sam))
print()
print(sorted(asm))
print()
print(sorted(ssm))
print()
#print(sorted(ar2m))
#print()
#print(sorted(sr2m))
#print()
assert all(s in achild for s in schild), f'somehow the kids dont match {achild} {schild}\n' + str(sorted(set(a) - set(s) | set(s) - set(a)
for a, s in ((tuple(sorted(achild.items())),
tuple(sorted(schild.items()))),)))
for k, (structs, figs) in out.items():
for struct in structs:
assert not re.match('\d+-\d+', struct) and not re.match('\d+$', struct), f'bad struct {struct} in {k}'
errata = {'nodes with layers':achild}
return out, errata
class PaxSrAr_4(PaxSrAr):
    """Structure/abbreviation index for the Paxinos rat atlas, 4th edition."""
    sourceFile = auth.get_path('resources') / 'pax-4th-ed-indexes.txt'
    artifact = Artifacts.PaxRat4
class PaxSrAr_6(PaxSrAr):
    """Structure/abbreviation index for the Paxinos rat atlas, 6th edition."""
    sourceFile = auth.get_path('resources') / 'pax-6th-ed-indexes.txt'
    artifact = Artifacts.PaxRat6
class PaxMSrAr_2(PaxSrAr):
    """Structure/abbreviation index for the Paxinos mouse atlas, 2nd edition."""
    sourceFile = auth.get_path('resources') / 'paxm-2nd-ed-indexes.txt'
    artifact = Artifacts.PaxMouse2
class PaxMSrAr_3(PaxSrAr):
    """Structure/abbreviation index for the Paxinos mouse atlas, 3rd edition."""
    sourceFile = auth.get_path('resources') / 'paxm-3rd-ed-indexes.txt'
    artifact = Artifacts.PaxMouse3
class PaxTree_6(Source):
    """Parses the Paxinos rat atlas (6th ed) hierarchy dump (tree.txt) into
    per-abbreviation records plus an errata mapping of cortical layers."""
    source = '~/ni/dev/nifstd/paxinos/tree.txt'
    artifact = Artifacts.PaxRat6
    @classmethod
    def loadData(cls):
        # Return the non-empty lines of the tree dump.
        with open(os.path.expanduser(cls.source), 'rt') as f:
            return [l for l in f.read().split('\n') if l]
    @classmethod
    def processData(cls):
        out = {}
        recs = []
        parent_stack = [None]
        old_depth = 0
        layers = {}
        for l in cls.raw:
            # Leading spaces encode tree depth; remaining fields are
            # space-separated: abbrev, separator, name.
            depth, abbrev, _, name = l.split(' ', 3)
            depth = len(depth)
            if old_depth < depth:  # don't change
                parent = parent_stack[-1]
                parent_stack.append(abbrev)
                old_depth = depth
            elif old_depth == depth:
                if len(parent_stack) - 1 > depth:
                    parent_stack.pop()
                parent = parent_stack[-1]
                parent_stack.append(abbrev)
            elif old_depth > depth:  # bump back
                for _ in range(old_depth - depth + 1):
                    parent_stack.pop()
                parent = parent_stack[-1]
                parent_stack.append(abbrev)
                old_depth = depth
            struct = None if name == '-------' else name
            o = (depth, abbrev, struct, parent)
            if '-' in abbrev:
                # remove the precomposed, we will deal with them systematically
                maybe_parent, rest = abbrev.split('-', 1)
                if rest.isdigit() or rest == '1a' or rest == '1b':  # Pir1a Pir1b
                    if parent == 'Unknown':  # XXX special cases
                        if maybe_parent == 'Pi':  # i think this was probably caused by an ocr error from Pir3 -> Pi3
                            continue
                    assert maybe_parent == parent, f'you fall into a trap {maybe_parent} {parent}'
                    if parent not in layers:
                        layers[parent] = []
                    # NOTE(review): `layer` is only bound by the elif branch
                    # below on a *previous* iteration; if this branch runs
                    # first it raises NameError -- confirm intended ordering.
                    layers[parent].append((layer, o))  # FIXME where does layer come from here?
                    # I think this comes from the previous iteration of the loop?!
            elif struct is not None and ', layer 1' in struct:
                # remove the precomposed, we will deal with them systematically
                parent_, layer = abbrev[:-1], abbrev[-1]
                if parent_ == 'CxA' and parent == 'Amy':  # XXX special cases
                    parent = 'CxA'
                elif parent == 'Unknown':
                    if parent_ == 'LOT':
                        parent = 'LOT'
                    elif parent_ == 'Tu':
                        parent = 'Tu'
                assert parent_ == parent, f'wrong turn friend {parent_} {parent}'
                if parent not in layers:
                    layers[parent] = []
                layers[parent].append((layer, o))
            else:
                recs.append(o)
                out[abbrev] = ([struct], (), parent)
        errata = {'nodes with layers':layers}
        return recs, out, errata
    @classmethod
    def validate(cls, trecs, tr, errata):
        print(Counter(_[1] for _ in trecs).most_common()[:5])
        # BUG FIX: the line below used to be a bare tuple expression -- dead
        # code (a pasted sample of expected duplicates); kept as a comment.
        # ('CxA1', 2), ('Tu1', 2), ('LOT1', 2), ('ECIC3', 2)
        assert len(tr) == len(trecs), 'Abbreviations in tr are not unique!'
        return tr, errata
class PaxFix4(LocalSource):
    """Manually curated cortical-layer entries for the 4th-edition rat atlas."""
    # _data is (abbrev -> ([structure names], figure tuple), errata) --
    # presumably the empty trailing dict is the errata; confirm in LocalSource.
    _data = ({
        # 1-6b are listed in fig 19 of 4e, no 3/4, 5a, or 5b
        '1':(['layer 1 of cortex'], tuple()),
        '1a':(['layer 1a of cortex'], tuple()),
        '1b':(['layer 1b of cortex'], tuple()),
        '2':(['layer 2 of cortex'], tuple()),
        '3':(['layer 3 of cortex'], tuple()),
        '3/4':(['layer 3/4 of cortex'], tuple()),
        '4':(['layer 4 of cortex'], tuple()),
        '5':(['layer 5 of cortex'], tuple()),
        '5a':(['layer 5a of cortex'], tuple()),
        '5b':(['layer 5b of cortex'], tuple()),
        '6':(['layer 6 of cortex'], tuple()),
        '6a':(['layer 6a of cortex'], tuple()),
        '6b':(['layer 6b of cortex'], tuple()),
    }, {})
class PaxFix6(LocalSource):
    """Manually curated cortical-layer entries for the 6th-edition rat atlas."""
    # The int tuples appear to be figure/index numbers where the layer
    # occurs -- TODO confirm against LocalSource consumers.
    _data = ({
        '1':(['layer 1 of cortex'], tuple()),
        '1a':(['layer 1a of cortex'], (8,)),
        '1b':(['layer 1b of cortex'], (8,)),
        '2':(['layer 2 of cortex'], tuple()),
        '3':(['layer 3 of cortex'], tuple()),
        '3/4':(['layer 3/4 of cortex'], (94,)),
        '4':(['layer 4 of cortex'], tuple()),
        '5':(['layer 5 of cortex'], tuple()),
        '5a':(['layer 5a of cortex'], (52, 94)),
        '5b':(['layer 5b of cortex'], tuple()),
        '6':(['layer 6 of cortex'], tuple()),
        '6a':(['layer 6a of cortex'], tuple()),
        '6b':(['layer 6b of cortex'], tuple()),
    }, {})
class PaxFix(LocalSource):
    """Edition-independent cortical-layer entries (generic "layer N" names)."""
    _data = ({
        '1':(['layer 1'], tuple()),
        '1a':(['layer 1a'], (8,)),
        '1b':(['layer 1b'], (8,)),
        '2':(['layer 2'], tuple()),
        '3':(['layer 3'], tuple()),
        '3/4':(['layer 3/4'], (94,)),
        '4':(['layer 4'], tuple()),
        '5':(['layer 5'], tuple()),
        '5a':(['layer 5a'], (52, 94)),
        '5b':(['layer 5b'], tuple()),
        '6':(['layer 6'], tuple()),
        '6a':(['layer 6a'], tuple()),
        '6b':(['layer 6b'], tuple()),
    }, {})
class PaxMFix(LocalSource):
    """No manual fixes are needed for the mouse atlases."""
    _data = ({}, {})
class PaxLabels(LabelsBase):
""" Base class for processing paxinos indexes. """
__pythonOnly = True
path = 'ttl/generated/parcellation/'
imports = parcCore,
_fixes = []
_dupes = {}
_merge = {}
    @property
    def fixes_abbrevs(self):
        """Abbreviations contributed by manual fixes plus the first alternate
        abbreviation of every registered duplicate."""
        fixes_abbrevs = set()
        for f in self._fixes:
            fixes_abbrevs.add(f[0])
        for dupe in self._dupes.values():
            fixes_abbrevs.add(dupe.alt_abbrevs[0])
        return fixes_abbrevs
    @property
    def fixes_prov(self):
        """Provenance strings for every label introduced by a manual fix,
        pointing back at this class's source line."""
        _fixes_prov = {}
        for f in self._fixes:
            for l in f[1][0]:
                _fixes_prov[l] = [ParcOnt.wasGeneratedBy.format(line=getSourceLine(self.__class__))] # FIXME per file
        return _fixes_prov
    @property
    def dupes_structs(self):
        """Structure names involved in duplicate entries, seeded with the
        known cerebellar-lobule special cases."""
        ds = {'cerebellar lobules', 'cerebellar lobule'}
        for dupe in self._dupes.values():
            for struct in dupe.structures:
                ds.add(struct)
        return ds
@property
def fixes(self):
_, _, collisions, _ = self.reco |
from __future__ import absolute_import
from __future__ import print_function
from .ChessFile import ChessFile, LoadingError
from pychess.Utils.GameModel import GameModel
from pychess.Utils.const import WHITE, BLACK, WON_RESIGN, WAITING_TO_START, BLACKWON, WHITEWON, DRAW
from pychess.Utils.logic import getStatus
from pychess.Utils.lutils.leval import evaluateComplete
# Human-readable name of this file format (translated via gettext "_").
__label__ = _("Chess Position")
# File extension handled by this loader/saver.
__ending__ = "epd"
# Positions may be appended to an existing file.
__append__ = True
def save (file, model, position=None):
    """Saves game to file in fen format"""
    # `position` is unused; kept for the common saver interface.
    color = model.boards[-1].color
    fen = model.boards[-1].asFen().split(" ")
    # First four parts of fen are the same in epd
    file.write(u" ".join(fen[:4]))
    ############################################################################
    # Repetition count                                                         #
    ############################################################################
    rc = model.boards[-1].board.repetitionCount()
    ############################################################################
    # Centipawn evaluation                                                     #
    ############################################################################
    # +/-32766 encodes a decided game from the side-to-move's perspective.
    if model.status == WHITEWON:
        if color == WHITE:
            ce = 32766
        else: ce = -32766
    elif model.status == BLACKWON:
        if color == WHITE:
            ce = -32766
        else: ce = 32766
    elif model.status == DRAW:
        ce = 0
    else: ce = evaluateComplete(model.boards[-1].board, model.boards[-1].color)
    ############################################################################
    # Opcodes                                                                  #
    ############################################################################
    opcodes = (
        ("fmvn", fen[5]), # In fen full move number is the 6th field
        ("hmvc", fen[4]), # In fen halfmove clock is the 5th field
        # Email and name of reciever and sender. We don't know the email.
        ("tcri", "?@?.? %s" % repr(model.players[color]).replace(";","")),
        ("tcsi", "?@?.? %s" % repr(model.players[1-color]).replace(";","")),
        ("ce", ce),
        ("rc", rc),
    )
    for key, value in opcodes:
        file.write(u" %s %s;" % (key, value))
    ############################################################################
    # Resign opcode                                                            #
    ############################################################################
    if model.status in (WHITEWON, BLACKWON) and model.reason == WON_RESIGN:
        file.write(u" resign;")
    # Terminate the record with a newline.
    print(u"", file=file)
    # NOTE(review): this closes the caller's file handle -- confirm callers
    # expect that side effect.
    file.close()
def load (file):
    """Builds an EpdFile from the stripped, non-empty lines of *file*."""
    positions = []
    for raw_line in file:
        line = str.strip(raw_line)
        if line:
            positions.append(line)
    return EpdFile(positions)
class EpdFile (ChessFile):
    """Reads EPD records (one position per line) into GameModel objects."""
    def loadToModel (self, gameno, position, model=None):
        # An EPD record is four FEN fields optionally followed by opcodes.
        if not model: model = GameModel()
        fieldlist = self.games[gameno].split(" ")
        if len(fieldlist) == 4:
            fen = self.games[gameno]
            opcodestr = ""
        elif len(fieldlist) > 4:
            fen = " ".join(fieldlist[:4])
            opcodestr = " ".join(fieldlist[4:])
        else: raise LoadingError("EPD string can not have less than 4 field")
        # Parse ";"-separated opcodes; value-less opcodes map to True.
        opcodes = {}
        for opcode in map(str.strip, opcodestr.split(";")):
            space = opcode.find(" ")
            if space == -1:
                opcodes[opcode] = True
            else:
                opcodes[opcode[:space]] = opcode[space+1:]
        # Rebuild a full FEN by appending the halfmove clock (default 0)
        # and fullmove number (default 1) from the opcodes.
        if "hmvc" in opcodes:
            fen += " " + opcodes["hmvc"]
        else: fen += " 0"
        if "fmvn" in opcodes:
            fen += " " + opcodes["fmvn"]
        else: fen += " 1"
        model.boards = [model.variant.board(setup=fen)]
        model.variations = [model.boards]
        model.status = WAITING_TO_START
        # rc is kinda broken
        #if "rc" in opcodes:
        #    model.boards[0].board.rc = int(opcodes["rc"])
        # A "resign" opcode decides the game against the side to move.
        if "resign" in opcodes:
            if fieldlist[1] == "w":
                model.status = BLACKWON
            else:
                model.status = WHITEWON
            model.reason = WON_RESIGN
        if model.status == WAITING_TO_START:
            status, reason = getStatus(model.boards[-1])
            if status in (BLACKWON, WHITEWON, DRAW):
                model.status, model.reason = status, reason
        return model
    def get_player_names (self, gameno):
        # Extract names from the "tcri" (receiver) / "tcsi" (sender) opcodes,
        # whose values look like "email name".
        data = self.games[gameno]
        names = {}
        for key in "tcri", "tcsi":
            keyindex = data.find(key)
            if keyindex == -1:
                names[key] = _("Unknown")
            else:
                sem = data.find(";", keyindex)
                if sem == -1:
                    opcode = data[keyindex+len(key)+1:]
                else: opcode = data[keyindex+len(key)+1:sem]
                # NOTE(review): raises ValueError if the opcode value
                # contains no space -- assumes well-formed "email name".
                email, name = opcode.split(" ", 1)
                names[key] = name
        # Field 2 of the EPD is the side to move; map receiver/sender to
        # (white, black) accordingly.
        color = data.split(" ")[1] == "b" and BLACK or WHITE
        if color == WHITE:
            return (names["tcri"], names["tcsi"])
        else:
            return (names["tcsi"], names["tcri"])
|
DFU=0x00D0,
BATTERY_STATUS=0x1000,
LED_CONTROL=0x1300,
CHANGE_HOST=0x1814,
BACKLIGHT=0x1981,
REPROG_CONTROLS=0x1B00,
REPROG_CONTROLS_V2=0x1B01,
REPROG_CONTROLS_V2_2=0x1B02, # LogiOptions 2.10.73 features.xml
REPROG_CONTROLS_V3=0x1B03,
REPROG_CONTROLS_V4=0x1B04,
WIRELESS_DEVICE_STATUS=0x1D4B,
LEFT_RIGHT_SWAP=0x2001,
SWAP_BUTTON=0x2005,
VERTICAL_SCROLLING=0x2100,
SMART_SHIFT=0x2110,
HI_RES_SCROLLING=0x2120,
HIRES_WHEEL=0x2121,
LOWRES_WHEEL=0x2130,
MOUSE_POINTER=0x2200,
ADJUSTABLE_DPI=0x2201,
POINTER_SPEED=0x2205,
ANGLE_SNAPPING=0x2230,
SURFACE_TUNING=0x2240,
HYBRID_TRACKING=0x2400,
FN_INVERSION=0x40A0,
NEW_FN_INVERSION=0x40A2,
K375S_FN_INVERSION=0x40A3,
ENCRYPTION=0x4100,
LOCK_KEY_STATE=0x4220,
SOLAR_DASHBOARD=0x4301,
KEYBOARD_LAYOUT=0x4520,
KEYBOARD_DISABLE=0x4521,
DUALPLATFORM=0x4530,
KEYBOARD_LAYOUT_2=0x4540,
TOUCHPAD_FW_ITEMS=0x6010,
TOUCHPAD_SW_ITEMS=0x6011,
TOUCHPAD_WIN8_FW_ITEMS=0x6012,
TOUCHPAD_RAW_XY=0x6100,
TOUCHMOUSE_RAW_POINTS=0x6110,
TOUCHMOUSE_6120=0x6120,
GESTURE=0x6500,
GESTURE_2=0x6501,
GKEY=0x8010,
MKEYS=0x8020,
MR=0x8030,
REPORT_RATE=0x8060,
COLOR_LED_EFECTS=0x8070,
PER_KEY_LIGHTING=0x8080,
ONBOARD_PROFILES=0x8100,
MOUSE_BUTTON_SPY=0x8110,
)
# Any feature id not registered above renders as 'unknown:XXXX'.
FEATURE._fallback = lambda x: 'unknown:%04X' % x
# Flag bits attached to a feature index in FEATURE_SET replies.
FEATURE_FLAG = _NamedInts(
    internal=0x20,
    hidden=0x40,
    obsolete=0x80)
DEVICE_KIND = _NamedInts(
    keyboard=0x00,
    remote_control=0x01,
    numpad=0x02,
    mouse=0x03,
    touchpad=0x04,
    trackball=0x05,
    presenter=0x06,
    receiver=0x07)
FIRMWARE_KIND = _NamedInts(
    Firmware=0x00,
    Bootloader=0x01,
    Hardware=0x02,
    Other=0x03)
# True when the reported battery status is usable.  The lambda body is
# evaluated at call time, so the forward reference to BATTERY_STATUS
# (defined just below) is resolved late and is safe.
BATTERY_OK = lambda status: status not in (BATTERY_STATUS.invalid_battery, BATTERY_STATUS.thermal_error)
BATTERY_STATUS = _NamedInts(
    discharging=0x00,
    recharging=0x01,
    almost_full=0x02,
    full=0x03,
    slow_recharge=0x04,
    invalid_battery=0x05,
    thermal_error=0x06)
# HID++ 2.0 error codes returned by feature calls.
ERROR = _NamedInts(
    unknown=0x01,
    invalid_argument=0x02,
    out_of_range=0x03,
    hardware_error=0x04,
    logitech_internal=0x05,
    invalid_feature_index=0x06,
    invalid_function=0x07,
    busy=0x08,
    unsupported=0x09)
#
#
#
# _KwException presumably records the keyword context (device, feature) on
# the exception -- see its definition.
class FeatureNotSupported(_KwException):
    """Raised when trying to request a feature not supported by the device."""
    pass
# Corresponds to a reply carrying one of the ERROR codes defined above.
class FeatureCallError(_KwException):
    """Raised if the device replied to a feature call with an error."""
    pass
#
#
#
class FeaturesArray(object):
    """A sequence of features supported by a HID++ 2.0 device.

    The feature table is fetched lazily from the device on first use and
    cached in `features`; entries are filled in on demand by index."""
    __slots__ = ('supported', 'device', 'features')
    assert FEATURE.ROOT == 0x0000
    def __init__(self, device):
        assert device is not None
        self.device = device
        self.supported = True
        self.features = None
    def __del__(self):
        self.supported = False
        self.device = None
        self.features = None
    def _check(self):
        # Lazily initialize the feature table; returns True when usable.
        # print (self.device, "check", self.supported, self.features, self.device.protocol)
        if self.supported:
            assert self.device
            if self.features is not None:
                return True
            if not self.device.online:
                # device is not connected right now, will have to try later
                return False
            # I _think_ this is universally true
            if self.device.protocol and self.device.protocol < 2.0:
                self.supported = False
                self.device.features = None
                self.device = None
                return False
            reply = self.device.request(0x0000, _pack('!H', FEATURE.FEATURE_SET))
            if reply is None:
                self.supported = False
            else:
                fs_index = ord(reply[0:1])
                if fs_index:
                    count = self.device.request(fs_index << 8)
                    if count is None:
                        _log.warn("FEATURE_SET found, but failed to read features count")
                        # most likely the device is unavailable
                        return False
                    else:
                        count = ord(count[:1])
                        assert count >= fs_index
                        self.features = [None] * (1 + count)
                        self.features[0] = FEATURE.ROOT
                        self.features[fs_index] = FEATURE.FEATURE_SET
                        return True
                else:
                    self.supported = False
        return False
    __bool__ = __nonzero__ = _check
    def __getitem__(self, index):
        # Returns the feature id at `index`, querying the device on a cache
        # miss; returns None implicitly when the device is unusable.
        if self._check():
            if isinstance(index, int):
                if index < 0 or index >= len(self.features):
                    raise IndexError(index)
                if self.features[index] is None:
                    feature = self.device.feature_request(FEATURE.FEATURE_SET, 0x10, index)
                    if feature:
                        feature, = _unpack('!H', feature[:2])
                        self.features[index] = FEATURE[feature]
                return self.features[index]
            elif isinstance(index, slice):
                indices = index.indices(len(self.features))
                return [self.__getitem__(i) for i in range(*indices)]
    def __contains__(self, featureId):
        """Tests whether the list contains given Feature ID"""
        if self._check():
            ivalue = int(featureId)
            may_have = False
            for f in self.features:
                if f is None:
                    may_have = True
                elif ivalue == int(f):
                    return True
            # Unknown slots remain: ask the device directly via ROOT.
            if may_have:
                reply = self.device.request(0x0000, _pack('!H', ivalue))
                if reply:
                    index = ord(reply[0:1])
                    if index:
                        self.features[index] = FEATURE[ivalue]
                        return True
    def index(self, featureId):
        """Gets the Feature Index for a given Feature ID"""
        if self._check():
            may_have = False
            ivalue = int(featureId)
            for index, f in enumerate(self.features):
                if f is None:
                    may_have = True
                elif ivalue == int(f):
                    return index
            if may_have:
                reply = self.device.request(0x0000, _pack('!H', ivalue))
                if reply:
                    index = ord(reply[0:1])
                    self.features[index] = FEATURE[ivalue]
                    return index
        raise ValueError("%r not in list" % featureId)
    def __iter__(self):
        if self._check():
            yield FEATURE.ROOT
            index = 1
            last_index = len(self.features)
            while index < last_index:
                yield self.__getitem__(index)
                index += 1
    def __len__(self):
        return len(self.features) if self._check() else 0
#
#
#
class KeysArray(object):
    """A sequence of key mappings supported by a HID++ 2.0 device.

    Entries are fetched lazily from the device, trying the legacy
    REPROG_CONTROLS feature first and falling back to REPROG_CONTROLS_V4."""
    __slots__ = ('device', 'keys', 'keyversion')
    def __init__(self, device, count):
        assert device is not None
        self.device = device
        self.keyversion = 0
        self.keys = [None] * count
    def __getitem__(self, index):
        if isinstance(index, int):
            if index < 0 or index >= len(self.keys):
                raise IndexError(index)
            # TODO: add here additional variants for other REPROG_CONTROLS
            if self.keys[index] is None:
                keydata = feature_request(self.device, FEATURE.REPROG_CONTROLS, 0x10, index)
                self.keyversion=1
                if keydata is None:
                    keydata = feature_request(self.device, FEATURE.REPROG_CONTROLS_V4, 0x10, index)
                    self.keyversion=4
                if keydata:
                    key, key_task, flags, pos, group, gmask = _unpack('!HHBBBB', keydata[:8])
                    ctrl_id_text = special_keys.CONTROL[key]
                    ctrl_task_text = special_keys.TASK[key_task]
                    if self.keyversion == 1:
                        self.keys[index] = _ReprogrammableKeyInfo(index, ctrl_id_text, ctrl_task_text, flags)
                    if self.keyversion == 4:
                        # V4 additionally reports where the key is remapped to.
                        try:
                            mapped_data = feature_request(self.device, FEATURE.REPROG_CONTROLS_V4, 0x20, key&0xff00, key&0xff)
                            if mapped_data:
                                remap_key, remap_flag, remapped = _unpack('!HBH', mapped_data[:5])
                                # if key not mapped map it to itself for display
                                if remapped == 0:
                                    remapped = key
                        except Exception:
                            remapped = key
                            remap_key = key
                            remap_flag = 0
                        remapped_text = special_keys.CONTROL[remapped]
                        self.keys[index] = _ReprogrammableKeyInfoV4(index, ctrl_id_text, ctrl_task_text, flags, pos, group, gmask, remapped_text)
            return self.keys[index]
        elif isinstance(index, slice):
            indices = index.indices(len(self.keys))
            return [self.__getitem__(i) for i in range(*indices)]
    def index(self, value):
        for index, k in enumerate(self.keys):
            if k is not None and int(value) == int(k.key):
                return index
        # NOTE(review): this second pass returns the first index whose entry
        # could be fetched, regardless of whether it matches `value` --
        # confirm this is intentional.
        for index, k in enumerate(self.keys):
            if k is None:
                k = self.__getitem__(index)
                if k is not None:
                    return index
    def __iter__(self):
        for k in range(0, len(self.keys)):
            yield self.__getitem__(k)
    def __len__(self):
        return len(self.keys)
#
#
#
def feature_request(device, feature, function=0x00, *params):
if device.online and device.features: |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Sof | tware, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY | KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .lecroyWRXIA import *
class lecroyWR44MXIA(lecroyWRXIA):
    "Lecroy WaveRunner 44MXi-A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', 'WaveRunner 44MXi-A')

        # BUG FIX: the original called super(lecroy104MXiA, self), a
        # copy-paste from another driver that raises NameError on init.
        super(lecroyWR44MXIA, self).__init__(*args, **kwargs)

        # 4 analog channels, no digital channels, 400 MHz bandwidth.
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 400e6

        self._init_channels()
|
#!/usr/bin/env python
import os
import re
import sys
def stamp(html):
    """Stamp a Python HTML documentation page with the SourceForge logo"""
    pattern = re.compile(r'<span class="release-info">(.*)</span>')

    def add_logo(match):
        # Re-emit the release-info span with the hosted-on logo appended.
        return ('<span class="release-info">%s '
                'Hosted on <a href="http://sourceforge.net">'
                '<img src="http://sourceforge.net/'
                'sflogo.php?group_id=82987&type=1" width="88" height="31"'
                'border="0" alt="SourceForge Logo"></a></span>' % match.group(1))

    return pattern.sub(add_logo, html)
# stamp()
if __name__ == '__main__':
    # Stamp each page given on the command line, rewriting it in place only
    # when the content actually changed.
    for name in sys.argv[1:]:
        # FIX: the original leaked the read handle (open(...).read()) and
        # left the write handle to GC on error; use context managers.
        with open(name, 'r') as page:
            html = page.read()
        text = stamp(html)
        if text != html:
            os.remove(name)
            with open(name, 'w') as page:
                page.write(text)
# Local Variables: ***
# mode: python ***
# End: ***
|
fro | m simpleGossip.gossiping.gossip import RemoteGossipService
if __name__ == "__main__":
    from rpyc.utils.server import ThreadedServer
    # Expose the gossip service over RPyC on TCP port 18861; start() blocks
    # until the server is stopped.
    t = ThreadedServer( RemoteGossipService, port=18861 )
    t.start()
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""PyCrypto AES implementation."""
from .cryptomath import *
from .aes import *
if pycryptoLoaded:
import Crypto.Cipher.AES
def new(key, mode, IV | ):
return PyCrypto_AES(key, mode, IV)
class PyCrypto_AES(AES):
    """AES implementation backed by PyCrypto's Crypto.Cipher.AES."""
    def __init__(self, key, mode, IV):
        AES.__init__(self, key, mode, IV, "pycrypto")
        # PyCrypto expects immutable bytes, not bytearray.
        key = bytes(key)
        IV = bytes(IV)
        self.context = Crypto.Cipher.AES.new(key, mode, IV)
    def encrypt(self, plaintext):
        # Returns the ciphertext as a mutable bytearray, matching the
        # tlslite cipher interface.
        plaintext = bytes(plaintext)
        return bytearray(self.context.encrypt(plaintext))
    def decrypt(self, ciphertext):
        # Returns the plaintext as a mutable bytearray.
        ciphertext = bytes(ciphertext)
        return bytearray(self.context.decrypt(ciphertext))
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of th | e GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITN | ESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp import api, models
class CrmLead(models.Model):
    _inherit = 'crm.lead'
    @api.multi
    def handle_partner_assignation(self, action='create',
                                   partner_id=False, context=None):
        """Run the standard partner assignation, then remap each lead's
        partner to its parent company (parent_id) when one exists."""
        partner_ids = super(CrmLead, self).handle_partner_assignation(
            action=action, partner_id=partner_id, context=context)
        for lead in self:
            partner_id = partner_ids[lead.id]
            partner = self.env['res.partner'].browse(partner_id)
            if partner.parent_id:
                # Prefer the parent company over the individual contact,
                # both in the returned mapping and on the lead itself.
                partner_ids[lead.id] = partner.parent_id.id
                lead.partner_id = partner.parent_id.id
        return partner_ids
|
clas | s SpellPickerController:
de | f render(self):
pass
_controller_class = SpellPickerController
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
from nose.tools import eq_, assert_raises
from mosql.query import select, insert, replace
from mosql.util import param, ___, raw, DirectionError, OperatorError, autoparam
def test_select_customize():
    """Operators embedded in the column key ('name like', 'age >') render."""
    generated = select('person', OrderedDict([
        ('name like', 'Mosky%'), ('age >', 20),
    ]))
    expected = 'SELECT * FROM "person" WHERE "name" LIKE \'Mosky%\' AND "age" > 20'
    eq_(generated, expected)
def test_select_customize_operator():
    """Operators given as (column, operator) tuples render identically."""
    generated = select('person', OrderedDict([
        (('name', 'like'), 'Mosky%'), (('age', '>'), 20)
    ]))
    expected = 'SELECT * FROM "person" WHERE "name" LIKE \'Mosky%\' AND "age" > 20'
    eq_(generated, expected)
def test_select_operationerror():
    """A SQL-injection attempt in the operator position raises OperatorError."""
    with assert_raises(OperatorError) as cxt:
        select('person', {"person_id = '' OR true; --": 'mosky'})
    expected = "this operator is not allowed: \"= '' OR TRUE; --\""
    eq_(str(cxt.exception), expected)
def test_select_directionerror():
    """A SQL-injection attempt in an order-by direction raises DirectionError."""
    with assert_raises(DirectionError) as cxt:
        select('person', {'name like': 'Mosky%'},
               order_by=('age ; DROP person; --', ))
    expected = "this direction is not allowed: '; DROP PERSON; --'"
    eq_(str(cxt.exception), expected)
def test_select_param():
    """param(), autoparam and ___ all render as named %(...)s placeholders."""
    generated = select('table', OrderedDict([
        ('custom_param', param('my_param')), ('auto_param', autoparam),
        ('using_alias', ___),
    ]))
    expected = (
        'SELECT * FROM "table" WHERE "custom_param" = %(my_param)s '
        'AND "auto_param" = %(auto_param)s AND "using_alias" = %(using_alias)s'
    )
    eq_(generated, expected)
def test_insert_dict():
    """insert() renders a dict of column/value pairs in order."""
    generated = insert('person', OrderedDict([
        ('person_id', 'mosky'), ('name', 'Mosky Liu')
    ]))
    expected = ('INSERT INTO "person" ("person_id", "name") '
                'VALUES (\'mosky\', \'Mosky Liu\')')
    eq_(generated, expected)
def test_insert_returing():
    """A raw('*') returning clause is appended verbatim."""
    generated = insert('person', OrderedDict([
        ('person_id', 'mosky'), ('name', 'Mosky Liu'),
    ]), returning=raw('*'))
    expected = ('INSERT INTO "person" ("person_id", "name") '
                'VALUES (\'mosky\', \'Mosky Liu\') RETURNING *')
    eq_(generated, expected)
def test_replace():
    """replace() renders a REPLACE INTO statement with ordered columns."""
    generated = replace('person', OrderedDict([
        ('person_id', 'mosky'), ('name', 'Mosky Liu')
    ]))
    expected = ('REPLACE INTO "person" ("person_id", "name") '
                'VALUES (\'mosky\', \'Mosky Liu\')')
    eq_(generated, expected)
|
space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemannn <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([], int)] # right hemi is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3]) #
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language). Without this also
# the main effects will be returned.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# Note. for further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut_stats_cluster_sensor_rANOVA_tfr>`.
def stat_fun(*args):
    """Adapter between the clustering routine and the rm-ANOVA.

    The clustering code passes one flattened array per condition; the
    ANOVA expects subjects x conditions x observations, so the first two
    axes are swapped before the call.  Only the F-values (element 0 of
    the result) are returned.
    """
    data = np.swapaxes(args, 1, 0)
    return f_mway_rm(data, factor_levels=factor_levels,
                     effects=effects, return_pvals=return_pvals)[0]
# get f-values only.
###############################################################################
# Compute cl | ustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal).
source_space = grade_to_tris(5)
# as we only have one hemisphere we need only need half the connectivity
lh_source_s | pace = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, eff |
#!/usr/bin/python
#
# convert .po to .js
#
import json
import optparse
import os
import polib
import re
import string
import sys
parser = optparse.OptionParser(usage="usage: %prog [options] pofile...")
parser.add_option("--callback", default="_.setTranslation", dest="callback", help="callback function to call with data")
parser.add_option("--quiet", action="store_false", default=True, dest="verbose", help="don't print status messages to stdout")
(options, args) = parser.parse_args()
# optparse always returns a list for `args`; truthiness covers both the
# empty list and the (impossible) None the original compared against.
if not args:
    print("ERROR: you must specify at least one po file to translate")
    sys.exit(1)
# NOTE(review): paramFix is compiled but never used below -- kept in case
# other tooling imports it from this module; confirm before removing.
paramFix = re.compile("(\\(([0-9])\\))")
for srcfile in args:
    # Output next to the input: same basename, .js extension.
    destfile = os.path.splitext(srcfile)[0] + ".js"
    if options.verbose:
        print("INFO: converting %s to %s" % (srcfile, destfile))
    # Build msgid -> msgstr, skipping obsolete and untranslated entries.
    xlate_map = {}
    po = polib.pofile(srcfile, autodetect_encoding=False, encoding="utf-8", wrapwidth=-1)
    for entry in po:
        if entry.obsolete or entry.msgstr == '':
            continue
        xlate_map[entry.msgid] = entry.msgstr
    # Emit 'i18n = {...};'.  Translations that are JavaScript function
    # literals are written unquoted so they stay executable code rather
    # than JSON strings.  `with` guarantees the file is closed on error.
    with open(destfile, "w") as dest:
        dest.write('i18n = ')
        encoder = json.JSONEncoder()
        for part in encoder.iterencode(xlate_map):
            if part.startswith('"function('):
                dest.write(part[1:-1])
            else:
                dest.write(part)
        dest.write(";\n")
|
#!/usr/bin/python
import unittest, pprint, sys
sys.path.append( '../siux' )
import siuxlib
class TestSourceInfo(unittest.TestCase):
# config
auth = '<YOUR_API_KEY>'
def checkSourceInfo( self,retList ):
"""
Method tests sourceInfo structure
:param retList: - structure reported by API
"""
self.assertTrue( 'browser' in retList )
self.assertTrue( 'browserFamilyName' in retList )
self.assertTrue( 'browserId' in retList )
self.assertTrue( isinstance( retList['browserId'] ,int ))
self.assertTrue( 'browserName' in retList )
self.assertTrue( 'clientId' in retList )
self.assertTrue( isinstance( retList['clientId'] ,int ))
self.assertTrue( 'clientName' in retList )
self.assertTrue( 'clientPay' in retList )
self.assertTrue( isinstance( retList['clientPay'] ,int ))
self.assertTrue( 'domainId' in retList )
self.assertTrue( isinstance( retList['domainId'] ,int ))
self.assertTrue( 'googleGaProfileId' in retList )
self.assertTrue( isinstance( retList['googleGaProfileId'] ,int ))
self.assertTrue( 'googleGaTsCreate' in retList )
self.assertTrue( isinstance( retList['googleGaTsCreate'] ,int ))
self.assertTrue( 'lastChecktime' in retList )
self.assertTrue( isinstance( retList['lastChecktime'] ,int ))
self.assertTrue( 'lastErrChecktime' in retList )
self.assertTrue( isinstance( retList['lastErrChecktime'] ,int ))
self.assertTrue( 'lastErrNo' in retList )
self.assertTrue( isinstance( retList['lastErrNo'] ,int ))
self.assertTrue( 'lastErrStatusCode' in retList )
self.assertTrue( isinstance( retList['lastErrStatusCode'] ,int ))
self.assertTrue( 'lastErrStatusMessage' in retList )
self.assertTrue( 'lastStatusCode' in retList )
self.assertTrue( 'lastStatusMessage' in retList )
self.assertTrue( 'lastStatusText' in retList )
self.assertTrue( 'minAvailability' in retList )
self.assertTrue( isinstance( retList['minAvailability'] ,float ))
self.assertTrue( 'name' in retList )
self.assertTrue( 'paramCookie' in retList )
self.assertTrue( 'paramHeaderOnly' in retList )
self.assertTrue( 'paramPasswd' in retList )
self.assertTrue( 'paramPost' in retList )
self.assertTrue( 'paramSearch' in retList )
self.assertTrue( 'paramServer' in retList )
self.assertTrue( 'paramServerType' in retList )
self.assertTrue( 'paramUsername' in retList )
self.assertTrue( 'parentId' in retList )
self.assertTrue( 'publicStatActive' in retList )
self.assertTrue( isinstance( retList['publicStatActive'] ,int ))
self.assertTrue( 'rumIdent' in retList )
self.assertTrue( 'serviceCheckType' in retList )
self.assertTrue( 'serviceCheckTypeId' in retList )
self.assertTrue( isinstance( retList['serviceCheckTypeId'] ,int ))
self.assertTrue( 'siuxdbId' in retList )
self.assertTrue( isinstance( retList['siuxdbId'] ,int ))
self.assertTrue( 'sourceGroupId' in retList )
self.assertTrue( isinstance( retList['sourceGroupId'] ,int ))
self.assertTrue( 'sourceGroupName' in retList )
self.assertTrue( 'sourceId' in retList )
self.assertTrue( isinstance( retList['sourceId'] ,int ))
self.assertTrue( 'sourceType' in retList )
self.assertTrue( 'sourceTypeName' in retList )
self.assertTrue( 'status' in retList )
self.assertTrue( 'timeSchemeId' in retList )
self.assertTrue( isinstance( retList['timeSchemeId'] ,int ))
self.assertTrue( 'timeSchemeName' in retList )
self.assertTrue( 'timeout' in retList )
self.assertTrue( isinstance( retList['timeout'] ,int ))
self.ass | ertTrue( 'timeoutErr' in retList )
self.assertTrue( isinstance( retList['timeoutErr'] ,int ))
self.assertTrue( 'timeoutWarn' in retList )
self.assertTrue( isinstance( retList['timeoutWarn'] ,int ))
self.assertTrue( 'timezone' in retList )
self.assertTrue( isinstance( retList['timezone'] ,int ))
self.assertTrue( 'timezoneId' in retList )
self.asse | rtTrue( isinstance( retList['timezoneId'] ,int ))
self.assertTrue( 'timezoneName' in retList )
self.assertTrue( 'timezoneNick' in retList )
self.assertTrue( 'url' in retList )
self.assertTrue( 'urlNice' in retList )
def testSourceInfo(self):
"""
Test tests correct api sourceList call
"""
# init
S = siuxlib.SiUXclient( auth = self.auth )
# source.list()
retList = S.sourceList()
#pprint.pprint( retList )
if retList['statusCode'] == 'OK':
if sys.version_info[0] == 2 and sys.version_info[1] >= 7:
self.assertGreater(retList['data'].__len__(),0)
for line in retList['data']:
sourceId = line['sourceId']
self.assertTrue( isinstance( sourceId , int ) )
sourceInfo = S.sourceInfo( sourceId )
self.checkSourceInfo( sourceInfo['data'] )
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
from pymongo import MongoClient
import json
import sys
# print message and die
def msgDie(msg):
    """Print msg and terminate with exit status 2.

    Uses the parenthesized print form, which behaves identically under
    Python 2 (single-argument expression) and Python 3 (function call),
    instead of the py2-only `print msg` statement.
    """
    print(msg)
    sys.exit(2)
# Expect exactly three arguments: config path, site name, minimum SNR.
if len(sys.argv) != 4:
    msgDie("usage: unifi-minder.py config.json site-name minSNR")
# load config
cfgFile = sys.argv[1]
siteName = sys.argv[2]
minSNR = sys.argv[3]
with open(cfgFile) as data_file:
    cfg = json.load(data_file)
# get database
dbCfg = cfg['database']
client = MongoClient(dbCfg['host'], dbCfg['port'])
db = client[dbCfg['db']]
sites = db['site']
site = sites.find_one({"name": siteName})
# devices reference their site by the stringified ObjectId
sid = str(site["_id"])
devices = db['device']
# Emit one UniFi config line per radio: config.minrssi.<mac>.<radio>=<snr>
# (Python 2 print statement; minSNR is passed through as the raw string.)
for device in devices.find({"site_id": sid}):
    mac = device["mac"]
    mac = mac.replace(":", "")
    for radio in device['radio_table']:
        radtype = radio['radio']
        print "config.minrssi.%s.%s=%s" % (mac, radtype, minSNR)
|
#!/usr/bin/env python
'''
decode an stm32 ICSR register value
'''
import sys
import optparse
def num(s):
    """Parse s as a decimal integer, falling back to base 16 (e.g. 'ff')."""
    try:
        return int(s)
    except ValueError:
        pass
    return int(s, 16)
# Parse the single positional argument: the raw ICSR register value,
# accepted in decimal or hex (see num()).
parser = optparse.OptionParser(__file__)
opts, args = parser.parse_args()
if len(args) == 0:
    print(parser.usage)
    sys.exit(0)
ICSR = num(args[0])
# https://www.st.com/content/ccc/resource/technical/document/programming_manual/6c/3a/cb/e7/e4/ea/44/9b/DM00046982.pdf/files/DM00046982.pdf/jcr:content/translations/en.DM00046982.pdf
# page 225
def decoder_m4_vectactive(value):
    """Print the human-readable name of the active exception number.

    Mapping per ST PM0214 (Cortex-M4): 0 is thread mode; 1, 7-10 and 13
    are reserved; values >= 16 are external interrupts (IRQn = value-16).
    """
    exceptions = {
        0: "Thread mode",
        1: "Reserved",
        2: "NMI",
        3: "Hard fault",
        4: "Memory management fault",
        5: "Bus fault",
        6: "Usage fault",
        7: "Reserved....",
        8: "Reserved",   # was missing: fell through to the IRQ branch and
        9: "Reserved",   # printed a bogus negative IRQ number (IRQ-8/IRQ-7)
        10: "Reserved",
        11: "SVCall",
        12: "Reserved for Debug",
        13: "Reserved",
        14: "PendSV",
        15: "SysTick",
    }
    if value in exceptions:
        exception = exceptions[value]
    else:
        exception = "IRQ%u" % (value - 16)
    sys.stdout.write(" (%s)" % exception)
# ICSR bit fields per PM0214: (bit range, name, optional value decoder).
M4_BITS = [
    ("0-8", "VECTACTIVE", decoder_m4_vectactive),
    ("9-10", "RESERVED1", None),
    ("11", "RETOBASE", None),
    ("12-18", "VECTPENDING", None),
    ("19-21", "RESERVED2", None),
    ("22", "ISRPENDING", None),
    ("23-24", "RESERVED3", None),
    ("25", "PENDSTCLR", None),
    ("26", "PENDSTSET", None),  # was missing: bit 26 was never decoded
    ("27", "PENDSVCLR", None),
    ("28", "PENDSVSET", None),
    ("29-30", "RESERVED4", None),
    ("31", "NMIPENDSET", None),
]
# Walk the field table, extract each field from ICSR and print it.
for bits, name, decoder in M4_BITS:
    if "-" in bits:
        lo, hi = (int(b) for b in bits.split("-"))
    else:
        lo = hi = int(bits)
    # Mask of `hi-lo+1` ones shifted into position, then shift back down.
    mask = ((1 << (hi - lo + 1)) - 1) << lo
    value = (ICSR & mask) >> lo
    sys.stdout.write("%s: %u" % (name, value))
    if decoder is not None:
        decoder(value)
    print("")
|
import math
def formatAmount(val, prec=3, lowest=0, highest=0, currency=False, forceSign=False):
    """
    Add suffix to value, transform value to match new suffix and round it.
    Keyword arguments:
    val -- value to process
    prec -- precision of final number (number of significant positions to show)
    lowest -- lowest order for suffixizing for numbers 0 < |num| < 1
    highest -- highest order for suffixizing for numbers |num| > 1
    currency -- if currency, billion suffix will be B instead of G
    forceSign -- if True, positive numbers are signed too
    """
    if val is None:
        return ""
    # Define suffix maps
    posSuffixMap = {3: "k", 6: "M", 9: "B" if currency is True else "G"}
    # '\u03bc' is the micro sign (Greek mu)
    negSuffixMap = {-6: '\u03bc', -3: "m"}
    # Define tuple of the map keys
    # As we're going to go from the biggest order of abs(key), sort
    # them differently due to one set of values being negative
    # and other positive
    posOrders = tuple(sorted(iter(posSuffixMap.keys()), reverse=True))
    negOrders = tuple(sorted(iter(negSuffixMap.keys()), reverse=False))
    # Find the least abs(key)
    posLowest = min(posOrders)
    negHighest = max(negOrders)
    # By default, mantissa takes just value and no suffix
    mantissa, suffix = val, ""
    # Positive suffixes
    if abs(val) > 1 and highest >= posLowest:
        # Start from highest possible suffix
        for key in posOrders:
            # Find first suitable suffix and check if it's not above highest order
            if abs(val) >= 10 ** key and key <= highest:
                mantissa, suffix = val / float(10 ** key), posSuffixMap[key]
                # Do additional step to eliminate results like 999999 => 1000k
                # If we're already using our greatest order, we can't do anything useful
                if posOrders.index(key) == 0:
                    break
                else:
                    # Get order greater than current
                    prevKey = posOrders[posOrders.index(key) - 1]
                    # Check if the key to which we potentially can change is greater
                    # than our highest boundary
                    if prevKey > highest:
                        # If it is, bail - we already have acceptable results
                        break
                    # Find multiplier to get from one order to another
                    orderDiff = 10 ** (prevKey - key)
                    # If rounded mantissa according to our specifications is greater than
                    # or equal to multiplier
                    if roundToPrec(mantissa, prec) >= orderDiff:
                        # Divide mantissa and use suffix of greater order
                        mantissa, suffix = mantissa / orderDiff, posSuffixMap[prevKey]
                    # Otherwise consider current results as acceptable
                    break
    # Take numbers between 0 and 1, and matching/below highest possible negative suffix
    elif abs(val) < 1 and val != 0 and lowest <= negHighest:
        # Start from lowest possible suffix
        for key in negOrders:
            # Get next order
            try:
                nextKey = negOrders[negOrders.index(key) + 1]
            except IndexError:
                nextKey = 0
            # Check if mantissa with next suffix is in range [1, 1000)
            if abs(val) < 10 ** nextKey and key >= lowest:
                mantissa, suffix = val / float(10 ** key), negSuffixMap[key]
                # Do additional step to eliminate results like 0.9999 => 1000m
                # Check if the key we're potentially switching to is greater than our
                # upper boundary
                # NOTE(review): with the default highest=0 and nextKey <= 0 this
                # never triggers; confirm `highest` (not `lowest`) is intended.
                if nextKey > highest:
                    # If it is, leave loop with results we already have
                    break
                # Find the multiplier between current and next order
                orderDiff = 10 ** (nextKey - key)
                # If rounded mantissa according to our specifications is greater than
                # or equal to multiplier
                if roundToPrec(mantissa, prec) >= orderDiff:
                    # Divide mantissa and use suffix of greater order
                    # Use special handling of zero key as it's not on the map
                    mantissa, suffix = mantissa / orderDiff, posSuffixMap[nextKey] if nextKey != 0 else ""
                # Otherwise consider current results as acceptable
                break
    # Round mantissa according to our prec variable
    mantissa = roundToPrec(mantissa, prec)
    sign = "+" if forceSign is True and mantissa > 0 else ""
    # Round mantissa and add suffix
    result = "{0}{1}{2}".format(sign, mantissa, suffix)
    return result
def roundToPrec(val, prec):
    """Round val to prec significant digits; integral results come back
    as int (e.g. 123.0 -> 123)."""
    # Integral values pass through unchanged; this also guarantees we
    # never take the logarithm of zero below.
    if int(val) == val:
        return int(val)
    # Decimal places needed to keep `prec` significant digits, e.g.
    # 0.0000354 with prec=3 keeps 7 places.  Clamped at zero so the
    # integer part is never rounded away.
    places = max(int(prec - math.ceil(math.log10(abs(val)))), 0)
    result = round(val, places)
    # Collapse results like 5.0 back to the int 5.
    return int(result) if int(result) == result else result
def roundDec(val, prec):
    """round() to prec decimal places, returning integral values as int."""
    return int(val) if int(val) == val else round(val, prec)
|
{}
for n,m in sys.modules.items():
if n=='reportlab' or n=='rlextra' or n[:10]=='reportlab.' or n[:8]=='rlextra.':
v = [getattr(m,x,None) for x in ('__version__','__path__','__file__')]
if [_f for _f in v if _f]:
v = [v[0]] + [_f for _f in v[1:] if _f]
module_versions[n] = tuple(v)
store['__module_versions'] = module_versions
self.store['__payload'] = {}
self._add(kw)
def _add(self,D):
payload = self.store['__payload']
for k, v in D.items():
payload[k] = v
    def add(self,**kw):
        """Public entry point: add the keyword arguments to the payload."""
        self._add(kw)
    def _dump(self,f):
        """Pickle self.store to the open binary file f.

        If the store as a whole fails to pickle (payload values can be
        arbitrary objects), retry with a copy in which every individually
        unpicklable value is replaced by a placeholder string.
        """
        try:
            pos=f.tell()
            pickle_dump(self.store,f)
        except:
            # Probe each entry separately into a scratch buffer to find the
            # offenders; the bare except is deliberate -- any pickling error
            # at all must trigger the fallback.
            S=self.store.copy()
            ff=getBytesIO()
            for k,v in S.items():
                try:
                    pickle_dump({k:v},ff)
                except:
                    S[k] = '<unpicklable object %r>' % v
            # Rewind to where the failed dump started and overwrite it.
            f.seek(pos,0)
            pickle_dump(S,f)
def dump(self):
f = open(self.fn,'wb')
try:
self._dump(f)
finally:
f.close()
    def dumps(self):
        """Return the pickled store as a bytes string."""
        f = getBytesIO()
        self._dump(f)
        return f.getvalue()
    def _load(self,f):
        # Replace the entire store with the pickled content of f.
        self.store = pickle_load(f)
    def load(self):
        """Load the store from the file named by self.fn."""
        f = open(self.fn,'rb')
        try:
            self._load(f)
        finally:
            f.close()
    def loads(self,s):
        """Load the store from the bytes string s."""
        self._load(getBytesIO(s))
    def _show_module_versions(self,k,v):
        """Print each recorded module version and whether the currently
        importable module matches it (SAME/DIFFERENT) or cannot be imported."""
        self._writeln(k[2:])
        K = list(v.keys())
        K.sort()
        for k in K:
            # v[k] may be a bare version or a (version, path, file) tuple;
            # vk0 is always the version component.
            vk = vk0 = v[k]
            if isinstance(vk,tuple): vk0 = vk[0]
            try:
                __import__(k)
                m = sys.modules[k]
                # py2-era and/or conditional: SAME when versions agree
                d = getattr(m,'__version__',None)==vk0 and 'SAME' or 'DIFFERENT'
            except:
                # bare except deliberate: any import failure is reported
                m = None
                d = '??????unknown??????'
            self._writeln(' %s = %s (%s)' % (k,vk,d))
    def _banner(self,k,what):
        # Section delimiter; k[2:] strips the '__' prefix of special keys.
        self._writeln('###################%s %s##################' % (what,k[2:]))
    def _start(self,k):
        self._banner(k,'Start ')
    def _finish(self,k):
        self._banner(k,'Finish ')
    def _show_lines(self,k,v):
        """Print v between Start/Finish banners."""
        self._start(k)
        self._writeln(v)
        self._finish(k)
    def _show_file(self,k,v):
        """v is a (filename, contents) pair; print the contents under a
        banner that includes the file's basename."""
        k = '%s %s' % (k,os.path.basename(v[0]))
        self._show_lines(k,v[1])
    def _show_payload(self,k,v):
        """Pretty-print the payload dict, but only if it is non-empty."""
        if v:
            import pprint
            self._start(k)
            pprint.pprint(v,self.stdout)
            self._finish(k)
    def _show_extensions(self):
        """Report presence, path and version of known C extensions."""
        for mn in ('_rl_accel','_renderPM','sgmlop','pyRXP','pyRXPU','_imaging','Image'):
            try:
                # A is the bound .append of a fresh [module-name] list;
                # A.__self__ below recovers that list for printing.
                A = [mn].append
                __import__(mn)
                m = sys.modules[mn]
                A(m.__file__)
                for vn in ('__version__','VERSION','_version','version'):
                    if hasattr(m,vn):
                        A('%s=%r' % (vn,getattr(m,vn)))
            except:
                # import (or attribute probing) failed -- extension absent
                A('not found')
            self._writeln(' '+' '.join(A.__self__))
    # Dispatch table: '__'-prefixed store keys get a custom renderer.
    specials = {'__module_versions': _show_module_versions,
                '__payload': _show_payload,
                '__traceback': _show_lines,
                '__script': _show_file,
                }
    def show(self):
        """Print the whole store: plain keys first, then the specials,
        then the C-extension report."""
        K = list(self.store.keys())
        K.sort()
        for k in K:
            if k not in list(self.specials.keys()): self._writeln('%-15s = %s' % (k,self.store[k]))
        for k in K:
            if k in list(self.specials.keys()): self.specials[k](self,k,self.store[k])
        self._show_extensions()
    def payload(self,name):
        """Return payload entry `name` (KeyError if absent)."""
        return self.store['__payload'][name]
    def __setitem__(self,name,value):
        # dict-style access delegates to the payload
        self.store['__payload'][name] = value
    def __getitem__(self,name):
        return self.store['__payload'][name]
    def _writeln(self,msg):
        # All output funnels through here so self.stdout can be redirected.
        self.stdout.write(msg+'\n')
def _flatten(L,a):
    # Depth-first walk: sequences recurse, atoms go to the collector a.
    for item in L:
        if isSeq(item):
            _flatten(item,a)
        else:
            a(item)
def flatten(L):
    '''recursively flatten the list or tuple L'''
    out = []
    _flatten(L,out.append)
    return out
def find_locals(func,depth=0):
    '''apply func to the locals at each stack frame till func returns a non false value'''
    # Walks outward from frame `depth`; terminates either by func returning
    # a truthy value or by sys._getframe raising ValueError past the
    # outermost frame (which then propagates to the caller).
    while 1:
        _ = func(sys._getframe(depth).f_locals)
        if _: return _
        depth += 1
class _FmtSelfDict:
def __init__(self,obj,overrideArgs):
self.obj = obj
self._overrideArgs = overrideArgs
def __getitem__(self,k):
try:
return self._overrideArgs[k]
except KeyError:
try:
return self.obj.__dict__[k]
except KeyError:
return getattr(self.obj,k)
class FmtSelfDict:
    '''mixin to provide the _fmt method'''
    def _fmt(self,fmt,**overrideArgs):
        # '%(attr)s' placeholders in fmt resolve against overrideArgs
        # first, then against self's attributes.
        D = _FmtSelfDict(self, overrideArgs)
        return fmt % D
def _simpleSplit(txt,mW,SW):
L = []
ws = SW(' ')
O = []
w = -ws
for t in txt.split():
lt = SW(t)
if w+ws+lt<=mW or O==[]:
O.append(t)
w = w + ws + lt
else:
L.append(' '.join(O))
O = [t]
w = lt
if O!=[]: L.append(' '.join(O))
return L
def simpleSplit(text,fontName,fontSize,maxWidth):
    """Split text into lines that fit maxWidth in the given font.

    The text is first split on newlines; each piece is then word-wrapped
    with _simpleSplit.  A falsy maxWidth disables wrapping and only the
    newline split applies.
    """
    from reportlab.pdfbase.pdfmetrics import stringWidth
    lines = asUnicode(text).split(u'\n')
    SW = lambda text, fN=fontName, fS=fontSize: stringWidth(text, fN, fS)
    if maxWidth:
        L = []
        for l in lines:
            # was: L[-1:-1] = _simpleSplit(...) -- once L is non-empty that
            # slice splices the new chunk *before* the last element, so
            # multi-line input came back out of order; extend() appends and
            # preserves document order.
            L.extend(_simpleSplit(l,maxWidth,SW))
        lines = L
    return lines
def escapeTextOnce(text):
    "Escapes once only"
    from xml.sax.saxutils import escape
    if text is None:
        return text
    # was: `s = text.decode('utf8')` -- the decoded value went into an
    # unused local while the raw bytes were passed to escape(); decode in
    # place so escaping operates on text.
    if isBytes(text): text = text.decode('utf8')
    text = escape(text)
    # Undo double-escaping of entities that were already escaped on input.
    text = text.replace(u'&amp;',u'&')
    text = text.replace(u'&gt;', u'>')
    text = text.replace(u'&lt;', u'<')
    return text
if isPy3:
    def fileName2FSEnc(fn):
        '''return fn as text, decoding bytes with a filesystem encoding (py3 branch)'''
        if isUnicode(fn):
            return fn
        else:
            # NOTE(review): unlike the py2 branch below, fsEncodings is not
            # imported locally here -- confirm it is bound at module level,
            # otherwise this path raises NameError for bytes input.
            for enc in fsEncodings:
                try:
                    return fn.decode(enc)
                except:
                    pass
            raise ValueError('cannot convert %r to filesystem encoding' % fn)
else:
    def fileName2FSEnc(fn):
        '''attempt to convert a filename to utf8'''
        from reportlab.rl_config import fsEncodings
        if isUnicode(fn):
            return asBytes(fn)
        else:
            # try each configured filesystem encoding in turn
            for enc in fsEncodings:
                try:
                    return fn.decode(enc).encode('utf8')
                except:
                    pass
            raise ValueError('cannot convert %r to utf8 for file path name' % fn)
import itertools
def prev_this_next(items):
    """
    Loop over a collection with look-ahead and look-back.
    From Thomas Guest,
    http://wordaligned.org/articles/zippy-triples-served-with-python
    Yields (previous, current, next) triples, with None substituted at
    both ends of the collection.

    Bug fix: the third tee iterator used to be named `next`, shadowing
    the builtin, so `next(this)` tried to *call an iterator object* and
    raised TypeError -- the function could never run.  Renamed to `nxt`
    so the builtin advances the iterators as intended.
    """
    extended = itertools.chain([None], items, [None])
    prev, this, nxt = itertools.tee(extended, 3)
    try:
        next(this)
        next(nxt)
        next(nxt)
    except StopIteration:
        pass
    return zip(prev, this, nxt)
def commasplit(s):
'''
Splits the string s at every unescaped comma and returns the result as a list.
To escape a comma, double it. Individual items are stripped.
To avoid the ambiguity of 3 successive commas to denote a comma at the beginning
or end of an item, add a space between the item seperator and the escaped comma.
>>> commasplit(u'a,b,c') == [u'a', u'b', u'c']
True
>>> commasplit('a,, , b , c ') == [u'a,', u'b', u'c']
True
>>> commasplit(u'a, ,,b, c') == [u'a', u',b', u'c']
'''
if isBytes(s): s = s |
"""
A Printer for generating executable code.
The most important function here is srepr that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from printer import Printer
from sympy.core import Basic
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import prec_to_dps, repr_dps
class ReprPrinter(Printer):
    """Printer producing evaluable repr output: eval(srepr(e)) == e
    in an environment with the sympy names in scope."""
    printmethod = "_sympyrepr"
    def reprify(self, args, sep):
        # Join the printed forms of args with sep.
        return sep.join([self.doprint(item) for item in args])
    def emptyPrinter(self, expr):
        """Fallback for objects without a dedicated _print_* method."""
        if isinstance(expr, str):
            return expr
        elif hasattr(expr, "__srepr__"):
            return expr.__srepr__()
        elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
            l = []
            for o in expr.args:
                l.append(self._print(o))
            return expr.__class__.__name__ + '(%s)'%', '.join(l)
        elif hasattr(expr, "__module__") and hasattr(expr, "__name__"):
            return "<'%s.%s'>"%(expr.__module__, expr.__name__)
        else:
            return str(expr)
    def _print_Add(self, expr):
        # canonical ordering so the repr is stable (py2 cmp-style sort)
        args = list(expr.args)
        args.sort(Basic._compare_pretty)
        args = map(self._print, args)
        return "Add(%s)"%", ".join(args)
    def _print_Function(self, expr):
        r = '%s(%r)' % (expr.func.__base__.__name__, expr.func.__name__)
        r+= '(%s)' % ', '.join([self._print(a) for a in expr.args])
        return r
    def _print_FunctionClass(self, expr):
        return 'Function(%r)'%(expr.__name__)
    def _print_GeometryEntity(self, expr):
        # GeometryEntity is special -- its base is tuple
        return repr(expr)
    def _print_Infinity(self, expr):
        return 'Infinity'
    def _print_Integer(self, expr):
        return '%s(%s)' % (expr.__class__.__name__, self._print(expr.p))
    def _print_list(self, expr):
        return "[%s]"%self.reprify(expr, ", ")
    def _print_Matrix(self, expr):
        l = []
        for i in range(expr.rows):
            l.append([])
            for j in range(expr.cols):
                l[-1].append(expr[i,j])
        return '%s(%s)' % (expr.__class__.__name__, self._print(l))
    def _print_NaN(self, expr):
        return "nan"
    def _print_NegativeInfinity(self, expr):
        return "NegativeInfinity"
    def _print_NegativeOne(self, expr):
        return "NegativeOne"
    def _print_One(self, expr):
        return "One"
    def _print_Rational(self, expr):
        return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.p), self._print(expr.q))
    def _print_Fraction(self, expr):
        return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.numerator), self._print(expr.denominator))
    def _print_Real(self, expr):
        dps = prec_to_dps(expr._prec)
        r = mlib.to_str(expr._mpf_, repr_dps(expr._prec))
        return "%s('%s', prec=%i)" % (expr.__class__.__name__, r, dps)
    def _print_Sum2(self, expr):
        return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
                                           self._print(expr.a), self._print(expr.b))
    def _print_Symbol(self, expr):
        return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
    def _print_Predicate(self, expr):
        return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
    def _print_str(self, expr):
        return repr(expr)
    def _print_tuple(self, expr):
        if len(expr)==1:
            return "(%s,)"%self._print(expr[0])
        else:
            return "(%s)"%self.reprify(expr, ", ")
    def _print_WildFunction(self, expr):
        return "%s('%s')" % (expr.__class__.__name__, expr.name)
    def _print_Zero(self, expr):
        return "Zero"
    def _print_AlgebraicNumber(self, expr):
        # was: self.__class__.__name__ and self.coeffs() -- the printer has
        # neither the number's class name nor a coeffs() method, so this
        # printed the wrong name and raised AttributeError; both must come
        # from the expression being printed.
        return "%s(%s, %s)" % (expr.__class__.__name__,
            self._print(expr.coeffs()), self._print(expr.root))
def srepr(expr, **settings):
    """return expr in repr form

    The output is intended to round-trip: eval(srepr(expr)) == expr in an
    environment where the relevant sympy names are in scope.
    """
    return ReprPrinter(settings).doprint(expr)
|
# -*- coding: utf-8 -*-
from eventlet import httpc
from zenqueue.client.http.common import HTTPQueueClient
class QueueClient(HTTPQueueClient):
    """HTTP queue client posting JSON bodies via eventlet's httpc.

    (Python 2 `except ..., exc` syntax.)
    """
    def send(self, url, data=''):
        """POST data to url; return the response body even on HTTP error."""
        # Catch non-successful HTTP requests and treat them as if they were.
        try:
            result = httpc.post(url, data=data,
                content_type='application/json; charset=utf-8')
        except httpc.ConnectionError, exc:
            # the error object still carries the server's response body
            result = exc.params.response_body
        return result
from django.contrib import admin
from . import models
from .models import | Hollywood, Profession, Artist, Xaxis, Yaxis, MovieImage, ArtistImage
class ArtistInline(admin.StackedInline):
    # Edit Artists inline on their Profession page; 4 extra blank forms.
    model = Artist
    extra = 4
class ProfessionAdmin(admin.ModelAdmin):
    # Only the profession name is editable; artists are managed inline.
    fieldsets = [
        (None, {'fields': ['prof_name']}),
    ]
    inlines = [ArtistInline]
admin.site.register(Profession, ProfessionAdmin)
# Remaining models use the default ModelAdmin.
admin.site.register(Hollywood)
admin.site.register(Xaxis)
admin.site.register(Yaxis)
admin.site.register(MovieImage)
admin.site.register(ArtistImage)
|
"""
Public views
"""
from django_future.csrf import ensure_csrf_cookie
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.conf import settings
from edxmako.shortcuts import render_to_response
from external_auth.views import (ssl_login_shortcut, ssl_get_cert_from_request,
redirect_with_get)
from microsite_configuration import microsite
__all__ = ['signup', 'login_page', 'howitworks']
@ensure_ | csrf_cookie
def signup(request):
"""
Display the signup form.
"""
csrf_token = csrf(request)['csrf_token']
if request.user.is_authenticated():
return redirect('/course/')
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to course to login to process their certificate if SSL is enabled
# and registration is disabled.
return redirect_with_get('l | ogin', request.GET, False)
return render_to_response('register.html', {'csrf': csrf_token})
@ssl_login_shortcut
@ensure_csrf_cookie
def login_page(request):
    """
    Display the login form.

    Bypasses the form when SSL client-certificate auth already signed the
    user in (via the decorator) or when CAS handles authentication.
    """
    csrf_token = csrf(request)['csrf_token']
    if (settings.FEATURES['AUTH_USE_CERTIFICATES'] and
        ssl_get_cert_from_request(request)):
        # SSL login doesn't require a login view, so redirect
        # to course now that the user is authenticated via
        # the decorator.
        next_url = request.GET.get('next')
        if next_url:
            return redirect(next_url)
        else:
            return redirect('/course/')
    if settings.FEATURES.get('AUTH_USE_CAS'):
        # If CAS is enabled, redirect auth handling to there
        return redirect(reverse('cas-login'))
    return render_to_response(
        'login.html',
        {
            'csrf': csrf_token,
            'forgot_password_link': "//{base}/login#forgot-password-modal".format(base=settings.LMS_BASE),
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
    )
def howitworks(request):
    "Proxy view"
    # Logged-in users skip the marketing page and go straight to courses.
    if not request.user.is_authenticated():
        return render_to_response('howitworks.html', {})
    return redirect('/course/')
|
#!/usr/bin/env python
'Unit test for debugger info file'
import inspect, unittest
from trepan import debugger as Mdebugger
from trepan.processor.command import info as Minfo
from trepan.processor.command.info_subcmd import files as MinfoFile
from cmdhelper import dbg_setup
class TestInfoFile(unittest.TestCase):
# FIXME: put in a more common place
# Possibly fix up Mock to include this
def setup_io(self, command):
self.clear_output()
command.msg = self.msg
command.errmsg = self.errmsg
command.msg_nocr = self.msg_nocr
return
def clear_output(s | elf):
self.msgs = []
self.errmsgs = []
self.last_was_newline = True
return
def msg_nocr(self, msg):
if len(self.msgs) > 0:
self.msgs[-1] += msg
else:
self.msgs += msg
pass
return
def msg(self, msg):
self.msgs += [msg]
return
def errmsg(self, msg):
self.errmsgs.append(msg)
pass
def test_info_file(self):
d = Mdebugger.Debugger( | )
d, cp = dbg_setup(d)
command = Minfo.InfoCommand(cp, 'info')
sub = MinfoFile.InfoFiles(command)
self.setup_io(sub)
sub.run([])
self.assertEqual([], self.msgs)
cp.curframe = inspect.currentframe()
for width in (80, 200):
# sub.settings['width'] = width
sub.run(['test-info-file.py', 'lines'])
sub.run([])
pass
pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""
Test for the bandicoot.helper.group module.
"""
import bandicoot as bc
from bandicoot.core import Record, Position
import unittest
import datetime
from bandicoot.tests.generate_user import random_burst
from bandicoot.helper.group import group_records
from bandicoot.helper.tools import std, mean
from datetime import timedelta
import numpy as np
import os
class TestGroup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestGroup._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
| os.chdir(abspath)
TestGroup._dir_changed = True
self.maxDiff = None
self.user = bc.io.read_orange("u_test", "samples", describe=False)
self.random_int_list = np.random.randint(1, 1000, size=9001)
self.sum_stats_list = [bc.helper.tools.SummaryStats(np.random.rand(), np.random.rand(),
np.random.rand(), np.rando | m.rand(), np.random.rand(), np.random.rand(), np.random.rand(), []) for _ in range(9001)]
def test_statistics(self):
self.assertDictEqual(bc.helper.group.statistics(self.random_int_list, summary='default'), {
'mean': mean(self.random_int_list),
'std': std(self.random_int_list),
})
def mean_std(key):
return {
'mean': mean([getattr(s, key) for s in self.sum_stats_list]),
'std': std([getattr(s, key) for s in self.sum_stats_list]),
}
self.assertDictEqual(bc.helper.group.statistics(self.sum_stats_list, summary='extended'), {
'kurtosis': mean_std('kurtosis'),
'mean': mean_std('mean'),
'median': mean_std('median'),
'skewness': mean_std('skewness'),
'std': mean_std('std'),
'min': mean_std('min'),
'max': mean_std('max')
})
self.assertEqual(bc.helper.group.statistics([]).values(), [None] * 2)
def test_statistics_bad_aggregated(self):
def run_bad_aggregated():
try:
bc.helper.group.statistics("bad_aggregated")
except (TypeError, ValueError):
return True
return False
self.assertTrue(run_bad_aggregated())
def test_weekly_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 24), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekday_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 25), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekday')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekend_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 31), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekend')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_daily_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 22, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 7, 11, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18, 2, 00), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='night')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[3]]])
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='day')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0], records[1]], [records[2]]])
def test_none_group(self):
records = [
Record("call", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 5), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 11), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 12), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby=None)
self.assertEqual(records, list(next(grouping)))
self.assertRaises(StopIteration, grouping.next)
class ConsistencyTests(unittest.TestCase):
    """Grouping must neither drop nor duplicate any record."""

    def setUp(self):
        self.user = bc.User()
        self.user.records = random_burst(100, delta=timedelta(days=2))

    def _group_set(self, method, interaction):
        """Flatten the grouped records back into a single set."""
        chunks = group_records(self.user, groupby=method,
                               interaction=interaction)
        return {record for chunk in chunks for record in chunk}

    def test_weekly(self):
        originals = set(self.user.records)
        # Ungrouped filter: every record must come back exactly once.
        self.assertSetEqual(self._group_set('week', None), originals)
        # Filtering on 'call' must keep exactly the call records.
        calls_only = {record for record in originals
                      if record.interaction == 'call'}
        self.assertSetEqual(self._group_set('week', 'call'), calls_only)
class MissingTests(unittest.TestCase):
    """Checks the bookkeeping of records ignored at load time."""

    def setUp(self):
        self.user = bc.read_csv('user_ignored', 'samples')

    def test_amount(self):
        # Per-field counts of why records were dropped from the sample.
        expected = {
            'all': 4,
            'interaction': 2,
            'direction': 2,
            'correspondent_id': 0,
            'datetime': 0,
            'call_duration': 1,
        }
        self.assertDictEqual(self.user.ignored_records, expected)

    def test_total_records(self):
        # Only a single record survives the filtering.
        self.assertEqual(len(self.user.records), 1)
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module provides a place to collect various mic-related utils
# for the OpenEmbedded Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
from mic import msger
from mic.utils import runner
def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
    """
    Execute command, catching stderr, stdout

    Need to execute as_shell if the command uses wildcards
    """
    msger.debug("exec_cmd: %s" % cmd_and_args)
    args = cmd_and_args.split()
    msger.debug(args)
    # Shell execution gets the raw string; otherwise pass the argv list.
    tool_input = cmd_and_args if as_shell else args
    rc, out = runner.runtool(tool_input, catch)
    out = out.strip()
    msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
                (cmd_and_args, rc, out))
    if rc != 0:
        # We don't throw exception when return code is not 0, because
        # parted always fails to reload part table with loop devices. This
        # prevents us from distinguishing real errors based on return
        # code.
        msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))
    return (rc, out)
def exec_cmd_quiet(cmd_and_args, as_shell = False):
    """
    Execute command, catching nothing in the output

    Need to execute as_shell if the command uses wildcards
    """
    # catch=0 means neither stdout nor stderr is captured.
    return exec_cmd(cmd_and_args, as_shell, catch=0)
def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
    """
    Execute native command, catching stderr, stdout

    Need to execute as_shell if the command uses wildcards
    Always need to execute native commands as_shell
    """
    # Put the native sysroot's binary directories first on PATH so tools
    # built for the build host win over the system's own.
    path_export = \
        "export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
        (native_sysroot, native_sysroot, native_sysroot)
    wrapped_cmd = "%s;%s" % (path_export, cmd_and_args)
    msger.debug("exec_native_cmd: %s" % cmd_and_args)
    args = cmd_and_args.split()
    msger.debug(args)
    rc, out = exec_cmd(wrapped_cmd, True, catch)
    if rc == 127: # shell command-not-found
        msger.error("A native (host) program required to build the image "
                    "was not found (see details above). Please make sure "
                    "it's installed and try again.")
    return (rc, out)
def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
    """
    Execute native command, catching nothing in the output

    Need to execute as_shell if the command uses wildcards
    Always need to execute native commands as_shell
    """
    # catch=0 discards both stdout and stderr.
    return exec_native_cmd(cmd_and_args, native_sysroot, catch=0)
# kickstart doesn't support variable substution in commands, so this
# is our current simplistic scheme for supporting that
wks_vars = dict()
def get_wks_var(key):
    # Look up a previously-registered kickstart substitution variable.
    # Raises KeyError if *key* was never added via add_wks_var().
    return wks_vars[key]
def add_wks_var(key, val):
    # Register a kickstart substitution variable for later lookup.
    wks_vars[key] = val
BOOTDD_EXTRA_SPACE = 16384
IMAGE_EXTRA_SPACE = 10240
__bitbake_env_lines = ""
def set_bitbake_env_lines(bitbake_env_lines):
    # Cache the 'bitbake -e' output for later get_bitbake_var() lookups.
    global __bitbake_env_lines
    __bitbake_env_lines = bitbake_env_lines
def get_bitbake_env_lines():
    # Return the cached 'bitbake -e' output (empty string if never set).
    return __bitbake_env_lines
def get_line_val(line, key):
    """
    Extract the value from the VAR="val" string

    Returns the unquoted value, or None if *line* does not start with
    'key='.
    """
    if line.startswith(key + "="):
        # Split only on the first '=' so values containing '=' are kept
        # intact, then strip the surrounding double quotes.
        stripped_line = line.split('=', 1)[1]
        stripped_line = stripped_line.replace('\"', '')
        return stripped_line
    return None
def get_bitbake_var(key):
    """
    Return the value of *key* from the cached bitbake environment,
    or None if the variable is not set (or is empty).
    """
    for line in __bitbake_env_lines.split('\n'):
        # Parse each line once instead of calling get_line_val twice.
        val = get_line_val(line, key)
        if val:
            return val
    return None
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Lester R Claudio <claudiol@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: realm
version_added: 1.0.0
short_description: Manage Realms
description:
- Manage Realms
author:
- "Lester R Claudio (@claudiol1)"
options:
name:
description:
- Name of the realm
required: true
type: str
realm_proxy:
description:
- Proxy to use for this realm
required: true
type: str
realm_type:
description:
- Realm type
choices:
- Red Hat Identity Management
- FreeIPA
- Active Directory
required: true
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.entity_state
- theforeman.foreman.foreman.taxonomy
'''
EXAMPLES = '''
- name: "Create EXAMPLE.LOCAL Realm"
theforeman.foreman.realm:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
name: "EXAMPLE.COM"
realm_proxy: "foreman.example.com"
realm_type: "Red Hat Identity Management"
state: present
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
realms:
description: List of realms.
type: list
elements: dict
'''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanTaxonomicEntityAnsibleModule
class ForemanRealmModule(ForemanTaxonomicEntityAnsibleModule):
    # All behaviour comes from the taxonomic-entity base class; this
    # subclass exists only to name the entity type handled by the module.
    pass
def main():
    """Entry point: declare the realm parameters and run the module."""
    # The helper maps this spec onto the Foreman 'realms' API resource.
    spec = dict(
        name=dict(required=True),
        realm_proxy=dict(type='entity', required=True, resource_type='smart_proxies'),
        realm_type=dict(required=True, choices=['Red Hat Identity Management', 'FreeIPA', 'Active Directory']),
    )
    module = ForemanRealmModule(foreman_spec=spec)
    with module.api_connection():
        module.run()
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
|
from s | rc.tools.enum import enum
import pyxbmct.addonwindow as pyxbmct
from src.tools.dialog import dialog
EnumMode = enum(SELECT=0, ROTATE=1)
class EnumButton(object):
    """A button bound to a fixed list of values.

    Depending on *mode* it either opens a selection dialog
    (EnumMode.SELECT) or cycles to the next value on each click
    (EnumMode.ROTATE). Optional callbacks fire on change and on save,
    receiving either the value itself or its index depending on
    *returnValue*.
    """

    def __init__(self, label, values, current, default, changeCallback=None, saveCallback=None, customLabels=None, mode=EnumMode.SELECT, returnValue=False, alignment=pyxbmct.ALIGN_CENTER):
        self.label = label
        self.values = values
        self.customLabels = customLabels
        self.mode = mode
        self.returnValue = returnValue
        self.changeCallback = changeCallback
        self.saveCallback = saveCallback
        self.currentValue = current
        self.defaultValue = default
        # Indices are resolved lazily by _findCurrentIndex/_findDefaultIndex.
        self.currentIndex = None
        self.defaultIndex = None
        self.assignedValue = False
        if saveCallback is None:
            # Nothing to persist: disable the save hook entirely.
            self.onSave = None
        if customLabels:
            self._findCurrentIndex()
            label = str(customLabels[self.currentIndex])
        else:
            label = str(current)
        if alignment is not None:
            self.button = pyxbmct.Button(label, alignment=alignment)
        else:
            self.button = pyxbmct.Button(label)

    def update(self, value):
        """Reflect an externally-changed value on the button label."""
        if self.currentValue != value:
            self.currentValue = value
            if self.customLabels:
                self._findCurrentIndex()
                label = str(self.customLabels[self.currentIndex])
            else:
                self.currentIndex = None
                label = str(value)
            self.button.setLabel(label)
            self.assignedValue = True

    def onClick(self):
        """Pick the next value: via dialog (SELECT) or cycling (ROTATE)."""
        if self.mode == EnumMode.SELECT:
            if self.customLabels:
                values = self.customLabels
            else:
                values = self.values
            selectedIndex = dialog.select(self.label, list((str(value) for value in values)))
            if selectedIndex == -1:
                # User cancelled the dialog; keep the current value.
                return
            index = selectedIndex
        else:
            if self.currentIndex is None:
                self._findCurrentIndex()
            # Wrap around to the first value after the last one.
            if self.currentIndex == len(self.values) - 1:
                index = 0
            else:
                index = self.currentIndex + 1
        self.assign(index)

    def onDefault(self):
        """Reset the button to its configured default value."""
        if self.defaultIndex is None:
            self._findDefaultIndex()
        self.assign(self.defaultIndex)

    def onSave(self):
        """Persist the selection via saveCallback (replaced by None when unset)."""
        if self.assignedValue:
            if self.returnValue:
                self.saveCallback(self.currentValue)
            else:
                self.saveCallback(self.currentIndex)

    def assign(self, index):
        """Apply the value at *index*, update the label and notify listeners."""
        value = self.values[index]
        self.currentIndex = index
        self.currentValue = value
        if self.customLabels:
            label = str(self.customLabels[index])
        else:
            label = str(value)
        self.button.setLabel(label)
        self.assignedValue = True
        if self.changeCallback:
            if self.returnValue:
                self.changeCallback(value)
            else:
                self.changeCallback(index)

    def _findDefaultIndex(self):
        # Scan the whole list; with duplicate values the last match wins.
        for i in range(0, len(self.values)):
            value = self.values[i]
            if value == self.defaultValue:
                self.defaultIndex = i
        if self.defaultIndex is None:
            raise ValueError ('Default value not found in value list')

    def _findCurrentIndex(self):
        # Scan the whole list; with duplicate values the last match wins.
        for i in range(0, len(self.values)):
            value = self.values[i]
            if value == self.currentValue:
                self.currentIndex = i
        if self.currentIndex is None:
            raise ValueError ('Current value not found in value list')
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as np
from rmgpy import getPath
from rmgpy.qm.main import QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.mopac import MopacMolPM3, MopacMolPM6, MopacMolPM7
# Locate the MOPAC executable, preferring the newest release installed.
mopacEnv = os.getenv('MOPAC_DIR', default="/opt/mopac")
if os.path.exists(os.path.join(mopacEnv , 'MOPAC2012.exe')):
    executablePath = os.path.join(mopacEnv , 'MOPAC2012.exe')
elif os.path.exists(os.path.join(mopacEnv , 'MOPAC2009.exe')):
    executablePath = os.path.join(mopacEnv , 'MOPAC2009.exe')
else:
    # Placeholder path: the skipIf decorators below will see it doesn't
    # exist and skip the MOPAC-dependent tests.
    executablePath = os.path.join(mopacEnv , '(MOPAC 2009 or 2012)')
# Naphthalene, the reference molecule used throughout these tests.
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
class TestMopacMolPM3(unittest.TestCase):
    """
    Contains unit tests for the Geometry class.
    """
    @unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
        qm = QMCalculator(software = 'mopac',
                          method = 'pm3',
                          fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
                          scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
                          )
        if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
        self.qmmol1 = MopacMolPM3(mol1, qm.settings)
    def testGenerateThermoData(self):
        """
        Test that generateThermoData() works correctly.
        """
        # Remove any cached result files so the calculation really runs.
        try:
            fileList = os.listdir(self.qmmol1.settings.fileStore)
            for fileName in fileList:
                os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
        except OSError:
            pass
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM3 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 0) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
    def testLoadThermoData(self):
        """
        Test that generateThermoData() can load thermo from a previous run.

        Check that it loaded, and the values are the same as above.
        """
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM3 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 0) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
class TestMopacMolPM6(unittest.TestCase):
    """
    Contains unit tests for the Geometry class.
    """
    @unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
        qm = QMCalculator(software = 'mopac',
                          method = 'pm6',
                          fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
                          scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
                          )
        if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
        self.qmmol1 = MopacMolPM6(mol1, qm.settings)
    def testGenerateThermoData(self):
        """
        Test that generateThermoData() works correctly.
        """
        # Remove any cached result files so the calculation really runs.
        try:
            fileList = os.listdir(self.qmmol1.settings.fileStore)
            for fileName in fileList:
                os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
        except OSError:
            pass
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM6 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 167704.4270, 0) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.0999241, 1) # to 1 decimal place
    def testLoadThermoData(self):
        """
        Test that generateThermoData() can load thermo from a previous run.

        Check that it loaded, and the values are the same as above.
        """
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM6 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            # Use an approximate comparison (as the sibling tests do):
            # exact float equality on a computed mass is fragile.
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 167704.0681, 0) # to 0 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.0999241, 1) # to 1 decimal place
class TestMopacMolPM7(unittest.TestCase):
    """
    Contains unit tests for the Geometry class.
    """
    @unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        rmgpy_root = os.path.normpath(os.path.join(getPath(), '..'))
        qm_dir = os.path.join(rmgpy_root, 'testing', 'qm')
        qm = QMCalculator(software = 'mopac',
                          method = 'pm7',
                          fileStore = os.path.join(qm_dir, 'QMfiles'),
                          scratchDirectory = os.path.join(qm_dir, 'QMscratch'),
                          )
        if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
        # Build a fresh naphthalene molecule for this test class.
        mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
        self.qmmol1 = MopacMolPM7(mol1, qm.settings)
    def testGenerateThermoData(self):
        """
        Test that generateThermoData() works correctly.
        """
        # Clear any cached result files so the calculation really runs.
        store = self.qmmol1.settings.fileStore
        try:
            for leftover in os.listdir(store):
                os.remove(os.path.join(store, leftover))
        except OSError:
            pass
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1) # to 1 decimal place
    def testLoadThermoData(self):
        """
        Test that generateThermoData() can load thermo from a previous run.
        Check that it loaded, and the values are the same as above.
        """
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1) # to 1 decimal place
################################################################################
# Run the suite verbosely when invoked directly.
if __name__ == '__main__':
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
# -*- coding: utf-8 -*-
#
# face_recognition documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from unittest.mock import MagicMock
class Mock(MagicMock):
    """Stand-in for heavy binary dependencies so autodoc can import the package."""
    @classmethod
    def __getattr__(cls, name):
        # Any attribute access on a mocked module yields a fresh MagicMock.
        return MagicMock()
MOCK_MODULES = ['face_recognition_models', 'Click', 'dlib', 'numpy', 'PIL']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import face_recognition
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Face Recognition'
copyright = u"2017, Adam Geitgey"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = face_recognition.__version__
# The full version, including alpha/beta/rc tags.
release = face_recognition.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'face_recognitiondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'face_recognition.tex',
u'Face Recognition Documentation',
u'Adam Geitgey', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'face_recognition',
u'Face Recognition Documentation',
[u'Adam Geitgey'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Venue."""
from telegram import Tele | gramObject, Location
class Venue(TelegramObject):
    """This object represents a venue.

    Attributes:
        location (:class:`telegram.Location`): Venue location.
        title (:obj:`str`): Name of the venue.
        address (:obj:`str`): Address of the venue.
        foursquare_id (:obj:`str`): Optional. Foursquare identifier of the venue.

    Args:
        location (:class:`telegram.Location`): Venue location.
        title (:obj:`str`): Name of the venue.
        address (:obj:`str`): Address of the venue.
        foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue.
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
    """
    def __init__(self, location, title, address, foursquare_id=None, **kwargs):
        # Required
        self.location = location
        self.title = title
        self.address = address
        # Optionals
        self.foursquare_id = foursquare_id
        # Location and title together identify a venue for equality checks.
        self._id_attrs = (self.location, self.title)
    @classmethod
    def de_json(cls, data, bot):
        """Build a :class:`Venue` from decoded JSON *data*, or return None."""
        data = super(Venue, cls).de_json(data, bot)
        if not data:
            return None
        # Deserialize the nested location object before constructing.
        data['location'] = Location.de_json(data.get('location'), bot)
        return cls(**data)
|
# -*- coding:utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# SearchForm's strings
SEARCH_FORM_KEYWORDS = _(u'Key Words / Profession')
SEARCH_FORM_LOCATION = _(u'City, State or Zip Code')
# SearchFiltersForm's strings
SEARCH_FILTERS_FORM_JOB_POSITION = _(u'Job Position')
SEARCH_FILTERS_FORM_EXPERIENCE_YEARS = _(u'Experience')
SEARCH_FILTERS_FORM_DISTANCE = _(u'Distance')
SEARCH_FILTERS_FORM_FULL_TIME = _(u'Full Time')
SEARCH_FILTERS_FORM_PART_TIME = _(u'Part Time')
SEARCH_FILTERS_FORM_VISA = _(u'Has a Visa / Visa required')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.