import logging
import io
from pathlib import Path
from rdflib import URIRef, RDF
from dipper.graph.RDFGraph import RDFGraph
LOG = logging.getLogger(__name__)
class TestUtils:
@staticmethod
def test_graph_equality(turtlish, graph):
"""
:param turtlish: file path or string of triples in turtle
format without prefix header
:param graph: Graph object to test against
:return: Boolean, True if graphs contain same
set of triples
"""
turtle_graph = RDFGraph()
turtle_graph.bind_all_namespaces()
prefixes = "\n".join(
["@prefix {}: <{}> .".format(
n[0], n[1]) for n in turtle_graph.namespace_manager.namespaces()]
)
headless_ttl = ''
try:
if Path(turtlish).exists():
headless_ttl = Path(turtlish).read_text()
else:
raise OSError
except OSError:
if isinstance(turtlish, str):
headless_ttl = turtlish
else:
raise ValueError("turtlish must be filepath or string")
turtle_string = prefixes + headless_ttl
mock_file = io.StringIO(turtle_string)
turtle_graph.parse(mock_file, format="turtle")
TestUtils.remove_ontology_axioms(graph)
turtle_triples = set(list(turtle_graph))
ref_triples = set(list(graph))
equality = turtle_triples == ref_triples
if not equality:
LOG.warning(
"Triples do not match\n"
"\tLeft hand difference: %s\n"
"\tR | ight hand difference: %s",
sorted(turtle_triples - ref_triples),
sorted(ref_triples - turtle_triples)
)
return equality
@staticmethod
def remove_ontology_axioms(graph):
"""
Given an rdflib graph, remove any triples
connected to an ontology node:
{} a owl:Ontology
:param graph: RDFGraph
:return: None
"""
ontology_iri = URIRef("http://www.w3.org/2002/07/owl#Ontology")
for subject in graph.subjects(RDF.type, ontology_iri):
for predicate, obj in graph.predicate_objects(subject):
graph.remove((subject, predicate, obj))
graph.remove((subject, RDF.type, ontology_iri))
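# A minimal usage sketch (not part of the original module): the turtle string and
# the example IRI below are hypothetical; "a" is plain Turtle shorthand for rdf:type,
# so no extra prefixes are needed in the headless string.
if __name__ == '__main__':
    expected = '<https://example.org/thing> a <http://www.w3.org/2002/07/owl#Class> .'
    g = RDFGraph()
    g.add((URIRef('https://example.org/thing'), RDF.type,
           URIRef('http://www.w3.org/2002/07/owl#Class')))
    # True when both graphs contain the same set of triples
    print(TestUtils.test_graph_equality(expected, g))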
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
def read_file(path):
try:
with open(path, 'r') as f:
return f.read()
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: unable to read required file %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return None
def main():
ORIGINAL_FILE = 'requirements.txt'
VENDORED_COPY = 'test/lib/ansible_test/_data/requirements/ansible.txt'
original_requirements = read_file(ORIGINAL_FILE)
vendored_requirements = read_file(VENDORED_COPY)
if original_requirements is not None and vendored_requirements is not None:
if original_requirements != vendored_requirements:
print('%s:%d:%d: must be identical to %s' % (VENDORED_COPY, 0, 0, ORIGINAL_FILE))
if __name__ == '__main__':
main()
def _render_edit_form_for(
self, rest_handler_cls, title, annotations_dict=None,
delete_xsrf_token='delete-unit', page_description=None):
"""Renders an editor form for a given REST handler class."""
if not annotations_dict:
annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT
key = self.request.get('key')
extra_args = {}
if self.request.get('is_newly_created'):
extra_args['is_newly_created'] = 1
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(rest_handler_cls.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(rest_handler_cls.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(delete_xsrf_token))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self,
rest_handler_cls.SCHEMA_JSON,
annotations_dict,
key, rest_url, exit_url,
extra_args=extra_args,
delete_url=delete_url, delete_method='delete',
read_only=not filer.is_editable_fs(self.app_context),
required_modules=rest_handler_cls.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit %s' % title)
if page_description:
template_values['page_description'] = page_description
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit(self):
"""Shows unit editor."""
self._render_edit_form_for(
UnitRESTHandler, 'Unit',
page_description=messages.UNIT_EDITOR_DESCRIPTION)
def get_edit_link(self):
"""Shows link editor."""
self._render_edit_form_for(
LinkRESTHandler, 'Link',
page_description=messages.LINK_EDITOR_DESCRIPTION)
def get_edit_assessment(self):
"""Shows assessment editor."""
self._render_edit_form_for(
AssessmentRESTHandler, 'Assessment',
page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION)
def get_edit_lesson(self):
"""Shows the lesson/activity editor."""
self._render_edit_form_for(
LessonRESTHandler, 'Lessons and Activities',
annotations_dict=LessonRESTHandler.get_schema_annotations_dict(
courses.Course(self).get_units()),
delete_xsrf_token='delete-lesson')
class CommonUnitRESTHandler(BaseRESTHandler):
"""A common super class for all unit REST handlers."""
def unit_to_dict(self, unused_unit):
"""Converts a unit to a dictionary representation."""
raise Exception('Not implemented')
def apply_updates(
self, unused_unit, unused_updated_unit_dict, unused_errors):
"""Applies changes to a unit; modifies unit input argument."""
raise Exception('Not implemented')
def get(self):
"""A GET REST method shared by all unit types."""
key = self.request.get('key')
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
message = ['Success.']
if self.request.get('is_newly_created'):
unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
message.append(
'New %s has been created and saved.' % unit_type)
transforms.send_json_response(
self, 200, '\n'.join(message),
payload_dict=self.unit_to_dict(unit),
xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))
def put(self):
"""A PUT REST method shared by all unit types."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'put-unit', {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
updated_unit_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
errors = []
self.apply_updates(unit, updated_unit_dict, errors)
if not errors:
course = courses.Course(self)
assert course.update_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Saved.')
else:
transforms.send_json_response(self, 412, '\n'.join(errors))
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-unit', {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
unit = course.find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
course.delete_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to unit."""
URI = '/rest/course/unit'
SCHEMA_JSON = """
{
"id": "Unit Entity",
"type": "object",
"description": "Unit",
"properties": {
"key" : {"type": "string"},
"type": {"type": "string"},
"title": {"optional": true, "type": "string"},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Unit'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'type', '_inputex'], {
'label': 'Type', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
create_status_annotation()]
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable']
def unit_to_dict(self, unit):
assert unit.type == 'U'
return {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'is_draft': not unit.now_available}
def apply_updates(self, unit, updated_unit_dict, unused_errors):
unit.title = updated_unit_dict.get('title')
unit.now_available = not updated_unit_dict.get('is_draft')
class LinkRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to link."""
URI = '/rest/course/link'
SCHEMA_JSON = """
{
"id": "Link Entity",
"type": "object",
"description": "Link",
"properties": {
"key" : {"type": "string"},
"type": {"type": "string"},
"title": {"optional": true, "type": "string"},
"url": {"optional": true, "type": "string"},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Link'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable'}),
(['properties', 'type', '_inputex'], {
'label': 'Type', '_type': 'uneditable'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
"""
Support for Eneco Slimmer stekkers (Smart Plugs).
This provides controls for the z-wave smart plugs Toon can control.
"""
import logging
from homeassistant.components.switch import SwitchDevice
import custom_components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup discovered Smart Plugs."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
switch_items = []
for plug in _toon_main.toon.smartplugs:
switch_items.append(EnecoSmartPlug(hass, plug))
add_devices_callback(switch_items)
class EnecoSmartPlug(SwitchDevice):
"""Representation of a Smart Plug."""
def __init__(self, hass, plug):
"""Initialize the Smart Plug."""
self.smartplug = plug
self.toon_data_store = hass.data[toon_main.TOON_HANDLE]
@property
def should_poll(self):
"""No polling needed with subscriptions."""
return True
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.smartplug.device_uuid
@property
def name(self):
"""Return the name of the switch if any."""
return self.smartplug.name
@property
def current_power_w(self):
"""Current pow | er usage in W."""
return self.toon_data_store.get_data('current_power', self.name)
@property
def today_energy_kwh(self):
"""Today total energy usage in kWh."""
return self.toon_data_store.get_data('today_energy', self.name)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.toon_data_store.get_data('current_state', self.name)
@property
def available(self):
"""True if | switch is available."""
return self.smartplug.can_toggle
def turn_on(self, **kwargs):
"""Turn the switch on."""
return self.smartplug.turn_on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
return self.smartplug.turn_off()
def update(self):
"""Update state."""
self.toon_data_store.update()
- qtcreator
| |-- plugins
|
|-- plugins
| |-- qt plugins
| |-- csdataquick plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
Linux:
|-- bin
| |-- qt.conf -> Prefix=..
| |-- csdataquick executables
| |-- qtcreator
|
|-- lib
| |-- qt shared libraries
| |-- csdataquick shared libraries
| |-- qtcreator
| |-- qtcreator shared libraries
| |-- plugins
|
|-- libexec
| |-- qtcreator
| |-- qt.conf -> Prefix=../..
| |-- qml2puppet
|
|-- plugins
| |-- csdataquick plugins
| |-- qt plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
macOS:
|-- bin
| |-- csdataquick app bundles
| | |-- Contents
| | |-- Resources
| | |-- qt.conf -> Prefix=../../../..
| |-- Qt Creator.app
| |-- Contents
| |-- Resources
| |-- qt.conf -> Prefix=../../../..
| |-- qmldesigner
| |-- qt.conf -> Prefix=../../../../..
|
|-- lib
| |-- qt frameworks
| |-- csdataquick shared libraries
|
|-- plugins
| |-- qt plugins
| |-- csdataquick plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
"""
import argparse
import glob
import os
import platform
import re
import sys
import shutil
import subprocess
if sys.hexversion < 0x03000000:
if sys.hexversion < 0x02070000:
subprocess.getoutput = lambda cmd: subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
else:
subprocess.getoutput = lambda cmd: subprocess.check_output(cmd, shell=True)
parser = argparse.ArgumentParser(description='Fixup Qt and Qt Creator for packaging')
parser.add_argument('--target', required=True, help='target path')
parser.add_argument('--qtcreator', help='qt creator path')
parser.add_argument('--qmake', required=True, help='qmake file path')
args = parser.parse_args(sys.argv[1:])
qtcreator_path = args.qtcreator
target_path = args.target
qmake = args.qmake
bin_dir = os.path.join(target_path, 'bin')
lib_dir = os.path.join(target_path, 'lib')
libexec_dir = os.path.join(target_path, 'libexec')
plugins_dir = os.path.join(target_path, 'plugins')
qml_dir = os.path.join(target_path, 'qml')
def smartCopy(src, dst, follow_symlinks=True, ignore=None):
"""
Same as the shell cp command. If *src* is a file, it is copied into *dst* if *dst* is an existing directory,
or as file *dst*. If *src* is a directory, it is copied recursively into *dst* if *dst* is an existing
directory, or as directory *dst*.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
if os.path.isdir(src):
shutil.copytree(src, dst, symlinks=not follow_symlinks, ignore=ignore)
else:
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return dst
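# Usage sketch (hypothetical paths), mirroring the shell `cp` semantics described
# in the docstring:
#   smartCopy('/tmp/libfoo.so', '/tmp/stage')        # file into existing dir -> /tmp/stage/libfoo.so
#   smartCopy('/tmp/qml/Foo', '/tmp/stage/qml/Foo')  # directory copied recursively as the new dst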
def deployQtLibraries():
libs = ['Core', 'Gui', 'Widgets', 'Concurrent', 'Network', 'PrintSupport', 'Script',
'Qml', 'Quick', 'QuickWidgets', 'QuickControls2', 'QuickTemplates2', 'QuickParticles',
'Xml', 'Svg', 'Sql', 'Help']
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_LIBS' % qmake).strip()
dst_dir = lib_dir
lib_pattern = 'libQt5%s.so*'
ignore_pattern = None
if platform.system() == 'Darwin':
lib_pattern = 'Qt%s.framework'
ignore_pattern = shutil.ignore_patterns('Headers', '*_debug', '*.prl')
elif platform.system() == 'Windows':
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_BINS' % qmake).strip()
dst_dir = bin_dir
lib_pattern = 'Qt5%s.dll'
elif platform.system() == 'Linux':
libs += ['XcbQpa', 'DBus']
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for lib in libs:
for file in glob.glob(os.path.join(qtlibs_dir, lib_pattern%lib)):
smartCopy(file, dst_dir, follow_symlinks=False, ignore=ignore_pattern)
if platform.system() == 'Windows':
for lib in ['libEGL.dll', 'libGLESv2.dll']:
smartCopy(os.path.join(qtlibs_dir, lib), dst_dir)
def deployQtPlugins():
plugins = ['bearer', 'designer', 'iconengines', 'imageformats',
'platforms', 'sqldrivers']
qtplugins_dir = subprocess.getoutput('%s -query QT_INSTALL_PLUGINS' % qmake).strip()
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
if platform.system() == 'Linux':
plugins += ['xcbglintegrations']
for plugin in plugins:
if not os.path.exists(os.path.join(qtplugins_dir, plugin)):
print('plugin "%s" does not exist' % plugin)
continue
shutil.copytree(os.path.join(qtplugins_dir, plugin),
os.path.join(plugins_dir, plugin),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*.pdb'))
# remove debug version on windows
if platform.system() == 'Windows':
# After sorting, the debug version "<pluginname>d.dll" will be
# immediately after the release version "<pluginname>.dll".
# It is then quick to remove every 2nd file from this list.
dlls = sorted(os.listdir(os.path.join(plugins_dir, plugin)))[1::2]
for dll in dlls:
os.remove(os.path.join(plugins_dir, plugin, dll))
def deployQtQuick():
qtqml_dir = subprocess.getoutput('%s -query QT_INSTALL_QML' % qmake).strip()
if not os.path.exists(qml_dir):
os.makedirs(qml_dir)
for qml in ['Qt', 'QtQml', 'QtGraphicalEffects', 'QtQuick', 'QtQuick.2']:
if not os.path.exists(os.path.join(qtqml_dir, qml)):
print('qml module "%s" does not exist' % qml)
continue
shutil.copytree(os.path.join(qtqml_dir, qml),
os.path.join(qml_dir, qml),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*plugind.dll','*.pdb'))
def deployQt():
# Copy Qt libraries
deployQtLibraries()
# Copy Qt plugins
deployQtPlugins()
# Copy QtQuick modules
deployQtQuick()
def restruct_macos():
bundle_name = os.path.basename(qtcreator_path)
if not bundle_name.endswith('.app'):
print('Not a valid app bundle')
return
# Copy the app bundle to bin
if not os.path.exists(bin_dir):
os.makedirs(bin_dir)
shutil.copytree(qtcreator_path, os.path.join(bin_dir, bundle_name), symlinks=True)
# Fix rpath
for root, dirs, files in os.walk(os.path.join(bin_dir, bundle_name)):
for file in files:
fname = os.path.join(root, file)
if os.path.islink(fname):
continue
if file == 'qml2puppet' or os.path.basename(root) == 'MacOS' or os.path.splitext(file)[1] == '.dylib':
cmd = 'install_name_tool -add_rpath "@loader_path/%s" "%s"' % (os.path.relpath(lib_dir, root), fname)
subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
# Fix qt.conf
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../..\n')
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qmldesigner', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../../../..\n')
def restruct_windows():
# Copy the entire directory
for d in ['bin', 'lib', 'share']:
shutil.copytree(os.path.join(qtcreator_path, d), os.path.join(target_path, d))
# Fix qt.conf
open(os.path.join(bin_dir, 'qt.conf'), 'w').write('[Paths]\nPrefix = ..\n')
def restruct_linux():
# Copy the entire directory
for d in ['bin', 'lib', 'libexec', 'share']:
shutil.copytree(os.path.join(qtcreator_path, d),
os.path.join(target_path, d),
symlinks=True,
ignore=shutil.igno |
from data_vault import VaultEnvironment
class KeeperApiHelper:
_expected_commands = []
_vault_env = VaultEnvironment()
@staticmethod
def communicate_expect(actions):
# type: (list) -> None
KeeperApiHelper._expected_commands.clear()
KeeperApiHelper._expected_commands.extend(actions)
@staticmethod
def is_expect_empty():
# type: () -> bool
return len(KeeperApiHelper._expected_commands) == 0
@staticmethod
def communicate_command(_, request):
# type: (any, dict) -> dict
rs = {
'result': 'success',
'result_code': '',
'message': ''
}
action = KeeperApiHelper._expected_commands.pop(0)
if callable(action):
props = action(request)
if type(props) == dict:
rs.update(props)
return rs
if type(action) == str:
if action == request['command']:
return rs
raise Exception('Unexpected API command: %s' % request.get('command'))
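# Usage sketch (hypothetical command names): expected commands are queued first,
# then popped one by one as the code under test calls communicate_command.
#   KeeperApiHelper.communicate_expect(['login', lambda rq: {'result_code': 'auth_ok'}])
#   rs = KeeperApiHelper.communicate_command(None, {'command': 'login'})   # matches 'login'
#   assert rs['result'] == 'success' and not KeeperApiHelper.is_expect_empty()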
import os
import shutil
class BasicOperations_TestClass:
TEST_ROOT = '__test_root__'
def setUp(self):
self.regenerate_root()
print(self.TEST_ROOT)
assert os.path.isdir(self.TEST_ROOT)
def tearDown(self):
return True
def test_test(self):
assert self.bar == 1
def regenerate_root(self):
if os.path.isdir(self.TEST_ROOT):
shutil.rmtree(self.TEST_ROOT)
os.makedirs(self.TEST_ROOT)
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
name = "R"
identifier = "org.vistrails.vistrails.rpy"
version = "0.1.2"
old_identifiers = ["edu.utah.sci.vistrails.rpy"]
# Import everything needed to edit video clips
from moviepy.editor import *
# Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60
clip = VideoFileClip("myHolidays.mp4").subclip(50,60)
# Reduce the audio volume (volume x 0.8)
clip = clip.volumex(0.8)
# Generate a text clip. You can customize the font, color, etc.
txt_clip = TextClip("My Holidays 2013",fontsize=70,color='white')
# Say that you want it to appear 10s at the center of the screen
txt_clip = txt_clip.set_pos('center').set_duration(10)
# Overlay the text clip on the first video clip
video = CompositeVideoClip([clip, txt_clip])
# Write the result to a file (many options available !)
video.write_videofile("myHolidays_edited.webm")
from io import BytesIO
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
class BIP9SoftForksTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
self.setup_clean_chain = True
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = self.mocktime + 1
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
# check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
# check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# ...continue with Test 3
test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more version 536870913 blocks (wait |
#!/usr/bin/python
import glob, os
from cjh.cli import Cli, ListPrompt
from cjh.lists import ItemList
class Fileman(object):
@classmethod
def pwd(cls, getstr=False):
"""
Emulate 'pwd' command
"""
string = os.getcwd()
if getstr:
return string
else: print(string)
@classmethod
def mc(cls):
list_prompt = ListPrompt(['..'] + cls.ls(opts=['B'], get_list=True))
if len(list_prompt) > Cli.height():
Cli.less(str(list_prompt))
response = Cli.make_page(header=cls.pwd(getstr=True), func=list_prompt.input)
if response == 1:
os.chdir(list_prompt[response - 1])
cls.mc()
elif list_prompt[response - 1].endswith('/'):
os.chdir(list_prompt[response - 1][:-1])
cls.mc()
else: return list_prompt[response - 1]
@staticmethod
def ls(*args, **kwargs):
"""
Emulate 'ls' command
"""
if len(args) == 0:
cwd = os.getcwd()
file_list = os.listdir(cwd)
else:
file_list = []
for arg in args:
file_list += glob.glob(arg)
if 'opts' in kwargs and 'B' in kwargs['opts']:
file_list = [
file_ for file_ in file_list if not file_.endswith('~')
]
file_list.sort(key=str.lower)
dir_list = []
if 'opts' in kwargs and 'F' in kwargs['opts']:
for index, file_ in enumerate(file_list):
if os.path.isdir(file_):
dir_list.append(file_ + '/')
del file_list[index]
elif os.access(file_, os.X_OK):
file_list[index] = file_ + '*'
if 'get_list' not in kwargs or kwargs['get_list'] is not True:
string = ''
for dir_ in dir_list:
string += (dir_ + '\n')
for file_ in file_list:
string += (file_ + '\n')
if len(dir_list) + len(file_list) + 1 > Cli.height():
Cli.less(string)
else: Cli.write(string.strip())
else:
return dir_list + file_list
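# Usage sketch: with opts=['F'] directories are suffixed '/' and executables '*';
# with get_list=True the listing is returned instead of printed.
#   entries = Fileman.ls('*.py', opts=['B', 'F'], get_list=True)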
image by fitting the image I(x,y,z) to a
polynomial, then subtracts this fitted intensity variation and uses
centroid methods to find the particles.
"""
# We just want a smoothed field model of the image so that the residuals
# are simply the particles without other complications
m = models.SmoothFieldModel()
I = ilms.LegendrePoly2P1D(order=order, constval=image.get_image().mean())
s = states.ImageState(image, [I], pad=0, mdl=m)
if dofilter:
opt.do_levmarq(s, s.params)
pos = addsub.feature_guess(s, feature_rad, trim_edge=trim_edge, **kwargs)[0]
return pos
def get_initial_featuring(statemaker, feature_rad, actual_rad=None,
im_name=None, tile=None, invert=True, desc='', use_full_path=False,
featuring_params={}, statemaker_kwargs={}, **kwargs):
"""
Completely optimizes a state from an image of roughly monodisperse
particles.
The user can interactively select the image. The state is periodically
saved during optimization, with different filename for different stages
of the optimization.
Parameters
----------
statemaker : Function
A statemaker function. Given arguments `im` (a
:class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),
and any additional `statemaker_kwargs`, must return a
:class:`~peri.states.ImageState`. There is an example function in
scripts/statemaker_example.py
feature_rad : Int, odd
The particle radius for featuring, as passed to locate_spheres.
actual_rad : Float, optional
The actual radius of the particles. Default is feature_rad
im_name : string, optional
The file name of the image to load. If not set, it is selected
interactively through Tk.
tile : :class:`peri.util.Tile`, optional
The tile of the raw image to be analyzed. Default is None, the
entire image.
invert : Bool, optional
Whether to invert the image for featuring, as passed to trackpy.
Default is True.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
use_full_path : Bool, optional
Set to True to use the full path name for the image. Default
is False.
featuring_params : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
Default is ``{}``.
statemaker_kwargs : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
the statemaker function. Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
zscale : Float, optional
The zscale of the image. Default is 1.0
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
feature_from_pos_rad : Using a previous state's globals and
user-provided positions and radii as an initial guess,
completely optimizes a state.
get_particle_featuring : Using a previous state's globals and
positions as an initial guess, completely optimizes a state.
translate_featuring : Use a previous state's globals and
centroids methods for an initial particle guess, completely
optimizes a state.
Notes
-----
Proceeds by centroid-featuring the image for an initial guess of
particle positions, then optimizing the globals + positions until
termination as called in _optimize_from_centroid.
The ``Other Parameters`` are passed to _optimize_from_centroid.
"""
if actual_rad is None:
actual_rad = feature_rad
_, im_name = _pick_state_im_name('', im_name, use_full_path=use_full_path)
im = util.RawImage(im_name, tile=tile)
pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
if np.size(pos) == 0:
msg = 'No particles found. Try using a smaller `feature_rad`.'
raise ValueError(msg)
rad = np.ones(pos.shape[0], dtype='float') * actual_rad
s = statemaker(im, pos, rad, **statemaker_kwargs)
RLOG.info('State Created.')
if desc is not None:
states.save(s, desc=desc+'initial')
optimize_from_initial(s, invert=invert, desc=desc, **kwargs)
return s
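# Hypothetical call sketch (the statemaker and file name are placeholders; see
# scripts/statemaker_example.py referenced in the docstring):
#   s = get_initial_featuring(my_statemaker, feature_rad=5, actual_rad=5.2,
#                             im_name='0.tif', invert=True, desc='demo-')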
def feature_from_pos_rad(statemaker, pos, rad, im_name=None, tile=None,
desc='', use_full_path=False, statemaker_kwargs={}, **kwargs):
"""
Gets a completely-optimized state from an image and an initial guess of
particle positions and radii.
The state is periodically saved during optimization, with different
filename for different stages of the optimization. The user can select
the image.
Parameters
----------
statemaker : Function
A statemaker function. Given arguments `im` (a
:class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),
and any additional `statemaker_kwargs`, must return a
:class:`~peri.states.ImageState`. There is an example function in
scripts/statemaker_example.py
pos : [N,3] element numpy.ndarray.
The initial guess for the N particle positions.
rad : N element numpy.ndarray.
The initial guess for the N particle radii.
im_name : string or None, optional
The filename of the image to feature. Default is None, in which
the user selects the image.
tile : :class:`peri.util.Tile`, optional
A tile of the sub-region of the image to feature. Default is
None, i.e. entire image.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
use_full_path : Bool, optional
Set to True to use the full path name for the image. Default
is False.
statemaker_kwargs : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
the statemaker function. Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
invert : {'guess', True, False}
Whether t |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""DataGroupXanes: work with XANES data sets
============================================
- DataGroup
- DataGroup1D
- DataGroupXanes
"""
from .datagroup import MODNAME
from .datagroup1D import DataGroup1D
class DataGroupXanes(DataGroup1D):
"""DataGroup for XANES scans"""
def __init__(self, kwsd=None, _larch=None):
super(DataGroupXanes, self).__init__(kwsd=kwsd, _larch=_larch)
### LARCH ###
def datagroup_xan(kwsd=None, _larch=None):
"""utility to perform wrapped operations on a list of XANES data
groups"""
return DataGroupXanes(kwsd=kwsd, _larch=_larch)
def registerLarchPlugin():
return (MODNAME, {'datagroup_xan' : datagroup_xan})
if __name__ == '__main__':
pass
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Assign Addresses to interfaces """
from datetime import datetime
import re
from sqlalchemy import (Column, Integer, DateTime, ForeignKey, Sequence,
UniqueConstraint, Index)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relation, backref, deferred, validates
from sqlalchemy.sql import and_
from aquilon.exceptions_ import InternalError
from aquilon.aqdb.column_types import IP, AqStr, EmptyStr
from aquilon.aqdb.model import Base, Interface, ARecord, Network
from aquilon.aqdb.model.a_record import dns_fqdn_mapper
_TN = 'address_assignment'
_ABV = 'addr_assign'
class AddressAssignment(Base):
"""
Assignment of IP addresses to network interfaces.
It's kept as an association map to model the linkage, since we need to
have maximum ability to provide potentially complex configuration
scenarios, such as advertising certain VIP addresses from some, but not
all of the network interfaces on a machine (to be used for backup
servers, cluster filesystem servers, NetApp filers, etc.). While in
most cases we can assume VIPs are broadcast out all interfaces on the
box, we still need to have the underlying model as the more complex
many-to-many relationship implemented here.
"""
__tablename__ = _TN
_label_check = re.compile('^[a-z0-9]{0,16}$')
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
assignment_type = Column(AqStr(32), nullable=False)
interface_id = Column(ForeignKey(Interface.id, ondelete='CASCADE'),
nullable=False)
label = Column(EmptyStr(16), nullable=False)
ip = Column(IP, nullable=False)
network_id = Column(ForeignKey(Network.id), nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
interface = relation(Interface, innerjoin=True,
backref=backref('assignments', order_by=[label],
cascade='all, delete-orphan'))
# Setting viewonly is very important here as we do not want the removal of
# an AddressAssignment record to change the linked DNS record(s)
# Can't use backref or back_populates due to the different mappers
dns_records = relation(dns_fqdn_mapper,
primaryjoin=and_(network_id == dns_fqdn_mapper.c.network_id,
ip == dns_fqdn_mapper.c.ip),
foreign_keys=[dns_fqdn_mapper.c.ip,
dns_fqdn_mapper.c.network_id],
viewonly=True)
fqdns = association_proxy('dns_records', 'fqdn')
network = relation(Network, innerjoin=True,
backref=backref('assignments', passive_deletes=True,
order_by=[ip]))
__table_args__ = (UniqueConstraint(interface_id, ip),
UniqueConstraint(interface_id, label),
Index("%s_network_ip_idx" % _ABV, network_id, ip))
__mapper_args__ = {'polymorphic_on': assignment_type,
'polymorphic_identity': 'standard'}
@property
def logical_name(self):
"""
Compute an OS-agnostic name for this interface/address combo.
BIG FAT WARNING: do _NOT_ assume that this name really exists on the
host!
There are external systems like DSDB that cannot handle having multiple
addresses on the same interface. Because of that, this function generates
a unique name for every interface/address tuple.
"""
# Use the Linux naming convention because people are familiar with that
# and it is easy to parse if needed
name = self.interface.name
if self.label:
name += ":%s" % self.label
return name
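# For example, an interface named "eth0" with label "vip1" yields "eth0:vip1",
# while an unlabeled address is reported simply as "eth0".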
@property
def is_shared(self):
return False
def __init__(self, label=None, network=None, **kwargs):
if not label:
label = ""
elif not self._label_check.match(label): # pragma: no cover
raise ValueError("Illegal address label '%s'." % label)
# Right now network_id is nullable due to how refresh_network works, so
# verify the network here
if not network: # pragma: no cover
raise InternalError("AddressAssignment needs a network")
super(AddressAssignment, self).__init__(label=label, network=network,
**kwargs)
def __repr__(self):
return "<Address %s on %s/%s>" % (self.ip,
self.interface.hardware_entity.label,
self.logical_name)
# Assigned to external classes here to avoid circular dependencies.
Interface.addresses = association_proxy('assignments', 'ip')
# Can't use backref or back_populates due to the different mappers
# This relation gives us the two other sides of the triangle mentioned above
# Do NOT consider the DNS environment here - whether the IP is used or not does
# not depend on its visibility in DNS
ARecord.assignments = relation(
AddressAssignment,
primaryjoin=and_(AddressAssignment.network_id == ARecord.network_id,
AddressAssignment.ip == ARecord.ip),
foreign_keys=[AddressAssignment.ip, AddressAssignment.network_id],
viewonly=True)
class SharedAddressAssignment(AddressAssignment):
priority = Column(Integer)
# As priority is an additional column we cannot make it non-null
@validates('priority')
def _validate_priority(self, key, value): # pylint: disable=W0613
if not value:
raise ValueError("Shared addresses require a priority")
return value
@property
def is_shared(self):
return True
__mapper_args__ = {'polymorphic_identity': 'shared'}
from __future__ import print_function
import time
import argparse
import grpc
from jaeger_client import Config
from grpc_opentracing import open_tracing_client_interceptor
from grpc_opentracing.grpcext import intercept_channel
import command_line_pb2
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='trivial-client')
tracer = config.initialize_tracer()
tracer_interceptor = open_tracing_client_interceptor(
tracer, log_payloads=args.log_payloads)
channel = grpc.insecure_channel('localhost:50051')
channel = intercept_channel(channel, tracer_interceptor)
stub = command_line_pb2.CommandLineStub(channel)
response = stub.Echo(command_line_pb2.CommandRequest(text='Hello, hello'))
print(response.text)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
run()
from collections import Counter
c = Counter(input())
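# min(t, r, e//2) is the number of disjoint {t, r, e, e} letter groups in the input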
print(min(c['t'], c['r'], c['e']//2))
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to install ARM root image for cross building of ARM chrome on linux.
This script can be run manually but is more often run as part of gclient
hooks. When run from hooks this script should be a no-op on non-linux
platforms.
The sysroot image could be constructed from scratch based on the current
state of precise/arm but for consistency we currently use a pre-built root
image which was originally designed for building trusted NaCl code. The image
will normally need to be rebuilt every time chrome's build dependencies are
changed.
Steps to rebuild the arm sysroot image:
- cd $SRC/native_client
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
UpdatePackageLists
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
BuildJail $SRC/out/arm-sysroot.tar.gz
- gsutil cp -a public-read $SRC/out/arm-sysroot.tar.gz \
nativeclient-archive2/toolchain/$NACL_REV/sysroot-arm-trusted.tgz
"""
import os
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://storage.googleapis.com'
URL_PATH = 'nativeclient-archive2/toolchain'
REVISION = 13035
TARBALL = 'sysroot-arm-trusted.tgz'
def main(args):
if '--linux-only' in args:
# This argument is passed when run from the gclient hooks.
# In this case we return early on non-linux platforms
# or if GYP_DEFINES doesn't include target_arch=arm
if not sys.platform.startswith('linux'):
return 0
if "target_arch=arm" not in os.environ.get('GYP_DEFINES', ''):
return 0
src_root = os.path.dirname(os.path.dirname(SCRIPT_DIR))
sysroot = os.path.join(src_root, 'arm-sysroot')
url = "%s/%s/%s/%s" % (URL_PREFIX, URL_PATH, REVISION, TARBALL)
stamp = os.path.join(sysroot, ".stamp")
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
print "ARM root image already up-to-date: %s" % sysroot
return 0
print "Installing ARM root image: %s" % sysroot
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, TARBALL)
curl = ['curl', '--fail', '-L', url, '-o', tarball]
if os.isatty(sys.stdout.fileno()):
curl.append('--progress')
else:
curl.append('--silent')
subprocess.check_call(curl)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
from .context import CorpusContext
from .audio import AudioContext
from .importable import ImportContext
from .lexical import LexicalContext
from .pause import PauseContext
from .utterance import UtteranceContext
from .structured import StructuredContext
from .syllabic import SyllabicContext
from .spoken import SpokenContext
from SCons.Script import *
def exists(env):
return (env["PLATFORM"]=="win32")
def ConvertNewlines(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
def ConvertNewlinesB(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write("\xef\xbb\xbf")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
def generate(env):
env["BUILDERS"]["ConvertNewlines"]=Builder(action=Co | nvertNewlines,suffix=".txt")
env["BUILDERS"]["ConvertNewlinesB"]=Builder(action=ConvertNewlinesB,suffix=".txt")
import requests
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
@csrf_exempt
@require_http_methods(["POST"])
def post_service_request(request):
payload = request.POST.copy()
outgoing = payload.dict()
if outgoing.get("internal_feedback", False):
if "internal_feedback" in outg | oing:
del outgoing["internal_feedback"]
api_key = settings.OPEN311["INTERNAL_FEEDBACK_API_KEY"]
else:
api_key = settings.OPEN311["API_KEY"]
outgoing["api_key"] = api_key
url = settings.OPEN311["URL_BASE"]
session = requests.Session()
# Modify parameters for request in case of City of Turku
| if "smbackend_turku" in settings.INSTALLED_APPS:
outgoing.pop("service_request_type")
outgoing.pop("can_be_published")
outgoing["address_string"] = "null"
outgoing["service_code"] = settings.OPEN311["SERVICE_CODE"]
r = session.post(url, data=outgoing)
if r.status_code != 200:
return HttpResponseBadRequest()
return HttpResponse(r.content, content_type="application/json")
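# Hypothetical URL wiring for this view (the path is a placeholder):
#   from django.urls import path
#   urlpatterns = [path('open311/service_requests/', post_service_request)]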
from uuid import uuid4
from django.test import TestCase
from casexml.apps.case.cleanup import claim_case, get_first_claim
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.util import post_case_blocks
from corehq.apps.case_search.models import CLAIM_CASE_TYPE
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.ota.utils import get_restore_user
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.models import CommCareCase
DOMAIN = 'test_domain'
USERNAME = 'lina.stern@ras.ru'
PASSWORD = 'hemato-encephalic'
# https://en.wikipedia.org/wiki/Lina_Stern
def index_to_dict(instance):
keys = ('identifier', 'referenced_type', 'referenced_id', 'relationship')
return {k: str(getattr(instance, k)) for k in keys}
class CaseClaimTests(TestCase):
def setUp(self):
super(CaseClaimTests, self).setUp()
self.domain = create_domain(DOMAIN)
self.user = CommCareUser.create(DOMAIN, USERNAME, PASSWORD, None, None)
self.restore_user = get_restore_user(DOMAIN, self.user, None)
self.host_case_id = uuid4().hex
self.host_case_name = 'Dmitri Bashkirov'
self.host_case_type = 'person'
self.create_case()
def tearDown(self):
self.user.delete(self.domain.name, deleted_by=None)
self.domain.delete()
super(CaseClaimTests, self).tearDown()
def create_case(self):
case_block = CaseBlock.deprecated_init(
create=True,
case_id=self.host_case_id,
case_name=self.host_case_name,
case_type=self.host_case_type,
owner_id='in_soviet_russia_the_case_owns_you',
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
def assert_claim(self, claim=None, claim_id=None):
if claim is None:
claim_ids = CommCareCase.objects.get_case_ids_in_domain(DOMAIN, CLAIM_CASE_TYPE)
self.assertEqual(len(claim_ids), 1)
claim = CommCareCase.objects.get_case(claim_ids[0], DOMAIN)
if claim_id:
self.assertEqual(claim.case_id, claim_id)
self.assertEqual(claim.name, self.host_case_name)
self.assertEqual(claim.owner_id, self.user.user_id)
self.assertEqual([index_to_dict(i) for i in claim.indices], [{
'identifier': 'host',
'referenced_type': 'person',
'referenced_id': self.host_case_id,
'relationship': 'extension',
}])
def test_claim_case(self):
"""
claim_case should create an extension case
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self.assert_claim(claim_id=claim_id)
def test_claim_case_id_only(self):
"""
claim_case should look up host case details if only ID is passed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id)
self.assert_claim(claim_id=claim_id)
def test_first_claim_one(self):
"""
get_first_claim should return one claim
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assert_claim(claim, claim_id)
def test_first_claim_none(self):
"""
get_first_claim should return None if not found
"""
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(claim)
def test_closed_claim(self):
"""
get_first_claim should return None if claim case is closed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self._close_case(claim_id)
first_claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(first_claim)
def test_claim_case_other_domain(self):
malicious_domain = 'malicious_domain'
domain_obj = create_domain(malicious_domain)
self.addCleanup(domain_obj.delete)
claim_id = claim_case(malicious_domain, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
with self.assertRaises(CaseNotFound):
CommCareCase.objects.get_case(claim_id, malicious_domain)
def _close_case(self, case_id):
case_block = CaseBlock.deprecated_init(
create=False,
case_id=case_id,
close=True
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
ty.text() + '" '
exiftool_params += '-iptc:City="' + self.xmp_city.text() + '" '
# Map date/time and format stuff
if self.chk_gps_timestamp.isChecked():
exiftool_params += '-exif:Copyright="' + self.exif_Copyright.text() + '" '
if self.chk_gps_datestamp.isChecked():
exiftool_params += '-exif:UserComment="' + self.exif_UserComment.text() + '" '
if self.gps_mapdatum.text() == "":
exiftool_params += '-exif:GPSMapDatum="WGS-84" '
else:
exiftool_params += '-exif:GPSMapDatum="' + self.gps_mapdatum.text() + '" '
print(exiftool_params)
# Now write the data to the photo(s)
if self.chk_gps_backuporiginals.isChecked():
write_image_info(self, exiftool_params, qApp, True)
else:
write_image_info(self, exiftool_params, qApp, False)
#------------------------------------------------------------------------
# Edit -> Exif tab and actions
def clear_exif_fields(self):
self.exif_Make.setText("")
self.exif_Model.setText("")
self.exif_ModifyDate.setText("")
self.exif_DateTimeOriginal.setText("")
self.exif_CreateDate.setText("")
self.exif_Artist.setText("")
self.exif_Copyright.setText("")
self.exif_UserComment.setText("")
self.exif_ImageDescription.clear()
self.chk_exif_Make.setChecked(1)
self.chk_exif_Model.setChecked(1)
self.chk_exif_ModifyDate.setChecked(1)
self.chk_exif_DateTimeOriginal.setChecked(1)
self.chk_exif_CreateDate.setChecked(1)
self.chk_exif_Artist.setChecked(1)
self.chk_exif_Copyright.setChecked(1)
self.chk_exif_UserComment.setChecked(1)
self.chk_exif_ImageDescription.setChecked(1)
def copyexiffromselected(self,qApp):
# First clean input fields
clear_exif_fields(self)
exiftool_params = ' -e -n -exif:Make -exif:Model -exif:ModifyDate -exif:DateTimeOriginal -exif:CreateDate -exif:Artist -exif:Copyright -exif:UserComment -exif:ImageDescription '
p = read_image_info(self, exiftool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy exif info from your source image, but your source image "
"doesn't contain the specified exif data or doesn't seem to contain any exif data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying exif info from source image", message)
else:
# remove last character which is the final ending \n (where \ is only the escape character)
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
gpslat = 0
gpslon = 0
if descriptor == "Make":
self.exif_Make.setText(description)
if descriptor == "Camera Model Name":
self.exif_Model.setText(description)
if descriptor == "Modify Date":
self.exif_ModifyDate.setText(description)
if descriptor == "Date/Time Original":
self.exif_DateTimeOriginal.setText(description)
if descriptor == "Create Date":
self.exif_CreateDate.setText(description)
if descriptor == "Artist":
self.exif_Artist.setText(description)
if descriptor == "Copyright":
self.exif_Copyright.setText(description)
if descriptor == "User Comment":
self.exif_UserComment.setText(description)
if descriptor == "Image Description":
self.exif_ImageDescription.insertPlainText(description)
#print "rowcounter " + str(rowcounter) + " descriptor " + descriptor + " ;description " + description
rowcounter += 1
def saveexifdata(self, qApp):
exiftool_params = ""
if self.chk_exif_Make.isChecked():
exiftool_params = ' -exif:Make="' + self.exif_Make.text() + '" '
if self.chk_exif_Model.isChecked():
exiftool_params += '-exif:Model="' + self.exif_Model.text() + '" '
if self.chk_exif_ModifyDate.isChecked():
exiftool_params += '-exif:ModifyDate="' + self.exif_ModifyDate.text() + '" '
if self.chk_exif_DateTimeOriginal.isChecked():
exiftool_params += '-exif:DateTimeOriginal="' + self.exif_DateTimeOriginal.text() + '" '
if self.chk_exif_CreateDate.isChecked():
exiftool_params += '-exif:CreateDate="' + self.exif_CreateDate.text() + '" '
if self.chk_exif_Artist.isChecked():
exiftool_params += '-exif:Artist="' + self.exif_Artist.text() + '" '
if self.chk_exif_Copyright.isChecked():
exiftool_params += '-exif:Copyright="' + self.exif_Copyright.text() + '" '
if self.chk_exif_UserComment.isChecked():
exiftool_params += '-exif:UserComment="' + self.exif_UserComment.text() + '" '
if self.chk_exif_ImageDescription.isChecked():
ImgDescr = self.exif_ImageDescription.toPlainText()
exiftool_params += '-exif:ImageDescription="' + ImgDescr + '" '
if self.chk_exif_backuporiginals.isChecked():
write_image_info(self, exiftool_params, qApp, True)
else:
write_image_info(self, exiftool_params, qApp, False)
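# Added illustrative note (field values are hypothetical, not from the original
# source): with all checkboxes ticked, the parameter string assembled above
# looks like
#   -exif:Make="Canon" -exif:Model="EOS 70D" -exif:Artist="Jane Doe" ...
# i.e. one -exif:TAG="value" pair per checked field, which write_image_info()
# then hands to exiftool.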
#------------------------------------------------------------------------
# Edit -> xmp tab and actions
def clear_xmp_fields(self):
self.xmp_creator.setText("")
self.xmp_rights.setText("")
self.xmp_label.setText("")
self.xmp_subject.setText("")
self.xmp_title.setText("")
self.xmp_rating1.setChecked(1)
self.xmp_description.clear()
self.xmp_person.setText("")
self.chk_xmp_creator.setChecked(1)
self.chk_xmp_rights.setChecked(1)
self.chk_xmp_label.setChecked(1)
self.chk_xmp_subject.setChecked(1)
self.chk_xmp_title.setChecked(1)
self.chk_xmp_rating.setChecked(1)
self.chk_xmp_description.setChecked(1)
self.chk_xmp_person.setChecked(1)
def copyxmpfromselected(self,qApp):
# First clean input fields
clear_xmp_fields(self)
xmptool_params = ' -e -n -xmp:Creator -xmp:Rights -xmp:Label -xmp:Subject -xmp:Title -xmp:Rating -xmp:Description -xmp:Person -xmp:PersonInImage '
p = read_image_info(self, xmptool_params)
if len(p) == 0:
data = False
message = ("<p>You are trying to copy xmp info from your source image, but your source image "
"doesn't contain the specified xmp data or doesn't seem to contain any xmp data (or you didn't select an image).</p>")
ret = QMessageBox.warning(self, "Error copying xmp info from source image", message)
else:
# remove the trailing newline character
p = p[:-1]
p_lines = re.split('\n',p)
rowcounter = 0
for line in p_lines:
#try:
descriptor, description = re.split(':', line,1)
descriptor = descriptor.strip()
description = description.strip()
gpslat = 0
gpslon = 0
if descriptor == "Creator":
self.xmp_creator.setText(description)
if descriptor == "Rights":
self.xmp_rights.setText(description)
if descriptor == "Label":
self.xmp_label.setText(description)
if descriptor == "Subject":
self.xmp_subject.setText(description)
if descriptor == "Title":
self.xmp_title.setText(description)
if descriptor == "Rating":
if description == "1":
self.xmp_rating1.setChecked(1)
elif description == "2":
self.xmp_rating2.setChecked(2)
elif description == "3":
self.xmp_rating3.setChecked(3)
elif description == "4":
self.xmp_rating4.setChecked(4)
elif description == "5":
self.xmp_rating5.setChecked(5)
if descriptor == "Description":
self.xmp_description.insertPlainText(description)
|
text = 'this is a sample file\nnew line'
savefile = open('newtext', 'w')
savefile.write(text)
savefile.close()
|
# Functions to get lists of movies with basic information like ID and title
def get_top_ten_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtopten = re.compile('<tr><td valign="top" align="right"><b>([0-9]+)</b></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for rank, movieid, urlend, title, trailerkind in matchtopten:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '%s. ' % rank,
'date': ''}
returnmovies.append(movie)
return returnmovies
def get_recent_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtrecentupdates = re.compile('<td(?: valign="top" style="text-align:left;"><b style="white-space: nowrap;">([^<]*)</b)?></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for date, movieid, urlend, title, trailerkind in matchtrecentupdates:
if date != '':
lastdate = date
else:
date = lastdate
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4r', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = ' (%s%02d.%s)' % (datearray[0], months_de_short.index(datearray[1]), '2011') # Fixme: dirty hack :(
except:
date = ''
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': date}
returnmovies.append(movie)
return returnmovies
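# Added illustrative note (assumes the site lists dates like "12. Aug"): for
# date == "12. Aug", datearray is ['12.', 'Aug'], months_de_short.index('Aug')
# is 8, and the formatted result is ' (12.08.2011)' with the hard-coded year.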
def get_current_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtacttrailers = re.compile('<tr><td(?: valign="top"><b>[A-Z0-9]</b)?></td><td style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a></td></tr>').findall(link)
for movieid, urlend, title in matchtacttrailers:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': ''}
returnmovies.append(movie)
return returnmovies
# Function to get a dict of detailed movie information like coverURL, plot and genres
def get_movie_infos(movieid, urlend='movie.html'):
returnmovie = {'movieid': movieid,
'title': '',
'otitle': '',
'coverurl': '',
'plot': '',
'genres': '',
'date': ''}
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
titlematch = re.compile('<h1>(.+?)</h1>.*<h2>\((.+?)\)</h2>', re.DOTALL).findall(link)
for title, otitle in titlematch:
returnmovie.update({'title': title, 'otitle': otitle})
covermatch = re.compile('src="([^"]+?)" width="150"').findall(link)
for coverurl in covermatch:
if coverurl != '/filme/grafiken/kein_poster.jpg':
returnmovie.update({'coverurl': MAIN_URL + coverurl})
plotmatch = re.compile('WERDEN! -->(.+?)</span>').findall(link)
for plot in plotmatch:
plot = re.sub('<[^<]*?/?>', '', plot)
returnmovie.update({'plot': plot})
releasedatematch = re.compile('Dt. Start:</b> ([0-9]+.+?)<img').findall(link)
for releasedateugly in releasedatematch:
datearray = releasedateugly.split(' ')
months_de_long = ['', 'Januar', 'Februar', 'M\xe4rz', 'April', 'Mai', 'Juni', 'Juli', 'August', 'September', 'Oktober', 'November', 'Dezember']
date = ' (%s%02d.%s)' % (datearray[0], months_de_long.index(datearray[1]), '2011') # Fixme: dirty hack :(
returnmovie.update({'date': date})
genresmatch = re.compile('<b style="font-weight:bold;">Genre:</b> (.+?)<br />', re.DOTALL).findall(link)
for allgenres in genresmatch:
returnmovie.update({'genres': allgenres})
return returnmovie
# Function to get a list of dicts which contain trailer URL, resolution and release date
def get_movie_trailers(movieid, urlend='movie.html'):
returntrailers = []
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
matchtrailerblock = re.compile('<table border=0 cellpadding=0 cellspacing=0 align=center width=100%><tr><td class="standard">.+?<b style="font-weight:bold;">(.+?)</b><br />\(([0-9:]+) Minuten\)(.+?</td></tr></table><br /></td></tr></table><br />)', re.DOTALL).findall(link)
for trailername, duration, trailerblock in matchtrailerblock:
matchlanguageblock = re.compile('alt="Sprache: (..)">(.+?)>([^<]+)</td></tr></table></td>', re.DOTALL).findall(trailerblock)
for language, languageblock, date in matchlanguageblock:
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4rz', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = datearray[0] + str(months_de_short.index(datearray[1])).zfill(2) + '.2011' # fixme: this could be made better, no idea how :)
except:
date = ''
matchtrailer = re.compile('generateDownloadLink\("([^"]+_([0-9]+)\.(?:mov|mp4)\?down=1)"\)').findall(languageblock)
for trailerurl, resolution in matchtrailer:
trailer = {'trailername': trailername,
'duration': duration,
'language': language,
'resolution': resolution,
'date': date,
'trailerurl': MAIN_URL + trailerurl}
returntrailers.append(trailer)
return returntrailers
# Functions to get the information for xbmc
def show_categories():
add_dir(Language(30003), 3, os.path.join(IMAGE_DIR, 'database.png')) # Current
add_dir(Language(30001), 1, os.path.join(IMAGE_DIR, 'ranking.png')) # TopTen
add_dir(Language(30002), 2, os.path.join(IMAGE_DIR, 'schedule.png')) # Recent
end_dir()
def show_top_ten_movies():
toptenmovies = get_top_ten_movies()
show_movies(toptenmovies)
end_dir()
def show_recent_movies():
recentmovies = get_recent_movies()
show_movies(recentmovies)
end_dir()
def show_current_movies():
currentmovies = get_current_movies()
show_movies(currentmovies)
end_dir()
# Functions to show the information in xbmc
def show_movies(movies):
counter = 0
ProgressDialog = xbmcgui.DialogProgress()
ProgressDialog.create(Language(30020), '%s %s' % (str(len(movies)), Language(30021)))
ProgressDialog.update(0)
for movie in movies:
movieinfo = get_movie_infos(movieid=movie['movieid'], urlend=movie['urlend'])
title = '%s%s%s' % (movie['rank'], movieinfo['title'], movie['date'])
add_movie(title=title,
movieid=movieinfo['movieid'],
coverurl=movieinfo['coverurl'],
plot=movieinfo['plot'],
otitle=movieinfo['otitle'],
genres=movieinfo['genres'],
releasedate=movieinfo['date'],
playcount=get_playcount(movie['movieid']))
counter += 1
ProgressDialog.update(100 * counter / len(movies),
'%s %s' % (str(len(movies)), Language(30021)), # x movies
#!/usr/bin/env python
import webapp2
import logging
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.ext import ndb
from MailMessage import MailMessage
# the email domain of this app is @pomis-newsletterman.appspotmail.com
class EmailHandlerV1(InboundMailHandler):
def receive(self, mail_message):
logging.info(mail_message.to_mime_message())
# store message
service_id = mail_message.to.split('@')[0]
if '<' in service_id:
service_id = service_id.split('<')[1]
mime_message = str(mail_message.to_mime_message())
service_key = ndb.Key(MailMessage, service_id)
new_id = ndb.Model.allocate_ids(size = 1, parent = service_key)[0]
mail_message_key = ndb.Key(MailMessage, new_id, parent = service_key)
persistent_mail_message = MailMessage(parent = mail_message_key, mime_message = mime_message)
persistent_mail_message.put()
app = webapp2.WSGIApplication([EmailHandlerV1.mapping()], debug=True)
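# Added illustrative note (address is hypothetical): for a message sent to
# "Weekly <weekly-digest@pomis-newsletterman.appspotmail.com>", mail_message.to
# splits to "Weekly <weekly-digest" on '@' and then to "weekly-digest" on '<',
# so service_id == 'weekly-digest' becomes the parent key of the stored message.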
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Jonathan Matthew <jonathan@d14n.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import xml.dom.minidom as dom
import urllib.parse
import rb
from gi.repository import RB
# musicbrainz URLs
MUSICBRAINZ_RELEASE_URL = "http://musicbrainz.org/ws/2/release/%s?inc=artists"
MUSICBRAINZ_RELEASE_PREFIX = "http://musicbrainz.org/release/"
MUSICBRAINZ_RELEASE_SUFFIX = ".html"
MUSICBRAINZ_SEARCH_QUERY = "artist:\"%s\" AND release:\"%s\""
MUSICBRAINZ_SEARCH_URL = "http://musicbrainz.org/ws/2/release/?query=%s&limit=1"
# musicbrainz IDs
MUSICBRAINZ_VARIOUS_ARTISTS = "89ad4ac3-39f7-470e-963a-56509c546377"
# Amazon URL bits
AMAZON_IMAGE_URL = "http://images.amazon.com/images/P/%s.01.LZZZZZZZ.jpg"
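# Added illustrative note (the ASIN value is hypothetical):
#   AMAZON_IMAGE_URL % "B000002UB3"
#   -> "http://images.amazon.com/images/P/B000002UB3.01.LZZZZZZZ.jpg"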
class MusicBrainzSearch(object):
|
def get_release_cb (self, data, args):
(key, store, callback, cbargs) = args
if data is None:
print("musicbrainz release request returned nothing")
callback(*cbargs)
return
try:
parsed = dom.parseString(data)
storekey = RB.ExtDBKey.create_storage('album', key.get_field('album'))
# check that there's an artist that isn't 'various artists'
artist_tags = parsed.getElementsByTagName('artist')
if len(artist_tags) > 0:
artist_id = artist_tags[0].attributes['id'].firstChild.data
if artist_id != MUSICBRAINZ_VARIOUS_ARTISTS:
# add the artist name (as album-artist) to the storage key
nametags = artist_tags[0].getElementsByTagName('name')
if len(nametags) > 0:
artistname = nametags[0].firstChild.data
print("got musicbrainz artist name %s" % artistname)
storekey.add_field('artist', artistname)
# look for an ASIN tag
asin_tags = parsed.getElementsByTagName('asin')
if len(asin_tags) > 0:
asin = asin_tags[0].firstChild.data
print("got ASIN %s" % asin)
image_url = AMAZON_IMAGE_URL % asin
store.store_uri(storekey, RB.ExtDBSourceType.SEARCH, image_url)
else:
print("no ASIN for this release")
callback(*cbargs)
except Exception as e:
print("exception parsing musicbrainz response: %s" % e)
callback(*cbargs)
def try_search_artist_album (self, key, store, callback, *args):
album = key.get_field("album")
artist = key.get_field("artist")
if not album or not artist:
print("artist or album information missing")
callback(*args)
return
query = MUSICBRAINZ_SEARCH_QUERY % (artist.lower(), album.lower())
url = MUSICBRAINZ_SEARCH_URL % (urllib.parse.quote(query, safe=':'),)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
def search(self, key, last_time, store, callback, *args):
key = key.copy() # ugh
album_id = key.get_info("musicbrainz-albumid")
if album_id is None:
print("no musicbrainz release ID for this track")
self.try_search_artist_album(key, store, callback, args)
return
if album_id.startswith(MUSICBRAINZ_RELEASE_PREFIX):
album_id = album_id[len(MUSICBRAINZ_RELEASE_PREFIX):]
if album_id.endswith(MUSICBRAINZ_RELEASE_SUFFIX):
album_id = album_id[:-len(MUSICBRAINZ_RELEASE_SUFFIX)]
print("stripped release ID: %s" % album_id)
url = MUSICBRAINZ_RELEASE_URL % (album_id)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
|
#!/usr/bin/env python
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_SessionCompute_2
import pprint
# connector and config
client = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
config = ConfigDB_SessionCompute_2
# according to config
dataList = client.getData() # return an array of docs (like a csv reader)
output = []
ONE_HOUR_IN_SECONDS = 3600
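# Added descriptive note: the loop below groups each contributor's edits into
# sessions. A session keeps extending while the next timestamp falls within
# ONE_HOUR_IN_SECONDS of the previous edit; otherwise the current session is
# appended to `output` and a new session starts at that timestamp.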
if(dataList):
for i in dataList:
contributor_username = i[config.COLUMN]
current_user = contributor_username
start_time = None
end_time = None
duration = None
last_start_timestamp = None
count = 1
if contributor_username:
print "\n\n"
print contributor_username.encode('utf-8')
while True:
doc = i['data'].next()
if doc is None:
break
print doc["timestamp"]
if start_time is None:
start_time = float(doc["timestamp"])
if end_time is None:
end_time = start_time + ONE_HOUR_IN_SECONDS
else:
if float(doc["timestamp"]) <= end_time:
end_time = float(doc["timestamp"]) + ONE_HOUR_IN_SECONDS
count += 1
else:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
start_time = float(doc["timestamp"])
end_time = start_time + ONE_HOUR_IN_SECONDS
count = 1
if start_time:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
pprint.pprint(output)
clientOutput = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
clientOutput.saveData(output)
# import datetime
# print(
# datetime.datetime.fromtimestamp(
# int("1176585742")
# ).strftime('%Y-%m-%d %H:%M:%S')
# )
# {
# start time:
# end time:
# duration:
# user:
# }
# import time
# timestamp2 = time.mktime(d.timetuple()) # DO NOT USE IT WITH UTC DATE
# datetime.fromtimestamp(timestamp2)
# datetime.datetime(2011, 1, 1, 0, 0)
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
from os import environ
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component calling the different backend procedures.
"""
@asyncio.coroutine
def onJoin(self, details):
procs = [u'com.mathservice.add2',
u'com.mathservice.mul2',
u'com.mathservice.div2']
try:
for proc in procs:
res = yield from self.call(proc, 2, 3)
print("{}: {}".format(proc, res))
except Exception as e:
print("Something went wrong: {}".format(e))
self.leave()
def onDisconnect(self):
asyncio.get_event_loop().stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug=False, # optional; log even more details
)
runner.run(Component)
|
#!/usr/bin/env python
'''
Plot distribution of each feature,
conditioned on its bfeature type
'''
import argparse
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from common import *
from information import utils
from scipy.stats import itemfreq
nbins = 100
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('features', type=load_npz,
help='Training data features (npz)')
parser.add_argument('output',
help='Output file with plots (pdf)')
return parser
if __name__ == "__main__":
args = opts().parse_args()
pdf = PdfPages(args.output)
dfs = args.features['ifeatures']
cfs = args.features['ffeatures']
print "Plotting float features"
bfs = args.features['bfeatures']
u = utils.unique_rows(bfs)
indices = [np.all(bfs==ui, axis=-1) for ui in u]
for j, f in enumerate(cfs.T):
print "...ffeature %d" % j
fig = plt.figure()
h = np.zeros(nbins)
not_nan = f[np.logical_not(np.isnan(f))]
f_min = not_nan.min()
f_max = not_nan.max()
x = np.linspace(f_min, f_max, nbins)
dx = (f_max - f_min) / nbins
for idx in indices:
h_new, bins = np.histogram(f[idx], range=(f_min, f_max), bins=nbins)
plt.bar(x, h_new, bottom=h, width=dx)
h += h_new
plt.xlim(f_min, f_max)
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('FFeature %d. # NaN = %d' % (j, np.sum(np.isnan(f))))
pdf.savefig(fig)
plt.close()
print "Plotting integer features"
for j, x in enumerate(dfs.T):
print "...dfeature %d" % j
freq = itemfreq(x)
fig = plt.figure()
xu = np.sort(np.unique(x))
h = np.zeros_like(xu)
for idx in indices:
f = itemfreq(x[idx])
h_new = np.zeros_like(h)
h_new[f[:,0]] = f[:,1]
plt.bar(xu, h_new, bottom=h)
h += h_new
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('DFeature %d' % j)
pdf.savefig(fig)
plt.close()
pdf.close()
|
#!/usr/bin/env python3
import os
from pathlib import Path
import numpy as np
from pysisyphus.helpers import geom_from_xyz_file
from pysisyphus.stocastic.align import matched_rmsd
THIS_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def test_matched_rmsd():
geom1 = geom_from_xyz_file(THIS_DIR / "eins.xyz")
# Calling with the identical geometries should return RMSD of 0.
min_rmsd, (geom1_matched, geom2_matched) = matched_rmsd(geom1, geom1)
np.testing.assert_allclose(min_rmsd, 0.0, atol=1e-10)
np.testing.assert_allclose(geom1_matched.coords, geom2_matched.coords)
geom2 = geom_from_xyz_file(THIS_DIR / "zwei.xyz")
min_rmsd, _ = matched_rmsd(geom1, geom2)
np.testing.assert_allclose(min_rmsd, 0.057049, atol=1e-5)
if __name__ == "__main__":
test_matched_rmsd()
|
self.assertTrue(np.isfinite(b[0, 0]))
self.assertTrue(np.isfinite(b[100, 100]))
def test_reproject_stere(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_stere.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_on_repro_gcps(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2.reproject_gcps()
n1.reproject(n2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_on_repro_gcps.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape(), n2.shape())
self.assertEqual(type(n1[1]), np.ndarray)
def test_reproject_gcps_resize(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n2 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n1.reproject(n2)
n1.resize(2)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_reproject_gcps_resize.png')
n1.write_figure(tmpfilename, 2, clim='hist')
self.assertEqual(n1.shape()[0], n2.shape()[0] * 2)
self.assertEqual(n1.shape()[1], n2.shape()[1] * 2)
self.assertEqual(type(n1[1]), np.ndarray)
def test_undo(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
shape1 = n1.shape()
n1.resize(10)
n1.undo()
shape2 = n1.shape()
self.assertEqual(shape1, shape2)
def test_write_figure(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure.png')
n1.write_figure(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_band(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_band.png')
n1.write_figure(tmpfilename, 2)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_clim(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_clim.png')
n1.write_figure(tmpfilename, 3, clim='hist')
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_legend(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_legend.png')
n1.write_figure(tmpfilename, 3, clim='hist', legend=True, titleString="Title String")
self.assertTrue(os.path.exists(tmpfilename))
def test_write_figure_logo(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_figure_logo.png')
n1.write_figure(tmpfilename, 3, clim='hist',
logoFileName=self.test_file_gcps)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename)
self.assertTrue(os.path.exists(tmpfilename))
def test_write_geotiffimage_if_band_id_is_given(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_write_geotiffimage.tif')
n1.write_geotiffimage(tmpfilename, band_id=1)
self.assertTrue(os.path.exists(tmpfilename))
def test_get_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata()
self.assertEqual(type(m), dict)
self.assertTrue('filename' in m)
def test_get_metadata_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata('filename')
self.assertEqual(type(m), str)
def test_get_metadata_wrong_key(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
with self.assertRaises(ValueError):
n1.get_metadata('some_crap')
def test_get_metadata_band_id(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
m = n1.get_metadata(band_id=1)
self.assertEqual(type(m), dict)
self.assertTrue('name' in m)
def test_set_metadata(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.set_metadata('newKey', 'newVal')
m = n1.get_metadata('newKey')
self.assertEqual(m, 'newVal')
def test_set_metadata_band_id(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
n1.set_metadata('newKey', 'newVal', band_id=1)
m = n1.get_metadata('newKey', 1)
self.assertEqual(m, 'newVal')
def test_get_band_number(self):
n1 = Nansat(self.test_file_stere, log_level=40, mapper=self.default_mapper)
self.assertEqual(n1.get_band_number(1), 1)
@unittest.skipUnless(MATPLOTLIB_IS_INSTALLED, 'Matplotlib is required')
def test_get_transect(self):
plt.switch_backend('agg')
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[28.31299128, 28.93691525],
[70.93709219, 70.69646524]],
[str('L_645')])
tmpfilename = os.path.join(self.tmp_data_path, 'nansat_get_transect.png')
plt.plot(t['lat'], t['L_645'], '.-')
plt.savefig(tmpfilename)
plt.close('all')
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
self.assertTrue('lon' in t.dtype.fields)
self.assertEqual(type(t['lat']), np.ndarray)
self.assertEqual(type(t['lon']), np.ndarray)
def test_get_transect_outside(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
t = n1.get_transect([[0, 28.31299128], [0, 70.93709219]], [1])
self.assertTrue('L_645' in t.dtype.fields)
self.assertTrue('line' in t.dtype.fields)
self.assertTrue('pixel' in t.dtype.fields)
self.assertTrue('lat' in t.dtype.fields)
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from setuptools import setup, Extension
classifiers = ["License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Build Tools"]
setup(name="sourcestamp",
version="0.1",
description="Compute timestamp for a source code tree",
author="Daniel Narvaez",
author_email="dwnarvaez@gmail.com",
url="http://github.com/dnarvaez/sourcestamp",
classifiers=classifiers,
ext_modules=[Extension("sourcestamp", ["src/sourcestamp.c"])])
|
config = {
"interfaces": {
"google.cloud.talent.v4beta1.TenantService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"CreateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetTenant": {
"timeout_millis": 60000,
"retry_codes | _name": "idempotent",
"retry_params_name": "default",
},
"UpdateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempoten | t",
"retry_params_name": "default",
},
"DeleteTenant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListTenants": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
},
}
}
}
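# Added descriptive note (not part of the generated config): methods tagged
# "idempotent" (GetTenant, DeleteTenant, ListTenants) are retried on
# DEADLINE_EXCEEDED/UNAVAILABLE using the "default" retry_params above; delays
# start at 100 ms, grow by 1.3x up to 60 s, within a 600 s total budget.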
|
x = raw_input()
print x[0].upper() + x[1:]
|
.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
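# Added illustrative note (the example selector is hypothetical): matching the
# pattern above against a CSS-style attribute selector,
#   m = attribselect_re.match('a[href^="http"]')
#   (m.group('tag'), m.group('attribute'), m.group('operator'), m.group('value'))
#   -> ('a', 'href', '^', 'http')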
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
s |
from __future__ import unicode_literals
import uuid
from django.forms import UUIDField, ValidationError
from django.test import SimpleTestCase
class UUIDFieldTest(SimpleTestCase):
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
# coding=utf-8
import asyncio
import random
import json
import hashlib
import aiohttp
import async_timeout
import sys
class BaiduTranslate:
lang_auto = 'auto'
lang_zh = 'zh'
lang_en = 'en'
timeout = 20
api_addr = 'http://fanyi-api.baidu.com/api/trans/vip/translate'
def __init__(self, loop=None):
self.appid = '20171009000086968'
self.secret = 'vZ36FjnZ91FoLJwe5NrF'
if loop is None:
self.async = False
self.loop = asyncio.get_event_loop()
else:
self.async = True
self.loop = loop
def translate(self, text, from_lang, to_lang):
if self.async:
return self._request(text, from_lang, to_lang)
else:
return self.loop.run_until_complete(self._request(text, from_lang, to_lang))
async def _request(self, text, from_lang, to_lang):
salt = random.randint(0, 2147483647)
sign = self.appid + text + str(salt) + self.secret
sign = hashlib.md5(sign.encode('utf-8')).hexdigest()
params = {'q': text, 'from': from_lang, 'to': to_lang, 'appid': self.appid, 'salt': salt, 'sign': sign}
async with aiohttp.ClientSession(loop=self.loop) as session:
with async_timeout.timeout(self.timeout, loop=self.loop):
async with session.post(self.api_addr,
data=params) as resp:
body = await resp.read()
res = json.loads(body.decode('utf-8'))
if 'error_code' in res and res['error_code'] != '52000':
raise RuntimeError(res['error_msg'])
return res['trans_result'][0]['dst']
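# Added usage sketch (not in the original file); assumes the appid/secret
# configured above are still valid and that network access is available.
if __name__ == '__main__':
    translator = BaiduTranslate()
    print(translator.translate('hello world', BaiduTranslate.lang_en, BaiduTranslate.lang_zh))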
|
from core.plugins.lib.proxies import MetricProxy, SourceProxy
from core.plugins.lib.models import PluginDataModel
from core.plugins.lib.fields import Field, ListField, DateTimeField, FloatField, IntegerField
from core.plugins.lib.scope import Scope, ZonePerm, BlockPerm
class BaseFitbitModel(PluginDataModel):
metric_proxy = MetricProxy(name="newsfeed")
source_proxy = SourceProxy(name="fitbit")
date = DateTimeField()
value = FloatField()
class StepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="steps")
class DistanceModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="distance")
class TimeInBedModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="time_in_bed")
class MinutesAsleepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="minutes_asleep")
class WeightModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="weight")
class SleepEfficiencyModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_efficiency")
class ActivityCaloriesModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="activity_calories")
class SleepStartTimeModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_start_time")
value = DateTimeField()
class CaloriesInModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="calories_in")
class CaloriesModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="calories")
class WaterModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="water")
MODEL_DICT = {
"activities/steps": StepModel,
"activities/dist | ance": DistanceModel,
"sleep/timeInBed": TimeInBedModel,
"sleep/minutesAsleep": MinutesAsleepModel,
"body/weight": WeightModel,
"sleep/efficiency": SleepEfficiencyModel,
"activities/activityCalories": ActivityCaloriesModel,
"sleep/startTime": SleepStartTimeModel,
"foods/log/caloriesIn": CaloriesInModel,
"activities | /calories": CaloriesModel,
"foods/log/water": WaterModel
}
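# Added usage sketch (not in the original file): MODEL_DICT maps a Fitbit
# time-series resource path to the model class used to store its data points.
#   model_cls = MODEL_DICT["activities/steps"]   # -> StepModel
#   # a point would then be stored as model_cls(date=..., value=...)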
|
self.errorhandler(self, TypeError, msg)
except:
from sys import exc_info
exc, value, tb = exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
from types import UnicodeType
db = self._get_db()
charset = db.character_set_name()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index,
db.literal(arg))
if isinstance(q, unicode):
q = q.encode(charset)
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range(len(args))]))
if type(q) is UnicodeType:
q = q.encode(charset)
self._query(q)
self._executed = q
if not self._defer_warnings: self._warning_check()
return args
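# Added usage sketch (procedure name and argument are hypothetical), following
# the OUT/INOUT retrieval protocol described in the callproc() docstring above:
#   cursor.callproc('my_proc', (5,))
#   while cursor.nextset() is not None:
#       pass                                  # drain all result sets first
#   cursor.execute('SELECT @_my_proc_0')
#   out_value = cursor.fetchone()[0]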
def _do_query(self, q):
db = self._get_db()
self._last_executed = q
db.query(q)
self._do_get_result()
return self.rowcount
def _query(self, q): return self._do_query(q)
def _fetch_row(self, size=1):
if not self._result:
return ()
return self._result.fetch_row(size, self._fetch_type)
def __iter__(self):
return iter(self.fetchone, None)
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
class CursorStoreResultMixIn(object):
"""This is a MixIn class which causes the entire result set to be
stored on the client side, i.e. it uses mysql_store_result(). If the
result set can be very large, consider adding a LIMIT clause to your
query, or using CursorUseResultMixIn instead."""
def _get_result(self): return self._get_db().store_result()
def _query(self, q):
rowcount = self._do_query(q)
self._post_get_result()
return rowcount
def _post_get_result(self):
self._rows = self._fetch_row(0)
self._result = None
def fetchone(self):
"""Fetches a single row from the cursor. None indicates that
no more rows are available."""
self._check_executed()
if self.rownumber >= len(self._rows): return None
result = self._rows[self.rownumber]
self.rownumber = self.rownumber+1
return result
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
"""Fetchs all available rows from the cursor."""
self._check_executed()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position according
to mode.
If mode is 'relative' (default), value is taken as offset to
the current position in the result set, if set to 'absolute',
value states an absolute target position."""
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
self.errorhandler(self, ProgrammingError,
"unknown scroll mode %s" % `mode`)
if r < 0 or r >= len(self._rows):
self.errorhandler(self, IndexError, "out of range")
self.rownumber = r
def __iter__(self):
self._check_executed()
result = self.rownumber and self._rows[self.rownumber:] or self._rows
return iter(result)
class CursorUseResultMixIn(object):
"""This is a MixIn class which causes the result set to be stored
in the server and sent row-by-row to client side, i.e. it uses
mysql_use_result(). You MUST retrieve the entire result set and
close() the cursor before additional queries can be performed on
the connection."""
_defer_warnings = True
def _get_result(self): return self._get_db().use_result()
def fetchone(self):
"""Fetches a single row from the cursor."""
self._check_executed()
r = self._fetch_row(1)
if not r:
self._warning_check()
return None
self.rownumber = self.rownumber + 1
return r[0]
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r
def fetchall(self):
"""Fetchs all available rows from the cursor."""
self._check_executed()
r = self._fetch_row(0)
self.rownumber = self.rownumber + len(r)
self._warning_check()
return r
def __iter__(self):
return self
def next(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
class CursorTupleRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as tuples,
which is the standard form required by DB API."""
_fetch_type = 0
class CursorDictRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as
dictionaries. This is a non-standard feature."""
_fetch_type = 1
def fetchoneDict(self):
"""Fetch a single row as a dictionary. Deprecated:
Use fetchone() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchoneDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchone()
def fetchmanyDict(self, size=None):
"""Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. |
from django.contrib import admin
from holidays.models import (Holiday, StaticHoliday,
NthXDayHoliday, NthXDayAfterHoliday, CustomHoliday)
class HolidayAdmin(admin.ModelAdmin):
pass
class StaticHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayAfterHolidayAdmin(admin.ModelAdmin):
pass
class CustomHolidayAdmin(admin.ModelAdmin):
pass
admin.site.register(Holiday, HolidayAdmin)
admin.site.register(StaticHoliday, StaticHolidayAdmin)
admin.site.register(NthXDayHoliday, NthXDayHolidayAdmin)
admin.site.register(NthXDayAfterHoliday, NthXDayAfterHolidayAdmin)
admin.site.register(CustomHoliday, CustomHolidayAdmin)
|
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from django.utils.importlib import import_module
from djangae.core.paginator import EmptyPage, PageNotAnInteger
from djangae.core.paginator import DatastorePaginator as Paginator
from google.appengine.ext import db
from google.appengine.ext.deferred import defer
from .models import Error, Event
import calendar
def get_permission_decorator():
if getattr(settings, 'CENTAUR_PERMISSION_DECORATOR', None):
module, decorator = settings.CENTAUR_PERMISSION_DECORATOR.rsplit('.', 1)
return getattr(import_module(module), decorator)
return user_passes_test(lambda u: u.is_superuser)
permission_decorator = get_permission_decorator()
def timestamp(datetime):
""" Returns UTC timestamp, this is included in python3 but not 2"""
return calendar.timegm(datetime.timetuple())
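# Added illustrative note: timestamp(datetime(1970, 1, 2)) == 86400, i.e.
# seconds since the Unix epoch computed from a UTC datetime.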
@permission_decorator
def index(request):
errors = Error.objects.all()
# Filter by user email
if request.GET.get('user', None):
errors_pks = [e.error.pk for e in Event.objects.filter(logged_in_user_email=request.GET.get('user'))]
errors = errors.filter(pk__in=errors_pks)
errors = errors.order_by("-last_event")
page = request.GET.get('page', 1)
paginator = Paginator(errors, 20)
try:
errors = paginator.page(page)
except PageNotAnInteger:
errors = paginator.page(1)
except EmptyPage:
errors = paginator.page(paginator.num_pages)
return render(request, "centaur/index.html", {"errors": errors})
@permission_decorator
def error(request, error_id, limit=200):
error = get_object_or_404(Error, pk=error_id)
events = error.events.all().order_by("-created")[:limit]
series = [
timestamp(event.created.replace(minute=0, second=0, microsecond=0))
for event in events
]
page = request.GET.get('page', 1)
paginator = Paginator(events, 1)
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render(request, "centaur/error.html", {
"error": error,
"events": events,
"series": series,
})
CLEANUP_QUEUE = getattr(settings, 'QUEUE_FOR_EVENT_CLEANUP', 'default')
@permission_decorator
def clear_old_events(request):
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
return HttpResponse("OK. Cleaning task deferred.")
EVENT_BATCH_SIZE = 400
ERROR_UPDATE_BATCH_SIZE = 50
def _update_error_count(error_id, events_removed):
@db.transactional(xg=True)
def txn():
_error = Error.objects.get(pk=error_id)
_error.event_count -= events_removed
_error.save()
txn()
def _clear_old_events():
from google.appengine.api.datastore import Query, Delete, Get
query = Query("centaur_event", keys_only=True)
query["created <= "] = timezone.now() - timedelta(days=30)
old_event_keys = list(query.Run(limit=EVENT_BATCH_SIZE))
old_events = filter(None, Get(old_event_keys))
errors = {}
for event in old_events:
data = errors.setdefault(event['error_id'], {'count': 0, 'event_keys':[]})
data['count'] += 1
data['event_keys'].append(event.key())
to_delete = []
for error_id, data in errors.items()[:ERROR_UPDATE_BATCH_SIZE]:
# Each event might be for a different error and while we can delete hundreds of events, we
# probably don't want to defer hundreds of tasks, so we'll only delete events from a handful of distinct errors.
defer(_update_error_count, error_id, data['count'], _queue=CLEANUP_QUEUE)
to_delete.extend(data['event_keys'])
Delete(to_delete)
if len(old_event_keys) == EVENT_BATCH_SIZE or len(to_delete) < len(old_events):
# In case we didn't clear everything, run again to find more old events.
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
|
import functools
import pfp.interp
def native(name, ret, interp=None, send_interp=False):
"""Used as a decorator to add the decorated function to the
pfp interpreter so that it can be used from within scripts.
:param str name: The name of the function as it will be exposed in template scripts.
:param pfp.fields.Field ret: The return type of the function (a class)
:param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
:param bool send_interp: If the current interpreter should be passed to the function.
Examples:
The example below defines a ``Sum`` function that will return the sum of
all parameters passed to the function: ::
from pfp.fields import PYVAL
@native(name="Sum", ret=pfp.fields.Int64)
def sum_numbers(params, ctxt, scope, stream, coord):
res = 0
for param in params:
res += PYVAL(param)
return res
The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it
requires that the interpreter be sent as a parameter: ::
@native(name="Int | 3", ret=pfp.fields.Void, send_interp=True)
def int3(params, ctxt, scope, stream, coord, interp):
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop()
"""
def native_decorator(func):
@functools.wraps(func)
def native_wrapper(*args, **kwargs):
return func(*args, **kwargs)
pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
return native_wrapper
return native_decorator
def predefine(template):
pfp.interp.PfpInterp.add_predefine(template)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
plot the results from the files igraph_degree_assort_study and degree_assortativity
'''
from igraph import *
import os
import numpy as np
import matplotlib.pyplot as plt
#########################
IN_DIR = '/home/sscepano/Projects7s/Twitter-workspace/ALL_SR'
img_out_plot = "7MOda_unweighted.png"
#########################
#########################
# read from a file the res
#########################
def read_in_res():
f = open('7MODeg_assort_study.weighted_edge_list', 'r')
DA = []
TH = []
for line in f:
if line.startswith('stats for'):
th = float(line.split()[-1])
TH.append(th)
if line.startswith('The network is'):
da = float(line.split()[-1])
DA.append(da)
th_last = th
f2 = open('plot_da_0.2.txt', 'r')
for line in f2:
(th, da) = line.split()
th = float(th)
if th < th_last:
continue
da = float(da)
TH.append(th)
DA.append(da)
f3 = open('DA_SR_th.tab', 'w')
for i in range(len(TH)):
f3.write(str(TH[i]) + '\t' + str(DA[i]) + '\n')
return TH, DA
def plot_DA(xaxis, da):
x = np.array(xaxis)
y = np.array(da)
plt.plot(x, y, 'c')
plt.grid(True)
plt.title('SR network')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.ylabel('degree assortativity')
plt.xlabel('SR threshold')
plt.savefig(img_out_plot,format='png',dpi=200)
def main():
os.chdir(IN_DIR)
x, DA = read_in_res()
plot_DA(x, DA)
main()
from dungeon.dungeon import Dungeon
def main():
testdungeon = Dungeon('level1.txt')
print(testdungeon)
if __name__ == '__main__':
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from getpass import getpass
from datetime import datetime
import pprint
from docopt import docopt
import requests
from .spelling import spellchecker
from .dispatch import dispatch
from .utils import login, validate_username
from .exceptions import ConnectionErrorException
PY3 = sys.version > '3'
if PY3:
pass
else:
input = raw_input
if sys.version < '3':
from urlparse import urljoin
else:
from urllib.parse import urljoin
GITHUB_USERS = 'https://api.github.com/users/'
def parse_respect_args(args):
'''
Respect
Usage:
respect <username> [--repos=<rep>] [--followers=<foll>] [--language=<lang>]
respect <username> bio
respect <username> stars [--verbose]
respect <username> repos [--verbose] [--language=<lang>]
respect -h | --help
Options:
-h, --help Shows this help information.
-v, --verbose Prints detailed information.
-r <rep> --repos <rep> Number of repositories [default: ].
-f <foll> --followers <foll> Number of followers [default: ].
-l <lang> --language <lang> Language name [default: ].
'''
args = docopt(parse_respect_args.__doc__, argv=args)
return args
def main():
"""
Main entry point for the `respect` command.
"""
args = parse_respect_args(sys.argv[1:])
if validate_username(args['<username>']):
print("processing...")
else:
print("@"+args['<username>'], "is not a valid username.")
print("Username may only contain alphanumeric ASCII characters or "
"dashes and cannot begin with a dash.")
return
try:
r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
except ConnectionErrorException as e:
print('Connection Error from requests. Request again, please.')
print(e)
return
if r.status_code == 404 or r.status_code == 403:
session = login(401, args=args)
return dispatch(args, r, session)
elif r.status_code == 200:
return dispatch(args, response=r)
else:
raise UnknownStatusCodeException
if __name__ == '__main__':
main()
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import TrackStatsHookBase
class ModelHookManager:
"""
This class registers and manages a set of hooks subclassed from
`TrackStatsHookBase`. The given hook is registered on all modules within
'named_modules'.
Tracking is started and stopped for all hooks via `self.start_tracking()` and
`self.stop_tracking()`. Alternatively, this class can be used as a context manager to
automate these calls. For example,
```
with hook_manager as hooks:
... # Train here
stats = hooks.get_statistics()
```
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function that
takes a name and module as inputs and then outputs a dictionary of
arguments to pass to the hook
"""
def __init__(
self,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
assert hook_type in ["forward", "backward", "pre_forward"]
assert issubclass(hook_class, TrackStatsHookBase)
# Register the hooks via class method.
tracked_vals = self.register_storage_hooks(named_modules,
hook_class=hook_class,
hook_type=hook_type,
hook_args=hook_args)
# These are the functions that are called on every forward or backward pass.
self.hooks = tracked_vals[0]
# These are handles to the hooks; PyTorch lets the user unregister
# hooks through these handles.
self._hook_handles = tracked_vals[1]
# These are the filtered modules that will be tracked.
self.tracked_modules = tracked_vals[2]
# Keep track of whether tracking is on.
self._tracking = False
@property
def tracking(self):
return self._tracking
def __enter__(self):
"""Start tracking when `with` is called."""
self.start_tracking()
return self
def __exit__(self, *args):
"""Stop tracking when `with` block is left."""
self.stop_tracking()
@classmethod
def register_storage_hooks(
cls,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
"""
Register hook on each module in 'named_modules'.
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function
that takes a name and module as inputs and then outputs a
dictionary of arguments to pass to the hook
"""
assert hook_type in ["forward", "backward", "pre_forward"]
hooks = []
handles = []
tracked_modules = dict()
# Register hooks on the modules.
for n, m in named_modules.items():
if callable(hook_args):
args = hook_args(n, m)
else:
args = hook_args or {}
hook = hook_class(name=n, **args)
if hook_type == "forward":
handle = m.register_forward_hook(hook)
elif hook_type == "pre_forward":
handle = m.register_forward_pre_hook(hook)
else:
handle = m.register_backward_hook(hook)
hooks.append(hook)
handles.append(handle)
tracked_modules[n] = m
return hooks, handles, tracked_modules
def start_tracking(self):
self._tracking = True
for hook in self.hooks:
hook.start_tracking()
def stop_tracking(self):
self._tracking = False
for hook in self.hooks:
hook.stop_tracking()
def get_statistics(self):
"""
This returns a generator with elements
`(name, module, statistic_0, ..., statistic_n)`.
"""
return (
(name, module, *hook.get_statistics())
for (name, module), hook in zip(self.tracked_modules.items(), self.hooks)
)
def remove_hooks(self):
"""
Remove all hooks from the model and stop tracking statistics.
"""
for handle in self._hook_handles:
handle.remove()
self.hooks = []
self._hook_handles = []
self.tracked_modules = dict()
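# --- usage sketch (illustrative, not part of the original file) ---------------
# Assuming a concrete hook class (here a hypothetical `ActivationStatsHook`
# subclassed from TrackStatsHookBase), the manager is driven exactly as the
# class docstring describes:
#
#   named_modules = dict(model.named_modules())            # any torch.nn.Module
#   hook_manager = ModelHookManager(named_modules,
#                                   ActivationStatsHook,   # hypothetical hook
#                                   hook_type="forward")
#   with hook_manager as hooks:
#       train_one_epoch(model)                             # placeholder training step
#   for name, module, *stats in hooks.get_statistics():
#       print(name, stats)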
|
# -*- encoding: utf-8 -*-
__author__ = 'pp'
__date__ = '6/25/14'
"""
georest.view.utils
~~~~~~~~~~~~~~~~~
helper/mixin things for views
"""
import sys
from functools import wraps
from flask import request
from .exceptions import InvalidRequest
from ..geo import GeoException
def get_json_content():
"""check content type and return raw text instrad of json data"""
if request.mimetype != 'application/json':
raise InvalidRequest('Only "application/json" supported')
try:
data = request.data.decode('utf-8')
# data = request.get_data().decode('utf-8')
except UnicodeError:
raise InvalidRequest('Cannot decode content with utf-8')
return data
def get_if_match():
"""get if_match etag from request"""
etag = None
if request.if_match and not request.if_match.star_tag:
try:
etag, = request.if_match.as_set() # only 1 allowed
except ValueError:
raise InvalidRequest('Cannot process if_match %s' % \
request.if_match)
return etag
def catcher(f):
"""catching uncatched errors, and filling the traceback"""
@wraps(f)
def decorator(*args, **kwargs):
try:
return f(*args, **kwargs)
except GeoException as e:
if not e.traceback:
e.traceback = sys.exc_info()[2]
raise
return decorator
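# --- usage sketch (illustrative, not part of the original module) -------------
# The decorator is meant to wrap view callables so that a GeoException raised
# deeper in the stack keeps its original traceback; the view below and
# `feature_store` are hypothetical.
#
#   @catcher
#   def get_feature(key):
#       return feature_store.get(key)   # may raise a GeoException subclass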
|
import sys
from sim import Sim
from node import Node
from link import Link
from transport import Transport
from tcp import TCP
from network import Network
import optparse
import os
import subprocess
class AppHandler(object):
def __init__(self,filename, directory):
self.filename = filename
self.directory = directory
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.f = open("%s/%s" % (self.directory,self.filename),'w')
def receive_data(self,data):
Sim.trace('AppHandler',"application got %d bytes" % (len(data)))
self.f.write(data)
self.f.flush()
class Main(object):
def __init__(self):
self.iterations = 1 #set from flags
self.out_directory = '../output/received'
self.in_directory = '../data'
self.parse_options()
print self.filename
# self.total = 0.0;
for i in range(0, self.iterations):
self.run()
self.diff()
# for windowSize in [1000]:#, 2000, 5000, 10000, 15000, 20000]:
# print "--Results with window size " + str(windowSize)
#self.window = windowSize
# self.run()
# print "Average over " + str(iterations) + " iterations: " + str(self.total / float(iterations))
def parse_options(self):
parser = optparse.OptionParser(usage = "%prog [options]",
version = "%prog 0.1")
parser.add_option("-f","--filename",type="str",dest="filename",
default='test.txt',
help="filename to send")
parser.add_option("-l","--loss",type="float",dest="loss",
default=0.0,
help="random loss rate")
parser.add_option("-w","--window",type="int",dest="window",
default=1000,
help="transmission window size")
parser.add_option("-i","--iterations",type="int",dest="iterations",
default=1,
help="number of iterations to run")
(options,args) = parser.parse_args()
self.filename = options.filename
self.loss = options.loss
self.window = options.window
self.iterations = options.iterations
def diff(self):
args = ['diff','-u',self.in_directory + '/' + self.filename,self.out_directory+'/'+self.filename]
result = subprocess.Popen(args,stdout = subprocess.PIPE).communicate()[0]
print
if not result:
print "File transfer correct!"
else:
print "File transfer failed. Here is the diff:"
print
print result
sys.exit()
def run(self):
# parameters
Sim.scheduler.reset()
Sim.set_debug('AppHandler')
Sim.set_debug('TCP')
# setup network
net = Network('../networks/setup.txt')
net.loss(self.loss)
# setup routes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])
# setup transport
t1 = Transport(n1)
t2 = Transport(n2)
# setup application
a = AppHandler(self.filename, self.out_directory)
# setup connection
c1 = TCP(t1,n1.get_address('n2'),1,n2.get_address('n1'),1,a,window=self.window)
c2 = TCP(t2,n2.get_address('n1'),1,n1.get_address('n2'),1,a,window=self.window)
# send a file
with open(self.in_directory + '/' + self.filename,'r') as f:
while True:
data = f.read(10000)
if not data:
break
Sim.scheduler.add(delay=0, event=data, handler=c1.send)
# run the simulation
Sim.scheduler.run()
# print str(self.window) + " & " + \
# str(Sim.scheduler.current_time()) + " & " + \
# str(4116160.0 / float(Sim.scheduler.current_time())) + " & " + \
# str(c2.totalQueueingDelay / float(c1.totalPacketsSent)) + " \\\\"
# print str(self.window) + "," + str(4116160.0 / float(Sim.scheduler.current_time()))
print str(self.window) + "," + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Ave Queueing Delay: " + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Throughput: " + str(4116160.0 / float(Sim.scheduler.current_time()))
# self.total += Sim.scheduler.current_time()
if __name__ == '__main__':
m = Main()
|
y.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d)
def _CMergeFromString(self, s):
raise AbstractMethod
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise AbstractMethod
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise AbstractMethod
def ParseASCII(self, ascii_string):
raise AbstractMethod
def ParseASCIIIgnoreUnknown(self, ascii_string):
raise AbstractMethod
def Equals(self, other):
raise AbstractMethod
def __eq__(self, other):
if other.__class__ is self.__class__:
return self.Equals(other)
return NotImplemented
def __ne__(self, other):
if other.__class__ is self.__class__:
return not self.Equals(other)
return NotImplemented
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise AbstractMethod
def OutputPartial(self, e):
raise AbstractMethod
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise AbstractMethod
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
def MergeFrom(self, pb):
raise AbstractMethod
def lengthVarInt32(self, n):
return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
def lengthString(self, n):
return self.lengthVarInt32(n) + n
def DebugFormat(self, value):
return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -20000000000000 or value >= 20000000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
def DebugFormatFixed32(self, value):
if (value < 0): value += (1L<<32)
return "0x%x" % value
def DebugFormatFixed64(self, value):
if (value < 0): value += (1L<<64)
return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
class Encoder:
NUMERIC = 0
DOUBLE = 1
STRING = 2
STARTGROUP = 3
ENDGROUP = 4
FLOAT = 5
MAX_TYPE = 6
def __init__(self):
self.buf = array.array('B')
return
def buffer(self):
return self.buf
def put8(self, v):
if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
self.buf.append(v & 255)
return
def put16(self, v):
if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
return
def put32(self, v):
if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
return
def put64(self, v):
if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
self.buf.append((v >> 32) & 255)
self.buf.append((v >> 40) & 255)
self.buf.append((v >> 48) & 255)
self.buf.append((v >> 56) & 255)
return
def putVarInt32(self, v):
buf_append = self.buf.append
if v & 127 == v:
buf_append(v)
return
if v >= 0x80000000 or v < -0x80000000:
raise ProtocolBufferEncodeError, "int32 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarInt64(self, v):
buf_append = self.buf.append
if v >= 0x8000000000000000 or v < -0x8000000000000000:
raise ProtocolBufferEncodeError, "int64 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarUint64(self, v):
buf_append = self.buf.append
if v < 0 or v >= 0x10000000000000000:
raise ProtocolBufferEncodeError, "uint64 too big"
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putFloat(self, v):
a = array.array('B')
a.fromstring(struct.pack("<f", v))
self.buf.extend(a)
return
def putDouble(self, v):
a = array.array('B')
a.fromstring(struct.pack("<d", v))
self.buf.extend(a)
return
def putBoolean(self, v):
if v:
self.buf.append(1)
else:
self.buf.append(0)
return
def putPrefixedString(self, v):
v = str(v)
self.putVarInt32(len(v))
self.buf.fromstring(v)
return
def putRawString(self, v):
self.buf.fromstring(v)
class Decoder:
def __init__(self, buf, idx, limit):
self.buf = buf
self.idx = idx
self.limit = limit
return
def avail(self):
return self.limit - self.idx
def buffer(self):
return self.buf
def pos(self):
return self.idx
def skip(self, n):
if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
self.idx += n
return
def skipData(self, tag):
t = tag & 7
if t == Encoder.NUMERIC:
self.getVarInt64()
elif t == Encod |
"""
Copyright (C) 2017, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import time
import shutil
import argparse
import configparser
import operator
import itertools
import struct
import numpy as np
import pandas as pd
import tensorflow as tf
import model.yolo2.inference as inference
import utils
def transpose_weights(weights, num_anchors):
ksize1, ksize2, channels_in, _ = weights.shape
weights = weights.reshape([ksize1, ksize2, channels_in, num_anchors, -1])
coords = weights[:, :, :, :, 0:4]
iou = np.expand_dims(weights[:, :, :, :, 4], -1)
classes = weights[:, :, :, :, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([ksize1, ksize2, channels_in, -1])
def transpose_biases(biases, num_anchors):
biases = biases.reshape([num_anchors, -1])
coords = biases[:, 0:4]
iou = np.expand_dims(biases[:, 4], -1)
classes = biases[:, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([-1])
def transpose(sess, layer, num_anchors):
v = next(filter(lambda v: v.op.name.endswith('weights'), layer))
sess.run(v.assign(transpose_weights(sess.run(v), num_anchors)))
v = next(filter(lambda v: v.op.name.endswith('biases'), layer))
sess.run(v.assign(transpose_biases(sess.run(v), num_anchors)))
def main():
model = config.get('config', 'model')
cachedir = utils.get_cachedir(config)
with open(os.path.join(cachedir, 'names'), 'r') as f:
names = [line.strip() for line in f]
width, height = np.array(utils.get_downsampling(config)) * 13
anchors = pd.read_csv(os.path.expanduser(os.path.expandvars(config.get(model, 'anchors'))), sep='\t').values
func = getattr(inference, config.get(model, 'inference'))
with tf.Session() as sess:
image = tf.placeholder(tf.float32, [1, height, width, 3], name='image')
func(image, len(names), len(anchors))
tf.contrib.framework.get_or_create_global_step()
tf.global_variables_initializer().run()
prog = re.compile(r'[_\w\d]+\/conv(\d*)\/(weights|biases|(BatchNorm\/(gamma|beta|moving_mean|moving_variance)))$')
variables = [(prog.match(v.op.name).group(1), v) for v in tf.global_variables() if prog.match(v.op.name)]
variables = sorted([[int(k) if k else -1, [v for _, v in g]] for k, g in itertools.groupby(variables, operator.itemgetter(0))], key=operator.itemgetter(0))
assert variables[0][0] == -1
variables[0][0] = len(variables) - 1
variables.insert(len(variables), variables.pop(0))
with tf.name_scope('assign'):
with open(os.path.expanduser(os.path.expandvars(args.file)), 'rb') as f:
major, minor, revision, seen = struct.unpack('4i', f.read(16))
tf.logging.info('major=%d, minor=%d, revision=%d, seen=%d' % (major, minor, revision, seen))
for i, layer in variables:
tf.logging.info('processing layer %d' % i)
total = 0
for suffix in ['biases', 'beta', 'gamma', 'moving_mean', 'moving_variance', 'weights']:
try:
v = next(filter(lambda v: v.op.name.endswith(suffix), layer))
except StopIteration:
continue
shape = v.get_shape().as_list()
cnt = np.multiply.reduce(shape)
total += cnt
tf.logging.info('%s: %s=%d' % (v.op.name, str(shape), cnt))
p = struct.unpack('%df' % cnt, f.read(4 * cnt))
if suffix == 'weights':
ksize1, ksize2, channels_in, channels_out = shape
p = np.reshape(p, [channels_out, channels_in, ksize1, ksize2]) # Darknet format
p = np.transpose(p, [2, 3, 1, 0]) # TensorFlow format (ksize1, ksize2, channels_in, channels_out)
sess.run(v.assign(p))
tf.logging.info('%d parameters assigned' % total)
remaining = os.fstat(f.fileno()).st_size - f.tell()
transpose(sess, layer, len(anchors))
saver = tf.train.Saver()
logdir = utils.get_logdir(config)
if args.delete:
tf.logging.warn('delete logging directory: ' + logdir)
shutil.rmtree(logdir, ignore_errors=True)
os.makedirs(logdir, exist_ok=True)
model_path = os.path.join(logdir, 'model.ckpt')
tf.logging.info('save model into ' + model_path)
saver.save(sess, model_path)
if args.summary:
path = os.path.join(logdir, args.logname)
summary_writer = tf.summary.FileWriter(path)
summary_writer.add_graph(sess.graph)
tf.logging.info('tensorboard --logdir ' + logdir)
if remaining > 0:
tf.logging.warn('%d bytes remaining' % remaining)
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('file', help='Darknet .weights file')
parser.add_argument('-c', '--config', nargs='+', default=['config.ini'], help='config file')
parser.add_argument('-d', '--delete', action='store_true', help='delete logdir')
parser.add_argument('-s', '--summary', action='store_true')
parser.add_argument('--logname', default=time.strftime('%Y-%m-%d_%H-%M-%S'), help='the name of TensorBoard log')
parser.add_argument('--level', default='info', help='logging level')
return parser.parse_args()
if __name__ == '__main__':
args = make_args()
config = configparser.ConfigParser()
utils.load_config(config, args.config)
if args.level:
tf.logging.set_verbosity(args.level.upper())
main()
|
model_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.response-format?" + \
"[q=search term&" + \
"fq=filter-field:(filter-term)&additional-params=values]" + \
"&api-key=9key"
"""http://api.nytimes.com/svc/search/v2/articlesearch.json?q=terrorism+OR+terrorist
&begin_date=19900102&end_date=19900103&sort=newest&api-key=
key"""
search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json?" + \
"[q=terror]" + \
"&api-key=key"
precise_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json"
terms = "?q=terrorism+OR+terrorist"
api = "&api-key=key"
print(precise_search+terms+dates+api)
"""
Aggressive for-looping to get around the ten-article limit: instead, search each keyword per day, then concatenate the JSONs into a pandas DataFrame and eventually a CSV.
"""
months_list = ["%.2d" % i for i in range(1,2)]
days_list = ["%.2d" % i for i in range(1,32)]
json_files = []
print(months_list)
for x in months_list:
month_s = x
month_e = x
for y in days_list:
day_s = y
day_e = str(int(y)+1).zfill(2)
year_s = "1990"
year_e = "1990"
start = year_s + month_s + day_s
end = year_e + month_e + day_e
dates = "&begin_date="+start+"&end_date="+end+"&sort=newest"
#print(start + " "+end + "\n" +dates)
r = requests.get(precise_search+terms+dates+api)
original_json = json.loads(r.text)
response_json = original_json['response']
json_file = response_json['docs']
json_files.append(json_file)
frames = []
for x in json_files:
df = pd.DataFrame.from_dict(x)
frames.append(df)
#print(frames)
result = pd.concat(frames)
result
|
import os
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag()
def custom_css():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"overrides.css"
)
if os.path.exists(theme_path):
return mark_safe(
'<link rel="stylesheet" type="text/css" href="{}" />'.format(
os.path.join(settings.MEDIA_URL, "overrides.css")
)
)
return ""
@register.simple_tag()
def custom_js():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"override | s.js"
)
if os.path.exists(theme_path):
return mark_safe(
'<script src="{}"></script>'.format(
os.path.join(settings.MEDIA_URL, "overrides.js")
)
)
return ""
|
constants import *
def case3(output=True):
accuracy_in_each_turn = list()
precision_in_each_turn_spam = list()
recall_in_each_turn_spam = list()
precision_in_each_turn_ham = list()
recall_in_each_turn_ham = list()
m = np.loadtxt(open("resources/normalized_data.csv","rb"),delimiter=',')
shuffled = np.random.permutation(m)
valid.validate_cross_validation(NUMBER_OF_ROUNDS,TRAIN_TEST_RATIO)
# equiprobable priors
prior_spam = 0.5
prior_ham = 0.5
for i in xrange(NUMBER_OF_ROUNDS):
# we're using cross-validation so each iteration we take a different
# slice of the data to serve as test set
train_set,test_set = prep.split_sets(shuffled,TRAIN_TEST_RATIO,i)
#parameter estimation
#but now we take ALL attributes into consideration
sample_means_word_spam = list()
sample_means_word_ham = list()
sample_variances_word_spam = list()
sample_variances_word_ham = list()
# all but the last one
for attr_index in xrange(57):
sample_means_word_spam.append(nb.take_mean_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_means_word_ham.append(nb.take_mean_ham(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_spam.append(nb.take_variance_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_ham.append(nb.take_variance_ham(train_set,attr_index,SPAM_ATTR_INDEX))
#sample standard deviations from sample variances
sample_std_devs_spam = map(lambda x: x ** (1/2.0), sample_variances_word_spam)
sample_std_devs_ham = map(lambda x: x ** (1/2.0), sample_variances_word_ham)
hits = 0.0
misses = 0.0
#number of instances correctly evaluated as spam
correctly_is_spam = 0.0
#total number of spam instances
is_spam = 0.0
#total number of instances evaluated as spam
guessed_spam = 0.0
#number of instances correctly evaluated as ham
correctly_is_ham = 0.0
#total number of ham instances
is_ham = 0.0
#total number of instances evaluated as ham
guessed_ham = 0.0
# now we test the hypothesis against the test set
for row in test_set:
# i.e. the product of all the conditional probabilities of the words given the class
# I know it looks a bit confusing, but look carefully and it is all done in a single line! =)
# (case 3 uses all 57 attributes, so index the row directly rather than through CASE_2_ATTRIBUTE_INDEXES)
product_of_all_conditional_probs_spam = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_spam[cur], sample_std_devs_spam[cur]).pdf(row[cur]), xrange(57), 1)
# no need to divide by the normalization term since we only want to know which posterior is greater!
posterior_spam = prior_spam * product_of_all_conditional_probs_spam
product_of_all_conditional_probs_ham = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_ham[cur], sample_std_devs_ham[cur]).pdf(row[cur]), xrange(57), 1)
posterior_ham = prior_ham * product_of_all_conditional_probs_ham
# whichever is greater - that will be our prediction
if posterior_spam > posterior_ham:
guess = 1
else:
guess = 0
if(row[SPAM_ATTR_INDEX] == guess):
hits += 1
else:
misses += 1
# we'll use these to calculate metrics
if (row[SPAM_ATTR_INDEX] == 1 ):
is_spam += 1
if guess == 1:
guessed_spam += 1
correctly_is_spam += 1
else:
guessed_ham += 1
else:
is_ham += 1
if guess == 1:
guessed_spam += 1
else:
guessed_ham += 1
correctly_is_ham += 1
#accuracy = number of correctly evaluated instances/
# number of instances
#
#
accuracy = hits/(hits+misses)
#precision_spam = number of correctly evaluated instances as spam/
# number of spam instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_spam == 0):
precision_spam = 0
else:
precision_spam = correctly_is_spam/is_spam
#recall_spam = number of correctly evaluated instances as spam/
# number of evaluated instances as spam
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_spam == 0):
recall_spam = 0
else:
recall_spam = correctly_is_spam/guessed_spam
#precision_ham = number of correctly evaluated instances as ham/
# number of ham instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_ham == 0):
precision_ham = 0
else:
precision_ham = correctly_is_ham/is_ham
#recall_ham = number of correctly evaluated instances as ham/
# number of evaluated instances as ham
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_ham == 0):
recall_ham = 0
else:
recall_ham = correctly_is_ham/guessed_ham
accuracy_in_each_turn.append(accuracy)
precision_in_each_turn_spam.append(precision_spam)
recall_in_each_turn_spam.append(recall_spam)
precision_in_each_turn_ham.append(precision_ham)
recall_in_each_turn_ham.append(recall_ham)
# calculation of means for each metric at the end
mean_accuracy = np.mean(accuracy_in_each_turn)
std_dev_accuracy = np.std(accuracy_in_each_turn)
variance_accuracy = np.var(accuracy_in_each_turn)
mean_precision_spam = np.mean(precision_in_each_turn_spam)
std_dev_precision_spam = np.std(precision_in_each_turn_spam)
variance_precision_spam = np.var(precision_in_each_turn_spam)
mean_recall_spam = np.mean(recall_in_each_turn_spam)
std_dev_recall_spam = np.std(recall_in_each_turn_spam)
variance_recall_spam = np.var(recall_in_each_turn_spam)
mean_precision_ham = np.mean(precision_in_each_turn_ham)
std_dev_precision_ham = np.std(precision_in_each_turn_ham)
variance_precision_ham = np.var(precision_in_each_turn_ham)
mean_recall_ham = np.mean(recall_in_each_turn_ham)
std_dev_recall_ham = np.std(recall_in_each_turn_ham)
variance_recall_ham = np.var(recall_in_each_turn_ham)
if output:
print "\033[1;32m"
print '============================================='
print 'CASE 3 - ALL ATTRIBUTES - USING NORMAL MODEL'
print '============================================='
print "\033[00m"
print 'MEAN ACCURACY: '+str(round(mean_accuracy,5))
print 'STD. DEV. OF ACCURACY: '+str(round(std_dev_accuracy,5))
print 'VARIANCE OF ACCURACY: '+str(round(variance_accuracy,8))
print ''
print 'MEAN PRECISION FOR SPAM: '+str(round(mean_precision_spam,5))
print 'STD. DEV. OF PRECISION FOR SPAM: '+str(round(std_dev_precision_spam,5))
print 'VARIANCE OF PRECISION FOR SPAM: '+str(round(variance_precision_spam,8))
print ''
print 'MEAN RECALL FOR SPAM: '+str(round(mean_recall_spam,5))
print 'STD. DEV. OF RECALL FOR SPAM: '+str(round(std_dev_recall_spam,5))
print 'VARIANCE OF RECALL FOR SPAM: '+str(round(variance_recall_spam,8))
print ''
print 'MEAN PRECISION FOR HAM: '+str(round(mean_precision_ham,5))
print 'STD. DEV. OF PRECISION FOR HAM: '+str(round(std_dev_precision_ham,5))
print 'VARIANCE OF PRECISION FOR HAM: '+str(round(variance_precision_ham,8))
print ''
print 'MEAN RECALL FOR HAM: '+str(round(mean_recall_ham,5))
print 'STD. DEV. OF RECALL FOR HAM: '+str(round(std_dev_recall_ham,5))
pri |
#!/usr/bin/python
# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhsm_repository
short_description: Manage RHSM repositories using the subscription-manager command
description:
- Manage (enable or disable) RHSM repositories on the Red Hat Subscription
Management entitlement platform using the C(subscription-manager) command.
version_added: '2.5'
author: Giovanni Sciortino (@giovannisciortino)
notes:
- In order to manage RHSM repositories the system must be already registered
to RHSM manually or using the Ansible C(redhat_subscription) module.
requirements:
- subscription-manager
options:
state:
description:
- If state is set to present or enabled, the repository is enabled;
if set to absent or disabled, the repository is disabled.
choices: [present, enabled, absent, disabled]
required: False
default: "enabled"
name:
description:
- The ID of repositories to enable.
- To operate on several repositories this can accept a comma separated
list or a YAML list.
required: True
'''
EXAMPLES = '''
- name: Enable a RHSM repository
rhsm_repository:
name: rhel-7-server-rpms
- name: Disable all RHSM repositories
rhsm_repository:
name: '*'
state: disabled
- name: Enable all repositories starting with rhel-6-server
rhsm_repository:
name: rhel-6-server*
state: enabled
- name: Disable all repositories except rhel-7-server-rpms
rhsm_repository:
name: "{{ item }}"
state: disabled
with_items: "{{
rhsm_repository.repositories |
map(attribute='id') |
difference(['rhel-7-server-rpms']) }}"
'''
RETURN = '''
repositories:
description:
- The list of RHSM repositories with their states.
- When this module is used to change the repositories states, this list contains the updated states after the changes.
returned: success
type: list
'''
import re
import os
from fnmatch import fnmatch
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
def run_subscription_manager(module, arguments):
# Execute subscription-manager with arguments and manage common errors
rhsm_bin = module.get_bin_path('subscription-manager')
if not rhsm_bin:
module.fail_json(msg='The executable file subscription-manager was not found in PATH')
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
module.fail_json(msg='This system has no repositories available through subscriptions')
elif rc == 1:
module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
else:
return rc, out, err
def get_repository_list(module, list_parameter):
# Generate RHSM repository list and return a list of dict
if list_parameter == 'list_enabled':
rhsm_arguments = ['repos', '--list-enabled']
elif list_parameter == 'list_disabled':
rhsm_arguments = ['repos', '--list-disabled']
elif list_parameter == 'list':
rhsm_arguments = ['repos', '--list']
rc, out, err = run_subscription_manager(module, rhsm_arguments)
skip_lines = [
'+----------------------------------------------------------+',
' Available Repositories in /etc/yum.repos.d/redhat.repo'
]
repo_id_re_str = r'Repo ID: (.*)'
repo_name_re_str = r'Repo Name: (.*)'
repo_url_re_str = r'Repo URL: (.*)'
repo_enabled_re_str = r'Enabled: (.*)'
repo_id = ''
repo_name = ''
repo_url = ''
repo_enabled = ''
repo_result = []
for line in out.split('\n'):
if line in skip_lines:
continue
repo_id_re = re.match(repo_id_re_str, line)
if repo_id_re:
repo_id = repo_id_re.group(1)
continue
repo_name_re = re.match(repo_name_re_str, line)
if repo_name_re:
repo_name = repo_name_re.group(1)
continue
repo_url_re = re.match(repo_url_re_str, line)
if repo_url_re:
repo_url = repo_url_re.group(1)
continue
repo_enabled_re = re.match(repo_enabled_re_str, line)
if repo_enabled_re:
repo_enabled = repo_enabled_re.group(1)
repo = {
"id": repo_id,
"name": repo_name,
"url": repo_url,
"enabled": True if repo_enabled == '1' else False
}
repo_result.append(repo)
return repo_result
def repository_modify(module, state, name):
name = set(name)
current_repo_list = get_repository_list(module, 'list')
updated_repo_list = deepcopy(current_repo_list)
matched_existing_repo = {}
for repoid in name:
matched_existing_repo[repoid] = []
for idx, repo in enumerate(current_repo_list):
if fnmatch(repo['id'], repoid):
matched_existing_repo[repoid].append(repo)
# Update current_repo_list to return it as result variable
updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
changed = False
results = []
diff_before = ""
diff_after = ""
rhsm_arguments = ['repos']
for repoid in matched_existing_repo:
if len(matched_existing_repo[repoid]) == 0:
results.append("%s is not a valid repository ID" % repoid)
module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
for repo in matched_existing_repo[repoid]:
if state in ['disabled', 'absent']:
if repo['enabled']:
changed = True
diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
results.append("Repository '%s' is disabled for this system" % repo['id'])
rhsm_arguments += ['--disable', repo['id']]
elif state in ['enabled', 'present']:
if not repo['enabled']:
changed = True
diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
results.append("Repository '%s' is enabled for this system" % repo['id'])
rhsm_arguments += ['--enable', repo['id']]
diff = {'before': diff_before,
'after': diff_after,
'before_header': "RHSM repositories",
'after_header': "RHSM repositories"}
if not module.check_mode:
rc, out, err = run_subscription_manager(module, rhsm_arguments)
results = out.split('\n')
module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', required=True),
state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
),
supports_check_mode=True,
)
name = module.params['name']
state = module.params['state']
repository_modify(module, state, name)
if __name__ == '__main__':
main()
|
import unittest
from factorial import fact
class TestFactorial(unittest.TestCase):
"""
Our basic test class
"""
def test_fact(self):
"""
The actual test.
Any method which starts with ``test_`` will be considered as a test case.
"""
res = fact(5)
self.assertEqual(res, 120)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.report_aeroo import report_aeroo
from openerp.addons.at_base import util
from openerp.osv import fields, osv
from openerp.tools.translate import _
class invoice_attachment_wizard(osv.TransientModel):
_name = "account.invoice.attachment.wizard"
_description = "Invoice Attachment Wizard"
def action_import(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0])
invoice_id = util.active_id(context, "account.invoice")
if not invoice_id:
raise osv.except_osv(_("Error!"), _("No invoice found"))
report_obj = self.pool.get("ir.actions.report.xml")
data=base64.decodestring(wizard.document)
data = report_aeroo.fixPdf(data)
if not data:
raise osv.except_osv(_("Error!"), _("PDF is corrupted and unable to fix!"))
if not report_obj.write_attachment(cr, uid, "account.invoice", invoice_id, report_name="account.report_invoice", datas=base64.encodestring(data), context=context, origin="account.invoice.attachment.wizard"):
raise osv.except_osv(_("Error!"), _("Unable to import document (check if invoice is validated)"))
return { "type" : "ir.actions.act_window_close" }
_columns = {
"document" : fields.binary("Document")
} |
# partial unit test for gmpy2 threaded mpz functionality
# relies on Tim Peters' "doctest.py" test-driver
import gmpy2 as _g, doctest, sys, operator, gc, queue, threading
from functools import reduce
__test__={}
def _tf(N=2, _K=1234**5678):
"""Takes about 100ms on a first-generation Macbook Pro"""
for i in range(N): assert (_g.mpz(1234)**5678)==_K
a=_g.mpz(123)
b=_g.mpz(456)
c=_g.mpz(123456789123456789)
def factorize(x=c):
r'''
(Takes about 25ms, on c, on a first-generation Macbook Pro)
>>> factorize(a)
[3, 41]
>>> factorize(b)
[2, 2, 2, 3, 19]
>>>
'''
import gmpy2 as _g
savex=x
prime=2
x=_g.mpz(x)
factors=[]
while x>=prime:
newx,mult=x.remove(prime)
if mult:
factors.extend([int(prime)]*mult)
x=newx
prime=_g.next_prime(prime)
for factor in factors: assert _g.is_prime(factor)
from operator import mul
assert reduce(mul, factors)==savex
return factors
def elemop(N=1000):
r'''
(Takes about 40ms on a first-generation Macbook Pro)
'''
for i in range(N):
assert a+b == 579
assert a-b == -333
assert b*a == a*b == 56088
assert b%a == 87
assert divmod(a, b) == (0, 123)
assert divmod(b, a) == (3, 87)
assert -a == -123
assert pow(a, 10) == 792594609605189126649
assert pow(a, 7, b) == 99
assert (a > b) - (a < b) == -1  # cmp() was removed in Python 3
assert '7' in str(c)
assert '0' not in str(c)
assert a.sqrt() == 11
assert _g.lcm(a, b) == 18696
assert _g.fac(7) == 5040
assert _g.fib(17) == 1597
assert _g.divm(b, a, 20) == 12
assert _g.divm(4, 8, 20) == 3
assert _g.divm(4, 8, 20) == 3
assert _g.mpz(20) == 20
assert _g.mpz(8) == 8
assert _g.mpz(4) == 4
assert a.invert(100) == 87
def _test(chat=None):
if chat:
print("Unit tests for gmpy2 (threading)")
print(" on Python %s" % sys.version)
print("Testing gmpy2 {0}".format(_g.version()))
print(" Mutliple-precision library: {0}".format(_g.mp_version()))
print(" Floating-point library: {0}".format(_g.mpfr_version()))
print(" Complex library: {0}".format(_g.mpc_version()))
print(" Caching Valu | es: (Number) {0}".format(_g.get_cache()[0]))
print(" Caching Values: (Size, | limbs) {0}".format(_g.get_cache()[1]))
thismod = sys.modules.get(__name__)
doctest.testmod(thismod, report=0)
if chat: print("Repeating tests, with caching disabled")
_g.set_cache(0,128)
sav = sys.stdout
class _Dummy:
def write(self,*whatever):
pass
try:
sys.stdout = _Dummy()
doctest.testmod(thismod, report=0)
finally:
sys.stdout = sav
if chat:
print()
print("Overall results for thr:")
return doctest.master.summarize(chat)
class DoOne(threading.Thread):
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while True:
task = self.q.get()
if task is None: break
task()
def _test_thr(Ntasks=5, Nthreads=1):
q = queue.Queue()
funcs = (_tf, 1), (factorize, 4), (elemop, 2)
for i in range(Ntasks):
for f, n in funcs:
for x in range(n):
q.put(f)
for i in range(Nthreads):
q.put(None)
thrs = [DoOne(q) for i in range(Nthreads)]
for t in thrs: t.start()
for t in thrs: t.join()
if __name__=='__main__':
_test(1)
|
class WordDistance(object):
def __init__(self, words):
""" |
initialize your data structure here.
:type words: List[str]
"""
self.word_dict = {}
for idx, w in enumerate(words):
self.word_dict[w] = self.word_dict.get(w, []) + [idx]
def shortest(self, word1, word2):
"""
Returns the shortest distance between word1 and word2 in the word list.
:type word1: str
:type word2: str
:rtype: int
"""
return min(abs(i - j) for i in self.word_dict[word1] for j in self.word_dict[word2])
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2") |
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud iam service account'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
name=dict(default=None, type='str'),
display_name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
gcloud = GcloudIAMServiceAccount(module.params['name'], module.params['display_name'])
state = module.params['state']
api_rval = gcloud.list_service_accounts()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.delete_service_account()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = gcloud.create_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# update
elif gcloud.needs_update():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = gcloud.update_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present|update")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
from framework.db import models
from framework.config import config
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ResourceInterface
from framework.lib.general import cprint
import os
import logging
from framework.utils import FileOperations
class ResourceDB(BaseComponent, ResourceInterface):
COMPONENT_NAME = "resource"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db_config = self.get_component("db_config")
self.target = self.get_component("target")
self.db = self.get_component("db")
self.LoadResourceDBFromFile(self.config.get_profile_path("RESOURCES_PROFILE"))
def LoadResourceDBFromFile(self, file_path): # This needs to be a list instead of a dictionary to preserve order in python < 2.7
logging.info("Loading Resources from: " + file_path + " ..")
resources = self.GetResourcesFromFile(file_path)
# Delete all old resources which are not edited by user
# because we may have updated the resource
self.db.session.query(models.Resource).filter_by(dirty=False).delete()
# resources = [(Type, Name, Resource), (Type, Name, Resource),]
for Type, Name, Resource in resources:
self.db.session.add(models.Resource(resource_type=Type, resource_name=Name, resource=Resource))
self.db.session.commit()
def GetResourcesFromFile(self, resource_file):
resources = set()
ConfigFile = FileOperations.open(resource_file, 'r').read().splitlines() # To remove stupid '\n' at the end
for line in ConfigFile:
if '#' == line[0]:
continue # Skip comment lines
try:
Type, Name, Resource = line.split('_____')
# Resource = Resource.strip()
resources.add((Type, Name, Resource))
except ValueError:
cprint("ERROR: The delimiter is incorrect in this line at Resource File: "+str(line.split('_____')))
return resources
def GetReplacementDict(self):
configuration = self.db_config.GetReplacementDict()
configuration.update(self.target.GetTargetConfig())
configuration.update(self.config.GetReplacementDict())
return configuration
def GetRawResources(self, ResourceType):
filter_query = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter_by(resource_type = ResourceType)
# Sorting is necessary for working of ExtractURLs, since it must run after main command, so order is imp
sort_query = filter_query.order_by(models.Resource.id)
raw_resources = sort_query.all()
return raw_resources
def GetResources(self, ResourceType):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResources(ResourceType)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
def GetRawResourceList(self, ResourceList):
raw_resources = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter(models.Resource.resource_type.in_(ResourceList)).all()
return raw_resources
def GetResourceList(self, ResourceTypeList):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResourceList(ResourceTypeList)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
|
a given user
-skipuser: Only process pages not edited by a given user
-timestamp: (With -user or -skipuser). Only check for a user where his edit is
not older than the given timestamp. Timestamp must be written in
MediaWiki timestamp format which is "%Y%m%d%H%M%S"
If this parameter is missing, all edits are checked but this is
restricted to the last 100 edits.
-summary: Lets you pick a custom edit summary. Use quotes if edit summary
contains spaces.
-always Don't bother asking to confirm any of the changes, Just Do It.
-addcat: Appends the given category to every page that is edited. This is
useful when a category is being broken out from a template
parameter or when templates are being upmerged but more information
must be preserved.
other: First argument is the old template name, second one is the new
name.
If you want to address a template which has spaces, put quotation
marks around it, or use underscores.
Examples:
If you have a template called [[Template:Cities in Washington]] and want to
change it to [[Template:Cities in Washington state]], start
python pwb.py template "Cities in Washington" "Cities in Washington state"
Move the page [[Template:Cities in Washington]] manually afterwards.
If you have a template called [[Template:test]] and want to substitute it only
on pages in the User: and User talk: namespaces, do:
python pwb.py template test -subst -namespace:2 -namespace:3
Note that -namespace: is a global Pywikibot parameter
This next example substitutes the template lived with a supplied edit summary.
It only performs substitutions in main article namespace and doesn't prompt to
start replacing. Note that -putthrottle: is a global Pywikibot parameter.
python pwb.py template -putthrottle:30 -namespace:0 lived -subst -always \
-summary:"BOT: Substituting {{lived}}, see [[WP:SUBST]]."
This next example removes the templates {{cfr}}, {{cfru}}, and {{cfr-speedy}}
from five category pages as given:
python pwb.py template cfr cfru cfr-speedy -remove -always \
-page:"Category:Mountain monuments and memorials" \
-page:"Category:Indian family names" \
-page:"Category:Tennis tournaments in Belgium" \
-page:"Category:Tennis tournaments in Germany" \
-page:"Category:Episcopal cathedrals in the United States" \
-summary:"Removing Cfd templates from category pages that survived."
This next example substitutes templates test1, test2, and space test on all
pages:
python pwb.py template test1 test2 "space test" -subst -always
"""
#
# (C) Daniel Herding, 2004
# (C) Rob W.W. Hooft, 2003-2005
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
from warnings import warn
import pywikibot
from pywikibot import i18n, pagegenerators, xmlreader, Bot
from pywikibot.exceptions import ArgumentDeprecationWarning
from scripts.replace import ReplaceRobot as ReplaceBot
class XmlDumpTemplatePageGenerator(object):
"""
Generator which yields Pages that transclude a template.
These pages will be retrieved from a local XML dump file
(cur table), and may not still transclude the template.
"""
def __init__(self, templates, xmlfilename):
"""
Constructor.
Arguments:
* templateNames - A list of Page object representing the searched
templates
* xmlfilename - The dump's path, either absolute or relative
"""
self.templates = templates
self.xmlfilename = xmlfilename
def __iter__(self):
"""Yield page objects until the entire XML dump has been read."""
mysite = pywikibot.Site()
dump = xmlreader.XmlDump(self.xmlfilename)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
templatePatterns = []
for template in self.templates:
templatePattern = template.title(withNamespace=False)
if mysite.namespaces[10].case == 'first-letter':
templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
templatePattern[0].lower(),
templatePattern[1:])
templatePattern = re.sub(' ', '[_ ]', templatePattern)
templatePatterns.append(templatePattern)
templateRegex = re.compile(
r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
% '|'.join(templatePatterns))
for entry in dump.parse():
if templateRegex.search(entry.text):
page = pywikibot.Page(mysite, entry.title)
yield page
class TemplateRobot(ReplaceBot):
"""This bot will replace, remove or subst all occurrences of a template."""
def __init__(self, generator, templates, **kwargs):
"""
Constructor.
@param generator: the pages to work on
@type generator: iterable
@param templates: a dictionary which maps old template names to
their replacements. If remove or subst is True, it maps the
names of the templates that should be removed/resolved to None.
@type templates: dict
"""
self.availableOptions.update({
'subst': False,
'remove': False,
'summary': None,
'addedCat': None,
})
Bot.__init__(self, generator=generator, **kwargs)
self.templates = templates
# get edit summary message if it's empty
if not self.getOption('summary'):
comma = self.site.mediawiki_message('comma-separator')
params = {'list': comma.join(self.templates.keys()),
'num': len(self.templates)}
site = self.site
if self.getOption('remove'):
self.options['summary'] = i18n.twntranslate(
site, 'template-removing', params)
elif self.getOption('subst'):
self.options['summary'] = i18n.twntranslate(
site, 'template-substituting', params)
else:
self.options['summary'] = i18n.twntranslate(
site, 'template-changing', params)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
# The group 'parameters' will either match the parameters, or an
# empty string if there are none.
replacements = []
exceptions = {}
namespace = self.site.namespaces[10]
for old, new in self.templates.items():
if namespace.case == 'first-letter':
pattern = '[' + \
re.escape(old[0].upper()) + \
re.escape(old[0].lower()) + \
']' + re.escape(old[1:])
else:
pattern = re.escape(old)
pattern = re.sub(r'_|\\ ', r'[_ ]', pattern)
templateRegex = re.compile(r'\{\{ *(' + ':|'.join(namespace) +
r':|[mM][sS][gG]:)?' + pattern +
r'(?P<parameters>\s*\|.+?|) *}}',
re.DOTALL)
if self.getOption('subst') and self.getOption('remove'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % new))
exceptions['inside-tags'] = ['ref', 'gallery']
elif self.getOption('subst'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % old))
ex |
# -*- coding: utf-8 -*-
from canaimagnulinux.wizard.interfaces import IChat
from canaimagnulinux.wizard.interfaces import ISocialNetwork
from canaimagnulinux.wizard.utils import CanaimaGnuLinuxWizardMF as _
from collective.beaker.interfaces import ISession
from collective.z3cform.wizard import wizard
from plone import api
from plone.z3cform.fieldsets import group
from z3c.form import field
try:
from zope.browserpage import viewpagetemplatefile
except ImportError:
# Plone < 4.1
from zope.app.pagetemplate import viewpagetemplatefile
import logging
logger = logging.getLogger(__name__)
class ChatGroup(group.Group):
prefix = 'chats'
label = _(u'Chats Information')
fields = field.Fields(IChat)
class SocialNetworkGroup(group.Group):
prefix = 'socialnetwork'
label = _(u'Social Network Information')
fields = field.Fields(ISocialNetwork)
class SocialNetworkStep(wizard.GroupStep):
prefix = 'Social'
label = _(u'Social Network accounts')
description = _(u'Input your social networks details')
template = viewpagetemplatefile.ViewPageTemplateFile('templates/socialnetwork.pt')
fields = field.Fields()
groups = [ChatGroup, SocialNetworkGroup]
def __init__(self, context, request, wizard):
        # Use collective.beaker for session management
session = ISession(request, None)
self.sessionmanager = session
super(SocialNetworkStep, self).__init__(context, request, wizard)
def load(self, context):
member = api.user.get_current()
data = self.getContent()
# Chats group
if not data.get('irc', None):
irc = member.getProperty('irc')
if type(irc).__name__ == 'object':
irc = None
data['irc'] = irc
if not data.get('telegram', None):
telegram = member.getProperty('telegram')
if type(telegram).__name__ == 'object':
telegram = None
data['telegram'] = telegram
if not data.get('skype', None):
skype = member.getProperty('skype')
if type(skype).__name__ == 'object':
skype = None
data['skype'] = skype
# Social Network group
if not data.get('twitter', None):
twitter = member.getProperty('twitter')
if type(twitter).__name__ == 'object':
twitter = None
data['twitter'] = twitter
if not data.get('instagram', None):
instagram = member.getProperty('instagram')
if type(instagram).__name__ == 'object':
instagram = None
data['instagram'] = instagram
if not data.get('facebook', None):
facebook = member.getProperty('facebook')
if type(facebook).__name__ == 'object':
facebook = None
data['facebook'] = facebook
def apply(self, context, initial_finish=False):
data = self.getContent()
return data
def applyChanges(self, data):
member = api.user.get_current()
member.setMemberProperties(mapping={
'irc': data['irc'],
'telegram': data['telegram'],
'skype': data['skype'],
'twitter': data['twitter'],
'instagram': data['instagram'],
'facebook': data['facebook']}
)
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"realpath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
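# A minimal sketch of the expected behaviour, assuming classic Mac path syntax:
#   join('toto', 'tata')      -> ':toto:tata'  (relative results gain a leading colon)
#   join('HD:Users', 'tmp')   -> 'HD:Users:tmp'
#   join('toto', 'HD:tmp')    -> 'HD:tmp'      (an absolute component replaces the path)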
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
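# A minimal sketch of the expected behaviour:
#   splitext('HD:doc.txt')       -> ('HD:doc', '.txt')
#   splitext(':archive.tar.gz')  -> (':archive.tar', '.gz')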
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
    Always false on the Mac, until we understand Aliases."""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
        for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility | with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
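# A minimal sketch of the expected behaviour ('::' steps up one directory level):
#   normpath('toto')          -> ':toto'
#   normpath('HD:Users::tmp') -> 'HD:tmp'
#   normpath('HD:Users:')     -> 'HD:Users'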
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
|
from datetime import datetime
def foo(p):
"""Foo
    :param datetime p: a datetime
<ref>
""" |
kState(QtCore.Qt.Checked)
else:
field.setCheckState(QtCore.Qt.Unchecked)
elif isinstance(value, float):
field = QtWidgets.QLineEdit(repr(value), self)
field.setCursorPosition(0)
field.setValidator(QtGui.QDoubleValidator(field))
field.validator().setLocale(QtCore.QLocale("C"))
dialog = self.get_dialog()
dialog.register_float_field(field)
field.textChanged.connect(lambda text: dialog.update_buttons())
elif isinstance(value, int):
field = QtWidgets.QSpinBox(self)
field.setRange(-1e9, 1e9)
field.setValue(value)
elif isinstance(value, datetime.datetime):
field = QtWidgets.QDateTimeEdit(self)
field.setDateTime(value)
elif isinstance(value, datetime.date):
field = QtWidgets.QDateEdit(self)
field.setDate(value)
else:
field = QtWidgets.QLineEdit(repr(value), self)
self.formlayout.addRow(label, field)
# print(self.formlayout.fieldGrowthPolicy())
self.widgets.append(field)
def get(self):
valuelist = []
for index, (label, value) in enumerate(self.data):
field = self.widgets[index]
if label is None:
# Separator / Comment
continue
elif tuple_to_qfont(value) is not None:
value = field.get_font()
elif (isinstance(value, six.string_types)
or mcolors.is_color_like(value)):
value = six.text_type(field.text())
elif isinstance(value, (list, tuple)):
index = int(field.currentIndex())
if isinstance(value[0], (list, tuple)):
value = value[index][0]
else:
value = value[index]
elif isinstance(value, bool):
value = field.checkState() == QtCore.Qt.Checked
elif isinstance(value, float):
value = float(str(field.text()))
elif isinstance(value, int):
value = int(field.value())
elif isinstance(value, datetime.datetime):
value = field.dateTime().toPyDateTime()
elif isinstance(value, datetime.date):
value = field.date().toPyDate()
else:
value = eval(str(field.text()))
valuelist.append(value)
return valuelist
class FormComboWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.combobox = QtWidgets.QComboBox()
layout.addWidget(self.combobox)
self.stackwidget = QtWidgets.QStackedWidget(self)
layout.addWidget(self.stackwidget)
self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)
self.widgetlist = []
for data, title, comment in datalist:
self.combobox.addItem(title)
widget = FormWidget(data, comment=comment, parent=self)
self.stackwidget.addWidget(widget)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormTabWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.tabwidget = QtWidgets.QTabWidget()
layout.addWidget(self.tabwidget)
self.setLayout(layout)
self.widgetlist = []
for data, title, comment in datalist:
if len(data[0]) == 3:
widget = FormComboWidget(data, comment=comment, parent=self)
else:
widget = FormWidget(data, comment=comment, parent=self)
index = self.tabwidget.addTab(widget, title)
self.tabwidget.setTabToolTip(index, comment)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormDialog(QtWidgets.QDialog):
"""Form Dialog"""
def __init__(self, data, title="", comment="", icon=None, parent=None, apply=None):
QtWidgets.QDialog.__init__(self, parent)
self.apply_callback = apply
# Form
if isinstance(data[0][0], (list, tuple)):
self.formwidget = FormTabWidget(data, comment=comment, parent=self)
elif len(data[0]) == 3:
self.formwidget = FormComboWidget(data, comment=comment, parent=self)
else:
self.formwidget = FormWidget(data, comment=comment, parent=self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.formwidget)
self.float_fields = []
self.formwidget.setup()
# Button box
self.bbox = bbox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.formwidget.update_buttons.connect(self.update_buttons)
if self.apply_callback is not None:
apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)
apply_btn.clicked.connect(self.apply)
bbox.accepted.connect(self.accept)
bbox.rejected.connect(self.reject)
layout.addWidget(bbox)
self.setLayout(layout)
self.setWindowTitle(title)
if not isinstance(icon, QtGui.QIcon):
icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)
self.setWindowIcon(icon)
def register_float_field(self, field):
self.float_fields.append(field)
def update_buttons(self):
valid = True
for field in self.float_fields:
if not is_edit_valid(field):
valid = False
for btn_type in (QtWidgets.QDialogButtonBox.Ok,
QtWidgets.QDialogButtonBox.Apply):
            btn = self.bbox.button(btn_type)
if btn is not None:
btn.setEnabled(valid)
def accept(self):
self.data = self.formwidget.get()
QtWidgets.QDialog.accept(self)
def reject(self):
self.data = None
QtWidgets.QDialog.reject(self)
def apply(self):
self.apply_callback(self.formwidget.get())
def get(self):
"""Return form result"""
return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
"""
Create form dialog and return result
(if Cancel button is pressed, return None)
data: datalist, datagroup
title: string
comment: string
icon: QIcon instance
parent: parent QWidget
apply: apply callback (function)
datalist: list/tuple of (field_name, field_value)
datagroup: list/tuple of (datalist *or* datagroup, title, comment)
-> one field for each member of a datalist
-> one tab for each member of a top-level datagroup
-> one page (of a multipage widget, each page can be selected with a combo
box) for each member of a datagroup inside a datagroup
Supported types for field_value:
- int, float, str, unicode, bool
- colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
(automatically detected from a string)
- list/tuple:
* the first element will be the selected index (or value)
* the other elements can be couples (key, value) or only values
"""
# Create a QApplication instance if no instance currently exists
# (e.g., if the module is used directly from the interpreter)
if QtWidgets.QApplication.startingUp():
_app = QtWidgets.QApplication([])
dialog = FormDial |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image
def graf2png(weburl, username, password, timeout, imgname, hwin, wwin, onlypanel):
driver = webdriver.PhantomJS()
driver.set_window_size(hwin, wwin)
driver.get(weburl)
    # Enter the username
in_user = driver.find_element_by_name('username')
in_user.clear()
in_user.send_keys(username)
    # Enter the password
in_pass = driver.find_element_by_id('inputPassword')
in_pass.clear()
in_pass.send_keys(password)
in_pass.send_keys(Keys.ENTER)
    # Wait for the query to finish loading
time.sleep(timeout)
    # Timestamp to avoid overwriting previous captures
currtime = time.strftime("%y%m%d%H%M%S", time.localtime())
imgname = imgname + currtime + '.png'
    # Take the screenshot
driver.save_screenshot(imgname)
print("Screen guardada como: " + imgname)
    # Crop the panel (optional).
    # This only works for panels whose class is 'panel-fullscreen',
    # which is the default class a panel gets when you generate a share
    # link. (Share Panel > Link > Copy)
if (onlypanel):
        panel = driver.find_element_by_class_name('panel-fullscreen')
plocation = panel.location
psize = panel.size
left = plocation['x']
top = plocation['y']
right = plocation['x'] + psize['width']
bottom = plocation['y'] + psize['height']
        pimg = Image.open(imgname)
pimg = pimg.crop((left, top, right, bottom))
pimgname = 'panel_' + imgname
pimg.save(pimgname)
print("Panel recortado guardado como: " + pimgname)
|
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# ---- Standard library imports
import os
import csv
import sys
# ---- Third party imports
import numpy as np
from xlrd import xldate_as_tuple
# ---- Local imports
from gwhat.brf_mod import __install_dir__
def produce_BRFInputtxt(well, time, wl, bp, et):
comment = 'No comment men'
wlu = 'feet'
bpu = 'feet'
etu = 'NONE'
sampleinterval = time[1]-time[0]
timeunits = 'days'
N = len(time)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[0], 0)
dst = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[-1], 0)
det = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
fcontent = []
fcontent.append(['Comment: %s' % comment])
fcontent.append(['Well: %s' % well])
fcontent.append(['WL Units: %s' % wlu])
fcontent.append(['BP Units: %s' % bpu])
fcontent.append(['ET Units: %s' % etu])
fcontent.append(['Sample Interval: %f' % sampleinterval])
fcontent.append(['Time Units: %s' % timeunits])
fcontent.append(['Data Start Time: %s' % dst])
fcontent.append(['Data End Time: %s' % det])
fcontent.append(['Number of Data: %d' % N])
fcontent.append(['Time WL BP ET'])
# Add the data to the file content.
wl = (100 - wl) * 3.28084
bp = bp * 3.28084
t = time - time[0]
fcontent.extend([[time[i], wl[i], bp[i], et[i]] for i in range(N)])
filename = os.path.join(__install_dir__, 'BRFInput.txt')
with open(filename, 'w', encoding='utf8') as f:
        writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(fcontent)
def produce_par_file(lagBP, lagET, detrend_waterlevels=True,
correct_waterlevels=True):
"""
    Create the parameter file required by the KGS_BRF program.
"""
brfinput = os.path.join(__install_dir__, 'BRFInput.txt')
brfoutput = os.path.join(__install_dir__, 'BRFOutput.txt')
wlcinput = os.path.join(__install_dir__, 'WLCInput.txt')
wlcoutput = os.path.join(__install_dir__, 'WLCOutput.txt')
detrend = 'Yes' if detrend_waterlevels else 'No'
correct = 'Yes' if correct_waterlevels else 'No'
par = []
par.append(['BRF Option (C[ompute] or R[ead]): Compute'])
par.append(['BRF Input Data File: %s' % brfinput])
par.append(['Number of BP Lags: %d' % lagBP])
par.append(['Number of BP ET: %d' % lagET])
par.append(['BRF Output Data File: %s' % brfoutput])
par.append(['Detrend data? (Y[es] or N[o]): %s' % detrend])
par.append(['Correct WL? (Y[es] or N[o]): %s' % correct])
par.append(['WLC Input Data File: %s' % wlcinput])
par.append(['WLC Output Data File: %s' % wlcoutput])
filename = os.path.join(__install_dir__, 'kgs_brf.par')
with open(filename, 'w', encoding='utf8') as f:
writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(par)
def run_kgsbrf():
exename = os.path.join(__install_dir__, 'kgs_brf.exe')
parname = os.path.join(__install_dir__, 'kgs_brf.par')
if os.path.exists(exename) and os.path.exists(parname):
if os.name == 'nt':
os.system('""%s" < "%s""' % (exename, parname))
def read_brf_output():
"""
Read the barometric response function from the output file produced
by kgs_brf.exe.
"""
    filename = os.path.join(__install_dir__, 'BRFOutput.txt')
with open(filename, 'r') as f:
reader = list(csv.reader(f))
header = []
for row in reader:
header.append(row)
if 'LagNo Lag A sdA SumA sdSumA B sdB SumB sdSumB' in row[0]:
break
# well = header[2][0].split()[-1]
# date0 = header[8][0].split()[-1]
# date1 = header[9][0].split()[-1]
data = reader[len(header):]
dataf = []
count = 1
for row in data:
if count == 1:
            dataf.append([float(i) for i in row[0].split()])
count += 1
elif count in [2, 3]:
dataf[-1].extend([float(i) for i in row[0].split()])
count += 1
elif count == 4:
dataf[-1].extend([float(i) for i in row[0].split()])
count = 1
# Remove non valid data.
dataf = [row for row in dataf if row[4] > -999]
# Format data into numpy arrays
dataf = np.array(dataf)
lag = dataf[:, 1]
A = dataf[:, 4]
err = dataf[:, 5]
return lag, A, err
if __name__ == "__main__":
# plt.close('all')
# produce_par_file()
run_kgsbrf()
load_BRFOutput(show_ebar=True, msize=5, draw_line=False)
# plt.show()
|
_LUA_CMD_VERSION = '3'
_LUA_VERSION = '2.0'
_LUA_RECOVERY = 'LUARecovery'
_RM_HDISK = 'RemoveDevice'
_MGT_CONSOLE = 'ManagementConsole'
class LUAType(object):
"""LUA Vendors."""
IBM = "IBM"
EMC = "EMC"
NETAPP = "NETAPP"
HDS = "HDS"
HP = "HP"
OTHER = "OTHER"
class LUAStatus(object):
"""LUA Recovery status codes."""
DEVICE_IN_USE = '1'
ITL_NOT_RELIABLE = '2'
DEVICE_AVAILABLE = '3'
STORAGE_NOT_INTEREST = '4'
LUA_NOT_INTEREST = '5'
INCORRECT_ITL = '6'
FOUND_DEVICE_UNKNOWN_UDID = '7'
FOUND_ITL_ERR = '8'
def normalize_lun(scsi_id):
"""Normalize the lun id to Big Endian
:param scsi_id: Volume lun id
:return: Converted LUN id in Big Endian as per the RFC 4455
"""
# PowerVM keeps LUN identifiers in hex format.
lun = '%x' % int(scsi_id)
    # For drivers that support complex LUA LUN IDs exceeding two bytes,
    # append 8 zeros; otherwise append 12 zeros, so that an 8-byte LUN ID
    # is passed.
if len(lun) == 8:
lun += "00000000"
else:
lun += "000000000000"
return lun
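# A minimal sketch of the padding behaviour:
#   normalize_lun(2)          -> '2000000000000'     ('2' + 12 zeros)
#   normalize_lun(439041101)  -> '1a2b3c4d00000000'  ('1a2b3c4d' + 8 zeros)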
class ITL(object):
"""The Nexus ITL.
See SCSI ITL. This is the grouping of the SCSI initiator, target and
LUN.
"""
def __init__(self, initiator, target, lun):
"""Create the ITL.
:param initiator: The initiator WWPN.
:param target: The target WWPN.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will
be formatted from a generic integer LUN ID to match
PowerVM's LUN Identifier format.
"""
self.initiator = initiator.lower().replace(':', '')
self.target = target.lower().replace(':', '')
self.lun = normalize_lun(lun)
def __eq__(self, other):
if other is None or not isinstance(other, ITL):
return False
return (self.initiator == other.initiator and
self.target == other.target and
self.lun == other.lun)
def __hash__(self):
return hash(self.initiator) ^ hash(self.target) ^ hash(self.lun)
def __ne__(self, other):
return not self.__eq__(other)
def good_discovery(status, device_name):
"""Checks the hdisk discovery results for a good discovery.
    Acceptable LUA discovery statuses are:
DEVICE_AVAILABLE: hdisk discovered on all the ITL paths and available.
DEVICE_IN_USE: hdisk discovered on all the ITL paths and is in-use by
the server.
FOUND_ITL_ERR: hdisk is discovered on some of the ITL paths and available.
    This can happen when multiple ITL nexus paths are passed and the hdisk is
    discovered on only some of them, for example when several target WWPNs
    and VIOS WWPNs exist but only a few are connected. If the hdisk can be
    discovered on ANY of the paths, it is considered a good discovery.
"""
return device_name is not None and status in [
LUAStatus.DEVICE_AVAILABLE, LUAStatus.DEVICE_IN_USE,
LUAStatus.FOUND_ITL_ERR]
def build_itls(i_wwpns, t_wwpns, lun):
"""This method builds the list of ITLs for all of the permutations.
An ITL is specific to an initiator, target, and LUN. However, with multi
pathing, there are several scenarios where a given LUN will have many ITLs
because of multiple initiators or targets.
The initiators should be tied to a given Virtual I/O Server (or perhaps
specific WWPNs within a VIOS).
:param i_wwpns: List or set of initiator WWPNs.
:param t_wwpns: List or set of target WWPNs.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will be
formatted from a generic integer LUN ID to match PowerVM's
LUN Identifier format.
:return: List of all the ITL permutations.
"""
return [ITL(i, t, lun) for i, t in itertools.product(i_wwpns, t_wwpns)]
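# A minimal sketch (hypothetical WWPNs): build_itls(['i1', 'i2'], ['t1'], 2)
# returns the two permutations ITL('i1', 't1', 2) and ITL('i2', 't1', 2).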
def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Attempt to discover a hard disk attached to a Virtual I/O Server.
See lua_recovery. This method attempts that call and analyzes the
results. On certain failure conditions (see below), this method will find
stale LPARs, scrub storage artifacts associated with them, and then retry
lua_recovery. The retry is only attempted once; that result is returned
regardless.
The main objective of this method is to resolve errors resulting from
incomplete cleanup of previous LPARs. The stale LPAR's storage mappings
can cause hdisk discovery to fail because it thinks the hdisk is already in
use.
Retry conditions: The scrub-and-retry will be triggered if:
o dev_name is None; or
o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR. (The
latter is acceptable because it means we discovered some, but not all, of
the ITLs. This is okay as long as dev_name is set.)
:param adapter: The pypowervm adapter.
:param vios_uuid: The Virtual I/O Server UUID.
:param itls: A list of ITL objects.
:param vendor: The vendor for the LUN. See the LUAType.* constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
# First attempt
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor, device_id=device_id)
# Do we need to scrub and retry?
if not good_discovery(status, devname):
        vwrap = pvm_vios.VIOS.get(adapter, uuid=vios_uuid,
xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))
scrub_ids = tsk_stg.find_stale_lpars(vwrap)
if scrub_ids:
# Detailed warning message by _log_lua_status
LOG.warning(_("hdisk discovery failed; will scrub stale storage "
"for LPAR IDs %s and retry."), scrub_ids)
# Scrub from just the VIOS in question.
            scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
scrub_task.execute()
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor,
device_id=device_id)
return status, devname, udid
def lua_recovery(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Logical Unit Address Recovery - discovery of a FC-attached hdisk.
When a new disk is created externally (say on a block device), the Virtual
I/O Server may or may not discover it immediately. This method forces a
discovery on a given Virtual I/O Server.
:param adapter: The pypowervm adapter.
:param vios_uuid: The Virtual I/O Server UUID.
:param itls: A list of ITL objects.
:param vendor: The vendor for the LUN. See the LUAType.* constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
# Reduce the ITLs to ensure no duplicates
itls = set(itls)
# Build the LUA recovery XML
lua_xml = _lua_recovery_xml(itls, adapter, vendor=vendor,
device_id=device_id)
# Build up the job & invoke
resp = adapter.read(
pvm_vios.VIOS.schema_type, root_id=vios_uuid,
suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_LUA_RECOVERY)
job_wrapper = pvm_job.Job.wrap(resp)
job_parms = [job_wrapper.create_job_parameter('inputXML', lua_xml,
cdata=True)]
job_wrapper.run_job(vios_uuid, job_parms=job_ |
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
""" Connection class for RPC replies / callbacks """
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('no calling threads waiting for msg_id : %s'
', message : %s') % (msg_id, message_data))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibilty.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
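# A minimal sketch: if context.to_dict() == {'user': 'u1'}, pack_context() adds
# {'_context_user': 'u1'} to msg, and unpack_context() later strips the
# '_context_' prefix to rebuild the RpcContext on the receiving side.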
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
    # NOTE: This value could be made a configuration item, but it rarely
    #       needs to be changed, so it is kept static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager used by
the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback to allow it to be invoked in a green
thread.
"""
def __init__(self, conf, callback, connection_pool):
"""
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = |
owing:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
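  # A minimal sketch (hypothetical values): with predictAheadTime equal to 24
  # hours and an aggregation period of 1 hour, aggregationDivide() yields 24
  # and the classifier 'steps' parameter becomes '24'.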
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that |
# -*- coding: utf-8 -*-
from ihm.main_window import launch
if __name__ == '__main__':
    launch()
|
"""
This component provides HA cover support for Abode Security System.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.abode/
"""
import logging
from homeassistant.components.abode import AbodeDevice, DOMAIN as ABODE_DOMAIN
from homeassistant.components.cover import CoverDevice
DEPENDENCIES = ['abode']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Abode cover devices."""
import abodepy.helpers.constants as CONST
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
if data.is_excluded(device):
continue
        devices.append(AbodeCover(data, device))
data.devices.extend(devices)
add_devices(devices)
class AbodeCover(AbodeDevice, CoverDevice):
"""Representation of an Abode cover."""
@property
    def is_closed(self):
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self._device.open_cover()
|
source", resource['name'], resource['version'], "zip")
paths.append(
download(resource['name'], '%s/%s-%s.zip' % (resources_path, resource['name'], resource['version']), url))
return paths
def download_services(services):
paths = []
for service in services:
# for release < 1.22
if service is not None:
if service['name'] == "gravitee-gateway-services-ratelimit":
url = get_download_url("io.gravitee.policy", service['name'], service['version'], "zip")
else:
url = get_download_url("io.gravitee.discovery", service['name'], service['version'], "zip")
paths.append(
download(service['name'], '%s/%s-%s.zip' % (services_path, service['name'], service['version']), url))
return paths
def download_connectors(connectors):
paths = []
for connector in connectors:
url = get_download_url("io.gravitee.cockpit", connector['name'], connector['version'], "zip")
paths.append(
download(connector['name'], '%s/%s-%s.zip' % (resources_path, connector['name'], connector['version']), url))
return paths
def download_ui(ui, default_version):
v = default_version if 'version' not in ui else ui['version']
url = get_download_url("io.gravitee.management", ui['name'], v, "zip")
return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)
def download_portal_ui(ui, default_version):
v = default_version if 'version' not in ui else ui['version']
url = get_download_url("io.gravitee.portal", ui['name'], v, "zip")
return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)
def download_reporters(reporters):
paths = []
for reporter in reporters:
name = "gravitee-reporter-elasticsearch" if "gravitee-elasticsearch" == reporter['name'] else reporter['name']
url = get_download_url("io.gravitee.reporter", name, reporter['version'], "zip")
paths.append(
download(name, '%s/%s-%s.zip' % (reporters_path, name, reporter['version']), url))
return paths
def download_repositories(repositories):
paths = []
for repository in repositories:
if repository['name'] != "gravitee-repository-gateway-bridge-http":
name = "gravitee-repository-elasticsearch" if "gravitee-elasticsearch" == repository['name'] else repository['name']
url = get_download_url("io.gravitee.repository", name, repository['version'], "zip")
paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
else:
for name in ["gravitee-repository-gateway-bridge-http-client", "gravitee-repository-gateway-bridge-http-server"]:
url = get_download_url("io.gravitee.gateway", name, repository['version'], "zip")
paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
return paths
def prepare_gateway_bundle(gateway):
print("==================================")
print("Prepare %s" % gateway)
bundle_path = unzip([gateway])[0]
print(" bundle_path: %s" % bundle_path)
copy_files_into(policies_path, bundle_path + "plugins")
copy_files_into(resources_path, bundle_path + "plugins")
copy_files_into(repositories_path, bundle_path + "plugins", [".*gravitee-repository-elasticsearch.*"])
copy_files_into(reporters_path, bundle_path + "plugins")
copy_files_into(services_path, bundle_path + "plugins")
copy_files_into(connectors_path, bundle_path + "plugins")
os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)
def prepare_ui_bundle(ui):
print("==================================")
print("Prepare %s" % ui)
bundle_path = unzip([ui])[0]
print(" bundle_path: %s" % bundle_path)
def prepare_mgmt_bundle(mgmt):
print("==================================")
print("Prepare %s" % mgmt)
bundle_path = unzip([mgmt])[0]
print(" bundle_path: %s" % bundle_path)
    copy_files_into(policies_path, bundle_path + "plugins")
copy_files_into(resources_path, bundle_path + "plugins")
copy_files_into(fetchers_path, bundle_path + "plugins")
    copy_files_into(repositories_path, bundle_path + "plugins", [".*gravitee-repository-ehcache.*", ".*gravitee-repository-gateway-bridge-http-client.*", ".*gravitee-repository-gateway-bridge-http-server.*"])
copy_files_into(services_path, bundle_path + "plugins", [".*gravitee-gateway-services-ratelimit.*"])
copy_files_into(connectors_path, bundle_path + "plugins")
os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)
def prepare_policies(version):
print("==================================")
print("Prepare Policies")
dist_dir = get_dist_dir_name()
policies_dist_path = "%s/%s/gravitee-policies-%s" % (tmp_path, dist_dir, version)
os.makedirs(policies_dist_path, exist_ok=True)
copy_files_into(policies_path, policies_dist_path)
copy_files_into(services_path, policies_dist_path)
def package(version, release_json):
print("==================================")
print("Packaging")
packages = []
exclude_from_full_zip_list = [re.compile(".*graviteeio-policies.*")]
dist_dir = get_dist_dir_name()
full_zip_name = "graviteeio-full-%s" % version
# how to create a symbolic link ?
#if jdbc:
# full_zip_name = "graviteeio-full-jdbc-%s" % version
full_zip_path = "%s/%s/%s.zip" % (tmp_path, dist_dir, full_zip_name)
dirs = [os.path.join("%s/%s/" % (tmp_path, dist_dir), fn) for fn in next(os.walk("%s/%s/" % (tmp_path, dist_dir)))[1]]
# add release.json
jsonfile_name = "release.json"
jsonfile_absname = os.path.join("%s/%s/%s" % (tmp_path, dist_dir, jsonfile_name))
jsonfile = open(jsonfile_absname, "w")
jsonfile.write("%s" % json.dumps(release_json, indent=4))
jsonfile.close()
with zipfile.ZipFile(full_zip_path, "w", zipfile.ZIP_DEFLATED) as full_zip:
print("Create %s" % full_zip_path)
packages.append(full_zip_path)
full_zip.write(jsonfile_absname, jsonfile_name)
for d in dirs:
with zipfile.ZipFile("%s.zip" % d, "w", zipfile.ZIP_DEFLATED) as bundle_zip:
print("Create %s.zip" % d)
packages.append("%s.zip" % d)
dir_abs_path = os.path.abspath(d)
dir_name = os.path.split(dir_abs_path)[1]
for dirname, subdirs, files in os.walk(dir_abs_path):
exclude_from_full_zip = False
for pattern in exclude_from_full_zip_list:
if pattern.match(d):
exclude_from_full_zip = True
break
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(dir_abs_path) - len(dir_name):]
bundle_zip.write(absname, arcname)
if exclude_from_full_zip is False:
full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
if len(files) == 0:
absname = os.path.abspath(dirname)
arcname = absname[len(dir_abs_path) - len(dir_name):]
bundle_zip.write(absname, arcname)
if exclude_from_full_zip is False:
full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
return packages
def rename(string):
return string.replace("gravitee", "graviteeio") \
.replace("management-standalone", "management-api") \
.replace("management-webui", "management-ui") \
.replace("portal-webui", "portal-ui") \
.replace("standalone-", "")
def clean_dir_names():
print("==================================")
print("Clean directory names")
dirs = [os.path.join("%s/%s/" % (tmp_path, get_dist_dir_name()), fn) for fn in next(os.walk("%s/%s/" % (tmp_path, get_dist_dir_name())))[1]]
for d i |
import pandas as pd
from sqlalchemy import create_engine
from bcpp_rdb.private_settings import Rdb
class CCC(object):
"""CDC data for close clinical cohort."""
def __init__(self):
self.engine = create_engine('postgresql://{user}:{password}@{host}/{db}'.format(
user=Rdb.user, password=Rdb.password, host=Rdb.host, db=Rdb.name),
connect_args={})
with self.engine.connect() as conn, conn.begin():
            self.df_enrolled = pd.read_sql_query(self.sql_enrolled(), conn)
def sql_enrolled(self):
"""
        * If the patient is from the BCPP survey, oc_study_id is a BHP identifier.
* ssid is the CDC allocated identifier of format NNN-NNNN.
"""
return """select ssid as cdcid, oc_study_id as subject_identifier,
        appt_date from dw.oc_crf_ccc_enrollment"""
def sql_refused(self):
"""
        * If the patient is from the BCPP survey, oc_study_id is a BHP identifier.
* ssid is the CDC allocated identifier of format NNN-NNNN.
"""
        return """select ssid as cdcid, oc_study_id as subject_identifier,
appt_date from dw.oc_crf_ccc_enrollment"""
|
class Information(object):
def __init__(self, pygame):
self.pygame = pygame
self.display_fps = False
def _show_fps(self, clock, screen):
        font = self.pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("fps: {0:.2f}".format(clock.get_fps()), True, (0, 0, 0))
screen.blit(text, [0, 0])
def show_fps(self, clock, screen):
if self.display_fps:
self._show_fps(clock, screen)
def toggle_fps(self):
self.display_fps = not self.display_fps
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import linalg
import csv
import codecs
import copy
def comparison(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
        segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
dist=0
for i in range(min(len(trajOne), len(trajTwo))):
dist = dist + np.linalg.norm(segmentOne[i,2:]-segmentTwo[i,2:])
return dist
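# A minimal sketch: comparison() rebases the joint-angle columns of both
# segments to their first sample and sums the Euclidean distances between
# corresponding points, so two segments that differ only by a constant offset
# yield a distance of 0.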
def plotTraj(jointTrajectory):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=28)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
    ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
# ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory')
ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory .')
ax.legend(prop={'size':30})
plt.show()
def plotDistances(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=30)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
for i in range(len(segmentOne)):
if i==0:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue',label='Distances')
else:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=3,color='red',label='Segment 1')
ax.plot(segmentTwo[:,2], segmentTwo[:,3], segmentTwo[:,4], lw=3,color='green',label='Segment 2')
ax.legend(prop={'size':30})
plt.show()
def plotSingle(trajOne):
segmentOne=np.array(trajOne)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=5,color='red',label='Segment 1')
plt.show()
def plot2DXiP():
x = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170]
y = [94, 86, 72, 58, 46, 41, 38, 31, 27, 22, 13, 10, 8, 6, 5, 3, 2, 1]
plt.plot(x, y, linewidth=4)
plt.ylim([0,94])
plt.xlim([0,170])
plt.xlabel(u"\u03BE", fontsize=30)
plt.ylabel('Number of Primitives', fontsize=30)
plt.tick_params(labelsize=25)
#plt.title('Relation between ' + u'\u03BE' + ' and primitives', fontsize=30)
plt.grid(True)
plt.show()
def saveToCSV(primitives, tau, xi):
fileName='db_var/db_'+ 'tau' +str(tau) + '_xi'+ str(xi)
# to write in CSV
with open(fileName, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_MINIMAL)
for k in range(len(primitives)):
wr.writerow(primitives[k])
# to replace '"'
contents = codecs.open(fileName, encoding='utf-8').read()
s = contents.replace('"', '')
with open(fileName, "wb") as f:
f.write(s.encode("UTF-8"))
def main():
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42 # type 42 fonts (truetype) for IEEE papercept system
#325, 350
tau=0.5 # length in seconds
xi=50
jointTrajectory= np.loadtxt('../../main/app/record/recordedTeo/state/data.log')
x=list(jointTrajectory)
plotTraj(jointTrajectory)
numberOfSlices=0
realDataMatrix=[]
slicesList=[]
temp0=np.array(x).astype('float')
#slices by time
newTimeValue= np.ceil(temp0[-1][1] - temp0[0][1])
numberOfSlices = int(newTimeValue/tau)
X = np.array(x)
## OVERLOAD
for h in range(numberOfSlices):
initial=(X.shape[0]/numberOfSlices)*h
final=(X.shape[0]/numberOfSlices)*(h+1)
if X[initial:final].shape[0] == 0:
print 'NO POINTS IN THIS SET. PROBABLY NUMBEROFPOINTS < NUMBEROFSLICES'
else:
slicesList.append(X[initial:final].tolist())
plotDistances(slicesList[20],slicesList[50])
primitives=[]
primitives.append(slicesList[0])
for i in range(numberOfSlices):
for k in range(len(primitives)):
jay = comparison(slicesList[i],primitives[k])
if jay < xi:
#print 'Similar to primitive', k,'in database. Jay', jay
jay=-1
break
if jay !=-1:
#print 'Adding segment', i,'to database'
primitives.append(slicesList[i])
print 'xi:', xi
print 'tau:', tau
print 'number of primitives:', len(primitives)
# making a copy to be modified
relativePrims=copy.deepcopy(primitives)
# making them relative
for a in range(len(primitives)):
for b in range(len(primitives[a])):
for c in range(2,5):
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][b-1][c]
if b==0:
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][0][c]
#saveToCSV(relativePrims, tau, xi)
if __name__ == '__main__':
main()
|
import os
import sys
import numpy as np
import math
def findBinIndexFor(aFloatValue, binsList):
#print "findBinIndexFor: %s" % aFloatValue
returnIndex = -1
for i in range(len(binsList)):
thisBin = binsList[i]
if (aFloatValue >= thisBin[0]) and (aFloatValue < thisBin[1]):
returnIndex = i
break
return returnIndex
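# A minimal sketch: with binsList = [(0.0, 1.0), (1.0, 2.0)],
# findBinIndexFor(1.5, binsList) returns 1 and findBinIndexFor(2.5, binsList)
# returns -1 (no bin contains the value).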
def compute_joint_prob(joint_list, vals1, vals2, bins1=None, bins2=None, asFreq=False):
returnDict = {}
for rec in joint_list:
val1 = rec[0]
val2 = rec[1]
#Find name by which first val should appear
dictName1 = val1
if bins1 is not None:
dictName1 = findBinIndexFor(val1, bins1)
#Find name by which second val should appear
dictName2 = val2
if bins2 is not None:
dictName2 = findBinIndexFor(val2, bins2)
#If first name is not present in dict,
#then initialize it
if dictName1 not in returnDict:
returnDict[dictName1] = {}
for val in vals2:
#Determine name under which
#y-values should appear (i.e. as bin names
#or as given names)
asDictName = val
if bins2 is not None:
asDictName = findBinIndexFor(val, bins2)
returnDict[dictName1][asDictName] = 0
returnDict[dictName1][dictName2]+=1
if not asFreq:
#Normalize values
for key in returnDict:
for secondKey in returnDict[key]:
returnDict[key][secondKey] = float(returnDict[key][secondKey]) / len(joint_list)
return returnDict
def getXForFixedY(joint_prob_dist, yVal):
returnList = []
for key in joint_prob_dist:
returnList.append( joint_prob_dist[key][yVal])
return returnList
def compute_h(floatsList):
returnFloat = None
acc = 0
for f in floatsList:
if f != 0:
acc = acc - f * math.log(f, 2)
returnFloat = acc
return returnFloat
# Computes the conditional entropy H(X|Y) from the joint
# distribution P(X,Y) and the Y marginal P(Y)
def conditional_entropy(joint_prob_dist, xVals, yVals):
returnFloat = None
h_acc = 0
marginal_y_dist = getYMarginalDist(joint_prob_dist)
for x in xVals:
for y in yVals:
joint_xy = 0
marginal_y = 0
if not x in joint_prob_dist or y not in joint_prob_dist[x]:
joint_xy = 0
else:
joint_xy = joint_prob_dist[x][y]
if not y in marginal_y_dist:
marginal_y = 0
else:
                marginal_y = marginal_y_dist[y]
if joint_xy!=0 and marginal_y!=0:
h_acc-=joint_xy*math.log(joint_xy/marginal_y, 2)
# for yVal in yVals:
# new_xDist = getXForFixedY(joint_prob_dist, yVal)
# h_yVal = compute_h(new_xDist)
# p_yVal = reduce(lambda x, y: x+y, new_xDist)
# h_acc+=p_yVal * h_yVal
returnFloat = h_acc
    return returnFloat
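# The quantity accumulated above is the conditional entropy
#   H(X|Y) = -sum_{x,y} p(x,y) * log2(p(x,y) / p(y))
# For example, a uniform independent 2x2 joint distribution (all p(x,y) = 0.25,
# p(y) = 0.5) gives H(X|Y) = 1 bit.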
def getYMarginalDist(joint_prob_dist):
returnDict = {}
for xKey in joint_prob_dist:
for yKey in joint_prob_dist[xKey]:
if not yKey in returnDict:
returnDict[yKey] = 0
returnDict[yKey]+=joint_prob_dist[xKey][yKey]
return returnDict
def getXMarginalDist(joint_prob_dist):
returnDict = {}
for key in joint_prob_dist:
yVals = joint_prob_dist[key]
marginalVal = reduce(lambda x,y: x+y, [yVals[e] for e in yVals])
returnDict[key] = marginalVal
return returnDict
def entropy_loss(joint_prob_dist, xVals, yVals):
returnFloat = None
priorsDict = getXMarginalDist(joint_prob_dist)
priors = priorsDict.values()
h_prior = compute_h(priors)
h_conditional = conditional_entropy(joint_prob_dist, xVals, yVals)
returnFloat = h_prior - h_conditional
return returnFloat
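# Minimal usage sketch (illustrative only; the observations below are
# hypothetical and not part of the original module):
if __name__ == '__main__':
    # X is a two-class label, Y a binary feature observed jointly with it.
    joint_list = [('a', 0), ('a', 0), ('a', 1), ('b', 1)]
    xVals = ['a', 'b']
    yVals = [0, 1]
    joint = compute_joint_prob(joint_list, xVals, yVals)
    # P(X='a') = 0.75, P(X='b') = 0.25  =>  H(X) ~= 0.811 bits
    # H(X|Y) = 0.5 bits, so the information gain is ~0.311 bits.
    gain = entropy_loss(joint, xVals, yVals)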
|
onfig) as sess:
sess.run(self.init) # sess.run(tf.initialize_all_variables())
dynStats = DynStats(validation=valid_data is not None)
for epoch in range(epochs):
train_error, runTime = getRunTime(
lambda:
self.trainEpoch(
sess=sess,
data_provider=train_data,
extraFeedDict={
self.is_training: True,
}
)
)
if np.isnan(train_error):
raise Exception('do something with your learning rate because it is extremely high')
if valid_data is None:
if verbose:
# print 'EndEpoch%02d(%.3f secs):err(train)=%.4f,acc(train)=%.2f,err(valid)=%.2f,acc(valid)=%.2f, ' % \
# (epoch + 1, runTime, train_error, train_accuracy, valid_error, valid_accuracy)
print 'End Epoch %02d (%.3f secs): err(train) = %.6f' % (
epoch + 1, runTime, train_error)
dynStats.gatherStats(train_error=train_error)
else:
# if (epoch + 1) % 1 == 0:
valid_error = self.validateEpoch(
sess=sess,
data_provider=valid_data,
extraFeedDict={self.is_training: False},
)
if np.isnan(valid_error):
raise Exception('do something with your learning rate because it is extremely high')
if verbose:
print 'End Epoch %02d (%.3f secs): err(train) = %.6f, err(valid)=%.6f' % (
epoch + 1, runTime, train_error, valid_error)
dynStats.gatherStats(train_error=train_error, valid_error=valid_error)
preds_dict, test_error, twod_dict = self.getPredictions(batch_size=batch_size, data_provider=preds_dp,
sess=sess) if preds_gather_enabled else (
None, None, None)
if verbose:
if preds_gather_enabled:
print "total test error: {}".format(test_error)
print
if preds_gather_enabled:
return dynStats, self.trimPredsDict(preds_dict,
data_provider=preds_dp), preds_dp.get_targets_dict_trimmed(), twod_dict
else:
return dynStats
def getGraph(self,
batch_size,
enc_num_units,
hidden_enc_num_units,
hidden_enc_dim,
hidden_dec_dim,
hidden_dec_num_units,
dec_num_units,
ts_len,
learning_rate=ADAM_DEFAULT_LEARNING_RATE, # default of Adam is 1e-3
verbose=True):
# momentum = 0.5
# tf.reset_default_graph() #kind of redundant statement
graph = tf.Graph() # create new graph
with graph.as_default():
with tf.name_scope('parameters'):
self.is_training = tf.placeholder(tf.bool, name="is_training")
with tf.name_scope('data'):
inputs = tf.placeholder(dtype=self.dtype,
shape=(batch_size, ts_len, self.INPUT_FEATURE_LEN), name="inputs")
targets = inputs[:, :, self.TS_INPUT_IND]
if verbose:
print "targets"
print targets
print
decoder_extra_inputs = tf.placeholder(dtype=self.dtype,
shape=(batch_size, ts_len, self.DATE_FEATURE_LEN),
name="decoder_extra_inputs")
self.decoder_extra_inputs = decoder_extra_inputs
sequence_lens = tf.placeholder(tf.int32, shape=(batch_size,), name="sequence_lens_placeholder")
self.sequence_lens = sequence_lens
sequence_len_mask = tf.placeholder(tf.int32, shape=(batch_size, ts_len),
name="sequence_len_mask_placeholder")
self.sequence_len_mask = sequence_len_mask
with tf.name_scope('encoder_rnn_layer'):
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=enc_num_units, activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=inputs,
initial_state=None,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print encoder_outputs
print encoder_final_state
print
with tf.variable_scope('hidden_encoder_rnn_layer'):
hidden_encoder_outputs, hidden_encoder_final_state = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.GRUCell(num_units=hidden_enc_num_units,
activation=self.DEFAULT_ACTIVATION_RNN()),
inputs=encoder_outputs,
initial_state=encoder_final_state,
dtype=self.dtype,
sequence_length=sequence_lens
)
if verbose:
print hidden_encoder_outputs
print hidden_encoder_final_state
print
with tf.name_scope('encoder_state_out_hidden_process'):
hidden_enc_layer = fully_connected_layer_with_batch_norm(fcId='encoder_state_out_hidden_process',
inputs=hidden_encoder_final_state,
input_dim=hidden_enc_num_units,
output_dim=hidden_enc_dim,
is_training=self.is_training,
nonlinearity=tf.nn.elu)
if verbose:
                    print hidden_enc_layer
                    print
with tf.name_scope('encoder_state_out_process'):
# don't really care for encoder outputs, but only for its final state
# the encoder consumes all the input to get a sense of the trend of price history
                # fully_connected_layer_with_batch_norm_and_l2(fcId='encoder_state_out_process',
# inputs=encoder_final_state,
# input_dim=enc_num_units, output_dim=self.DIM_REDUCTION,
# is_training=self.is_training, lamda2=0)
ww_enc_out = generate_weights_var(ww_id='encoder_state_out_process', input_dim=hidden_enc_dim,
output_dim=self.DIM_REDUCTION,
dtype=self.dtype)
nonlinearity = tf.nn.elu
avoidDeadNeurons = 0.1 if nonlinearity == tf.nn.relu else 0. # prevent zero when relu
bb_enc_out = tf.Variable(avoidDeadNeurons * tf.ones([self.DIM_REDUCTION]),
name='biases_{}'.format('encoder_state_out_process'))
# out_affine = tf.matmul(inputs, weights) + biases
affine_enc_out = tf.add(tf.matmul(hidden_enc_layer, ww_enc_out), bb_enc_out)
self.twod = affine_enc_out ######### HERE WE GET THE TWO DIM REPRESENTATION OF OUR TIMESERIES ##########
batchNorm = batchNormWrapper('encoder_state_out_process', affine_enc_out, self.is_training)
                nonlinear_enc_out = nonlinearity(batchNorm)
|
import sys
sys.path.insert(1,"../../../")
import h2o, tests
def deepLearningDemo():
# Training data
train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
train_data = train_data.drop('Site')
train_data['Angaus'] = train_data['Angaus'].asfactor()
print train_data.describe()
train_data.head()
# Testing data
test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
test_data['Angaus'] = test_data['Angaus'].asfactor()
print test_data.describe()
test_data.head()
# Run GBM
gbm = h2o.gbm(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
                  validation_y= test_data ['Angaus'],
ntrees=100,
distribution="bernoulli")
gbm.show()
# Run DeepLearning
dl = h2o.deeplearning(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
loss = 'CrossEntropy',
                          epochs = 1000,
hidden = [20, 20, 20])
dl.show()
if __name__ == "__main__":
tests.run_test(sys.argv, deepLearningDemo)
|
vcs links, regression test for issue #798.
"""
result = script.pip(
'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
)
assert (
Path('scratch') / 'pip-test-package-0.1.1.zip'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--no-deps',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--no-binary=fake',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_9_x86_64',
'fake'
)
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl')
fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_10_x86_64',
'fake'
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl'
in result.files_created
)
# OSX platform wheels are not backward-compatible.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_8_x86_64',
'fake',
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==1',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==2'
)
assert (
Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl'
in result.files_created
)
def test_download_platform_manylinux(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-manylinux1_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'manylinux1_x86_64',
'fake',
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-manylinux1_x86_64.whl'
in result.files_created
)
# When specifying the platform, manylinux1 needs to be the
# explicit platform--it won't ever be added to the compatible
# tags.
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
def test_download_specify_python_version(script, data):
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
        'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '27',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '33',
'fake'
)
data.reset()
fake_wheel(data, 'fake-1.0-py2-none-any.whl')
fake_wheel(data, 'fake-2.0-py3-none-any.whl')
# No py3 provided for version 1.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake==1.0',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2-none-any.whl'
        in result.files_created
    )
|
from bottle import route, default_app
app = default_app()
data = {
"id": 78874,
"seriesName": "Firefly",
"aliases": [
"Serenity"
],
"banner": "graphical/78874-g3.jpg",
"seriesId": "7097",
"status": "Ended",
"firstAired": "2002-09-20",
"network": "FOX (US)",
"networkId": "",
"runtime": "45",
"genre": [
"D | rama",
"Science-Fiction"
],
"overview": "In the far-distant future, Captain Malcolm \"Mal\" Reynolds is a renegade former brown-coat sergeant, now turned smuggler & rogue, "
"who is the commander of a small spacecraft, with a loyal hand-picked crew made up of the first mate, Zoe Warren; the pilot Hoban \"Wash\" Washburn; "
"t | he gung-ho grunt Jayne Cobb; the engineer Kaylee Frye; the fugitives Dr. Simon Tam and his psychic sister River. "
"Together, they travel the far reaches of space in search of food, money, and anything to live on.",
"lastUpdated": 1486759680,
"airsDayOfWeek": "",
"airsTime": "",
"rating": "TV-14",
"imdbId": "tt0303461",
"zap2itId": "EP00524463",
"added": "",
"addedBy": None,
"siteRating": 9.5,
"siteRatingCount": 472,
}
@route('/api')
def api():
return data
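# Note (added for clarity): Bottle serialises a dict return value to JSON
# automatically, so GET /api responds with the record above as
# application/json.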
|
# -*- coding: utf-8 -*-
"""
    Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from . import const_settings as setting
from . import const_localize as localize
from . import const_sort_methods as sort_method
from . import const_content_types as content_type
from . import const_paths as paths
__all__ = ['setting', 'localize', 'sort_method', 'content_type', 'paths']
|
l plus 2" or channel == "canal+ 2 hd":
channel = "canal+ 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 1 ...30" or channel == "canal+ 1... 30":
channel = "canal+ 1 ...30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ series":
channel = "canal+ series"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "goltv" or channel == "golt":
channel = "gol televisión"
epg_channel = epg_formulatv(params, channel)
        return epg_channel
elif channel == "40 TV":
channel = "40 tv"
        epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal sur" or channel == "andalucia tv":
channel = "canal sur"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn white":
channel = "axn white"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "xtrm":
channel = "xtrm"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "calle 13" or channel == "calle 13 hd":
channel = "calle 13"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "amc" or channel == "amc españa":
channel = "amc (españa)"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "barça tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "andalucía tv" or channel == "andalucia tv":
channel = "andalucia-tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "canal barca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 30" or channel == "canal+ ...30" or channel == "canal plus 30":
channel = "canal+ 1... 30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ accion" or channel == "canal+ acción" or channel=="canal plus accion":
channel = "canal+ acción"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ comedia" or channel == "canal plus comedia":
channel = "canal+ comedia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ decine" or channel == "canal plus decine":
channel = "canal+ dcine"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ deporte" or channel == "canal plus deporte":
channel = "canal+ deporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ futbol" or channel == "canal+ fútbol" or channel == "canal plus fútbol" or channel == "canal plus futbol":
channel = "canal+ fútbol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ liga":
channel = "canal+ liga"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ golf" or channel == "canal plus golf":
channel = "golf+"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ toros" or channel == "canal plus toros":
channel = "canal+ toros"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ extra" or channel=="canal+ xtra":
channel = "canal+ xtra"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal 33" or channel == "canal33":
channel = "canal33"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal cocina":
channel = "canal cocina"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cartoon network" or channel == "cartoon network hd":
channel = "cartoon network"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "castilla-la mancha televisión" or channel == "castilla-la mancha tv":
channel = "castilla-la-mancha"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "caza y pesca":
channel = "caza-y-pesca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "clan" or channel == "clan tve 50" or channel == "clan tve":
channel = "clan tve"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nickelodeon":
channel = "nickelodeon"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "boing":
channel = "boing"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnbc":
channel = "cnbc"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnn-international" or channel == "cnn int":
channel = "cnn international"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cosmopolitan" or channel == "cosmopolitan tv":
channel = "cosmopolitan"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "a&e" or channel == "a&e españa":
channel = "a&e españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ dcine" or channel == "canal plus dcine":
channel = "dcine espanol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "decasa":
channel = "decasa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "discovery channel":
channel = "discovery channel"
        epg_channel = epg_formulatv(params, channel)
        return epg_channel
elif channel == "national geographic":
channel = "national geographic"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "discovery max":
channel = "discovery max"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney channel":
channel = "disney channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disn |
ctType, values.W_Object, procedure,
values.W_Object, values.W_Object], simple=False, extra_info=True)
@jit.unroll_safe
def do_checked_procedure_check_and_extract(type, v, proc, v1, v2, env, cont, calling_app):
from pycket.interpreter import check_one_val, return_value
if isinstance(v, values_struct.W_RootStruct):
struct_type = jit.promote(v.struct_type())
if type.has_subtype(struct_type):
offset = struct_type.get_offset(type)
assert offset != -1
return v.ref_with_extra_info(offset, calling_app, env,
receive_first_field(proc, v, v1, v2, calling_app, env, cont))
return proc.call([v, v1, v2], env, cont)
################################################################
# printing
@expose("system-library-subpath", [default(values.W_Object, values.w_false)])
def sys_lib_subpath(mode):
# Pycket is 64bit only a.t.m.
if w_system_sym == w_windows_sym:
return values.W_Path(r"win32\\x86_64")
elif w_system_sym == w_macosx_sym:
return values.W_Path("x86_64-macosx")
else:
# FIXME: pretend all unicies are linux for now
return values.W_Path("x86_64-linux")
@expose("primitive-closure?", [values.W_Object])
def prim_clos(v):
return values.w_false
################################################################
# built-in struct types
def define_struct(name, w_super=values.w_null, fields=[]):
immutables = range(len(fields))
symname = values.W_Symbol.make(name)
w_struct_type = values_struct.W_StructType.make_simple(
w_name=symname,
w_super_type=w_super,
init_field_count=len(fields),
auto_field_count=0,
immutables=immutables)
expose_val("struct:" + name, w_struct_type)
expose_val(name, w_struct_type.constructor)
# this is almost always also provided
expose_val("make-" + name, w_struct_type.constructor)
expose_val(name + "?", w_struct_type.predicate)
struct_acc = w_struct_type.accessor
for field, field_name in enumerate(fields):
w_name = values.W_Symbol.make(field_name)
acc = values_struct.W_StructFieldAccessor(struct_acc, field, w_name)
expose_val(name + "-" + field_name, acc)
return w_struct_type
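# Illustrative expansion (not part of the original source): a call like
# define_struct("exn", fields=["message", "continuation-marks"]) exposes
# struct:exn, exn, make-exn, exn?, exn-message and exn-continuation-marks.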
exn = \
define_struct("exn", values.w_null, ["message", "continuation-marks"])
exn_fail = \
define_struct("exn:fail", exn)
exn_fail_contract = \
define_struct("exn:fail:contract", exn_fail)
exn_fail_contract_arity = \
define_struct("exn:fail:contract:arity", exn_fail)
exn_fail_contract_divide_by_zero = \
define_struct("exn:fail:contract:divide-by-zero", exn_fail)
exn_fail_contract_non_fixnum_result = \
define_struct("exn:fail:contract:non-fixnum-result", exn_fail)
exn_fail_contract_continuation = \
define_struct("exn:fail:contract:continuation", exn_fail)
exn_fail_contract_variable = \
define_struct("exn:fail:contract:variable", exn_fail, ["id"])
exn_fail_syntax = \
define_struct("exn:fail:syntax", exn_fail, ["exprs"])
exn_fail_syntax_unbound = \
define_struct("exn:fail:syntax:unbound", exn_fail_syntax)
exn_fail_syntax_missing_module = \
define_struct("exn:fail:syntax:missing-module", exn_fail_syntax, ["path"])
exn_fail_read = \
define_struct("exn:fail:read", exn_fail, ["srclocs"])
exn_fail_read_eof = \
define_struct("exn:fail:read:eof", exn_fail_read)
exn_fail_read_non_char = \
define_struct("exn:fail:read:non-char", exn_fail_read)
exn_fail_fs = \
define_struct("exn:fail:filesystem", exn_fail)
exn_fail_fs_exists = \
define_struct("exn:fail:filesystem:exists", exn_fail_fs)
exn_fail_fs_version = \
define_struct("exn:fail:filesystem:version", exn_fail_fs)
exn_fail_fs_errno = \
define_struct("exn:fail:filesystem:errno", exn_fail_fs, ["errno"])
exn_fail_fs_missing_module = \
define_struct("exn:fail:filesystem:missing-module", exn_fail_fs, ["path"])
exn_fail_network = \
define_struct("exn:fail:network", exn_fail)
exn_fail_network_errno = \
define_struct("exn:fail:network:errno", exn_fail_network, ["errno"])
exn_fail_out_of_memory = \
define_struct("exn:fail:out-of-memory", exn_fail)
exn_fail_unsupported = \
define_struct("exn:fail:unsupported", exn_fail)
exn_fail_user = \
define_struct("exn:fail:user", exn_fail)
exn_break = \
define_struct("exn:break", exn)
exn_break_hang_up = \
define_struct("exn:break:hang-up", exn_break)
exn_break_terminate = \
define_struct("exn:break:terminate", exn_break)
srcloc = define_struct("srcloc",
fields=["source", "line", "column", "position", "span"])
date_struct = define_struct("date", fields=["second",
"minute",
"hour",
"day",
"month",
"year",
"week-day",
"year-day",
"dst?"
"time-zone-offset"])
date_star_struct = define_struct("date*", date_struct,
fields=["nanosecond", "time-zone-name"])
arity_at_least = define_struct("arity-at-least", values.w_null, ["value"])
for args in [ ("char-symbolic?",),
("char-graphic?",),
("char-blank?",),
("char-iso-control?",),
("char-punctuation?",),
("char-upper-case?",),
("char-title-case?",),
("char-lower-case?",),
]:
make_dummy_char_pred(*args)
for args in [ ("subprocess?",),
("file-stream-port?",),
("terminal-port?",),
("byte-ready?",),
("char-ready?",),
("handle-evt?",),
("thread?",),
("thread-running?",),
("thread-dead?",),
("semaphore-try-wait?",),
("link-exists?",),
("chaperone-channel",),
("impersonate-channel",),
]:
define_nyi(*args)
@expose("unsafe-make-place-local", [values.W_Object])
def unsafe_make_place_local(v):
return values.W_MBox(v)
@expose("unsafe-place-local-ref", [values.W_MBox], simple=False)
def unsafe_place_local_ref(p, env, cont):
return p.unbox(env, cont)
@expose("unsafe-place-local-set | !", [values.W_MBox, values.W_Object], simple=False)
def unsafe_place_local_set(p, v, env, cont):
return p.set_box(v, env, cont)
@expose("set!-transformer?", [values.W_Object], only_old=True)
def set_bang_transformer(v):
if isinstance(v, values.W_AssignmentTransformer):
return values.w_true
elif isinstance(v, values_struct.W_RootStruct):
w_property = v.struct_type().read_property(
            values_struct.w_prop_set_bang_transformer)
return values.W_Bool.make(w_property is not None)
else:
return values.w_false
@expose("object-name", [values.W_Object])
def object_name(v):
if isinstance(v, values.W_Prim):
return v.name
elif isinstance(v, values_regex.W_AnyRegexp) or isinstance(v, values.W_Port):
return v.obj_name()
return values_string.W_String.fromstr_utf8(v.tostring()) # XXX really?
@expose("find-main-config", [])
def find_main_config():
return values.w_false
@expose("version", [])
def version():
from pycket.env import w_version
version = w_version.get_version()
if version == '':
version = "old-pycket"
return values_string.W_String.fromascii("unknown version" if version is None else version)
@continuation
def sem_post_cont(sem, env, cont, vals):
sem.post()
from pycket.interpreter import return_multi_vals
return return_multi_vals(vals, env, cont)
@expose("call-with-semaphore", simple=False, extra_info=True)
def call_with_sem(args, env, cont, extra_call_info):
if len(args) < 2:
raise SchemeException("error call-with-semaphore")
sem = args[0]
f = args[1]
if len(args) == 2:
new_args = []
fail = None
else:
|
import sys
from resources.datatables import Options
from resources.datatables import StateStatus
def addPlanetSpawns(core, planet):
stcSvc = core.staticService
objSvc = core.objectService
#junkdealer
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5694), float(6.5), float(4182), float(0.707), float(-0.707))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5717), float(6.5), float(4159), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5086), float(6), float(4142), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5147), float(6.5), float(4158), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5114), float(6.5), float(4161), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5222), float(6), float(4217), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5127), float(6), float(4239), float(0.71), float(0.71))
	stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5761), float(6.6), float(4234), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5475), float(6), float(4105), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-4999), float(6), float(4119), float(0.71), float(0.71))
	stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5883), float(6), float(4214), float(0.71), float(0.71))
return
|
#!/usr/bin/env python
#
# Copyright 2016 timercrack
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def str_to_number(s):
try:
if not isinstance(s, str):
return s
return int(s)
except ValueError:
return float(s)
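# Illustrative behaviour (comment added for clarity):
#   str_to_number('42')   -> 42        (parsed as int)
#   str_to_number('3.14') -> 3.14      (int() fails, float() fallback)
#   str_to_number(7)      -> 7         (non-strings are returned unchanged)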
|
'VolumeId': vol_id,
'DeleteOnTermination': True
}
}
]
)
if instance_running:
client.start_instances(InstanceIds=[instance_id])
if self.verbose:
self.log.debug(
"Deleting unencrypted volumes for: %s" % instance_id)
for v in vol_set:
client.delete_volume(VolumeId=v['VolumeId'])
# Clean-up transient tags on newly created encrypted volume.
for v, vol_id in paired:
client.delete_tags(
Resources=[vol_id],
Tags=[
{'Key': 'maid-crypt-remediation'},
{'Key': 'maid-origin-volume'},
{'Key': 'maid-instance-device'}
]
)
def stop_instance(self, client, instance_id):
instance_state = self.instance_map[instance_id]['State']['Name']
if instance_state in ('shutting-down', 'terminated'):
self.log.debug('Skipping terminating instance: %s' % instance_id)
return
elif instance_state in ('running',):
client.stop_instances(InstanceIds=[instance_id])
self.wait_on_resource(client, instance_id=instance_id)
return True
return False
def create_encrypted_volume(self, ec2, v, key_id, instance_id):
# Create a current snapshot
results = ec2.create_snapshot(
VolumeId=v['VolumeId'],
Description="maid transient snapshot for encryption",)
transient_snapshots = [results['SnapshotId']]
ec2.create_tags(
Resources=[results['SnapshotId']],
Tags=[
{'Key': 'maid-crypto-remediation', 'Value': 'true'}])
self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
# Create encrypted snapshot from current
results = ec2.copy_snapshot(
SourceSnapshotId=results['SnapshotId'],
SourceRegion=v['AvailabilityZone'][:-1],
Description='maid transient snapshot for encryption',
Encrypted=True,
KmsKeyId=key_id)
transient_snapshots.append(results['SnapshotId'])
ec2.create_tags(
Resources=[results['SnapshotId']],
Tags=[
{'Key': 'maid-crypto-remediation', 'Value': 'true'}
])
self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
# Create encrypted volume, also tag so we can recover
results = ec2.create_volume(
Size=v['Size'],
VolumeType=v['VolumeType'],
SnapshotId=results['SnapshotId'],
AvailabilityZone=v['AvailabilityZone'],
Encrypted=True)
ec2.create_tags(
Resources=[results['VolumeId']],
Tags=[
{'Key': 'maid-crypt-remediation', 'Value': instance_id},
{'Key': 'maid-origin-volume', 'Value': v['VolumeId']},
{'Key': 'maid-instance-device',
'Value': v['Attachments'][0]['Device']}])
# Wait on encrypted volume creation
self.wait_on_resource(ec2, volume_id=results['VolumeId'])
# Delete transient snapshots
for sid in transient_snapshots:
ec2.delete_snapshot(SnapshotId=sid)
return results['VolumeId']
def get_encryption_key(self):
kms = local_session(self.manager.session_factory).client('kms')
key_alias = self.data.get('key')
result = kms.describe_key(KeyId=key_alias)
key_id = result['KeyMetadata']['KeyId']
return key_id
def wait_on_resource(self, *args, **kw):
# Sigh this is dirty, but failure in the middle of our workflow
# due to overly long resource creation is complex to unwind,
# with multi-volume instances. Wait up to three times (actual
        # wait time is a per resource type configuration).
# Note we wait for all resource creation before attempting to
# patch an instance, so even on resource creation failure, the
# instance is not modified
try:
return self._wait_on_resource(*args, **kw)
except Exception:
try:
return self._wait_on_resource(*args, **kw)
except Exception:
return self._wait_on_resource(*args, **kw)
def _wait_on_resource(
self, client, snapshot_id=None, volume_id=None, instance_id=None):
# boto client waiters poll every 15 seconds up to a max 600s (5m)
if snapshot_id:
if self.verbose:
self.log.debug(
"Waiting on snapshot completion %s" % snapshot_id)
waiter = client.get_waiter('snapshot_completed')
waiter.wait(SnapshotIds=[snapshot_id])
if self.verbose:
self.log.debug("Snapshot: %s completed" % snapshot_id)
elif volume_id:
if self.verbose:
self.log.debug("Waiting on volume creation %s" % volume_id)
waiter = client.get_waiter('volume_available')
waiter.wait(VolumeIds=[volume_id])
if self.verbose:
self.log.debug("Volume: %s create | d" % volume_id)
elif | instance_id:
if self.verbose:
self.log.debug("Waiting on instance stop")
waiter = client.get_waiter('instance_stopped')
waiter.wait(InstanceIds=[instance_id])
if self.verbose:
self.log.debug("Instance: %s stopped" % instance_id)
@EBS.action_registry.register('snapshot')
class CreateSnapshot(BaseAction):
"""Snapshot an EBS volume.
Tags may be optionally added to the snapshot during creation.
- `copy-volume-tags` copies all the tags from the specified
volume to the corresponding snapshot.
- `copy-tags` copies the listed tags from each volume
to the snapshot. This is mutually exclusive with
`copy-volume-tags`.
- `tags` allows new tags to be added to each snapshot. If
no tags are specified, then the tag `custodian_snapshot`
is added.
The default behavior is `copy-volume-tags: true`.
:example:
.. code-block:: yaml
policies:
- name: snapshot-volumes
resource: ebs
filters:
- Attachments: []
- State: available
actions:
- type: snapshot
copy-tags:
- Name
tags:
custodian_snapshot: True
"""
schema = type_schema(
'snapshot',
**{'copy-tags': {'type': 'array', 'items': {'type': 'string'}},
'copy-volume-tags': {'type': 'boolean'},
'tags': {'type': 'object'}})
permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)
def validate(self):
if self.data.get('copy-tags') and 'copy-volume-tags' in self.data:
raise PolicyValidationError(
"Can specify copy-tags or copy-volume-tags, not both")
def process(self, volumes):
client = local_session(self.manager.session_factory).client('ec2')
retry = get_retry(['Throttled'], max_attempts=5)
for vol in volumes:
vol_id = vol['VolumeId']
tags = [{
'ResourceType': 'snapshot',
'Tags': self.get_snapshot_tags(vol)
}]
retry(self.process_volume, client=client, volume=vol_id, tags=tags)
def process_volume(self, client, volume, tags):
try:
client.create_snapshot(VolumeId=volume, TagSpecifications=tags)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidVolume.NotFound':
return
raise
def get_snapshot_tags(self, resource):
user_tags = self.data.get('tags', {}) or {'custodian_snapshot': ''}
copy_tags = self.data.get('copy-tags', []) or self.data.get('copy-volume-tags', True)
return coalesce_copy_user_tag |
from django_comments.forms import CommentForm
from bogofilter.models import BogofilterComment
import time
class BogofilterCommentForm(CommentForm):
def get_comment_model(self):
        return BogofilterComment
|
import subprocess
import pynotify
import time
def notify_with_subprocess(title, message):
subprocess.Popen(['notify-send', title, message])
return
def notify_with_pynotify(title, message):
    pynotify.init("Test")
notice = pynotify.Notification(title, message)
notice.show()
return
def update_with_pynotify():
pynotify.init("app_name")
n = pynotify.Notification("", "message A", icon='some_icon')
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_timeout(10)
n.show()
n.update("","message B")
n.show()
def callback_function(notification=None, action=None, data=None):
print "It worked!"
pynotify.init("app_name")
n = pynotify.Notification("Title", "body")
n.set_urgency(pynotify.URGENCY_NORMAL)
n.set_timeout(100)
n.show()
#n.add_action("clicked","Button text", callback_function, None)
#n.update("Notification", "Update for you")
#n.show()
#update_with_pynotify()
|
#!/usr/bin/env python
# Creates and saves a JSON file to update the D3.js graphs
import MySQLdb
import MySQLdb.cursors
import json
import Reference as r
import logging
def CreateSentimentIndex(NegativeWords, PositiveWords, TotalWords):
''' Creates a sentiment value for the word counts'''
if TotalWords != 0:
Sentiment = ((PositiveWords - NegativeWords)/float(TotalWords))
return Sentiment
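# Worked example (hypothetical counts, added for clarity): 10 negative and 30
# positive words out of 200 total give (30 - 10) / 200.0 = 0.1; when
# TotalWords is 0 the function falls through and implicitly returns None.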
def CreateJsonData(QueryResults):
''' Creates a list of dictionaries containing the dates and sentiment indexes'''
Output = []
for Row in QueryResults:
RowDate = Row['DateTime'].strftime('%Y-%m-%d %H:%M:%S')
        RowSentiment = CreateSentimentIndex(Row['Negative'], Row['Positive'], Row['TotalWords'])
Output.append({"date" : RowDate, "index" : RowSentiment})
return Output
def OutputJsonFile(InputDictionary):
'''Saves a dictionary to an output file in a JSON format'''
JsonOutput = json.dumps(InputDictionary)
OutputFileName = 'json/twittermetrics_sentiment.js'
FileOutput = open(OutputFileName,'w')
print >> FileOutput, JsonOutput
return True
def CreateJsonFile():
'''Extracts data from the database and saves a JSON file to the server'''
FN_NAME = "CreateJsonFile"
dbDict = MySQLdb.connect(
host=r.DB_HOST,
user=r.DB_USER,
passwd=r.DB_PASSWORD,
db=r.DB_NAME,
cursorclass=MySQLdb.cursors.DictCursor
)
curDict = dbDict.cursor()
Query = "SELECT " + r.KR_FIELD_TOTALWORDS + ", " + r.KR_FIELD_POSITIVE + ", " + r.KR_FIELD_NEGATIVE + ", " + r.KR_FIELD_DATETIME + " FROM " + r.DB_TABLE_KEYWORDSRESULTS + ";"
    logging.debug('%s - %s', FN_NAME, Query)
curDict.execute(Query)
QueryResults = curDict.fetchall()
Output = CreateJsonData(QueryResults)
ProcessResult = OutputJsonFile(Output)
logging.info('%s - JSON file created and saved to server with result %s', FN_NAME, ProcessResult)
    dbDict.close()
return ProcessResult
|
#
# DBus interface for payload Repo files image source.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from pyanaconda.modules.common.constants.interfaces import PAYLOAD_SOURCE_REPO_FILES
from pyanaconda.modules.payloads.source.source_base_interface import PayloadSourceBaseInterface
@dbus_interface(PAYLOAD_SOURCE_REPO_FILES.interface_name)
class RepoFilesSourceInterface(PayloadSourceBaseInterface):
"""Interface for the payload Repo files image source."""
pass
|
import pandas as pd
import numpy as np
import sklearn.preprocessing
from sklearn.linear_model import LinearRegression, LogisticRegression
FILENAME = 'BrainMets.xlsx'
MONTHS_TO_LIVE = 9
N_TRAIN = 250
def categorical_indices(values):
"""
When we have a categorical feature like 'cancer type', we want to transform its unique values
to indices in some range [0, ..., n-1] where n is the number of categories
"""
unique = values.unique()
indices = np.zeros(len(values), dtype=int)
for (i, v) in enumerate(sorted(unique)):
indices[np.array(values == v)] = i
return indices
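# Hypothetical example (not in the original file): categories are indexed in
# sorted order, e.g.
#   categorical_indices(pd.Series(['breast', 'nsclc', 'breast', 'rcc']))
#   -> array([0, 1, 0, 2])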
def load_dataframe(filename = FILENAME):
df = pd.read_excel(filename, 'DATA', header=1)
df['cancer type'] = df['cancer type'].str.lower().str.strip()
# df['cancer type'] = categorical_indices(cancer_type)
df['Brain Tumor Sx'] = df['Brain Tumor Sx'].astype('float')
# df['Brain Tumor Sx'] = categorical_indices(brain_tumor_sx)
return df
def get_expert_predictions(df):
expert_predictions = {}
experts = [
'Prediction(Cleveland Clinic)',
' Prediction (Lanie Francis)',
'Prediction(Flickinger)',
'Prediction(Loefler',
'Prediction(Knisely)',
'Prediction(Lunsford)',
'Prediction (Tahrini)',
'Prediction (Sheehan)',
'Prediction (Linskey)',
'Prediction(friedman)',
'Prediction(Stupp)',
'Prediction(Rakfal)',
'Prediction(Rush)',
' Prediction( Kondziolka)'
]
for expert in experts:
expert_predictions[expert] = df[expert]
return expert_predictions
def feature_selection(df, Y, training_set_mask):
Y_training = Y[training_set_mask]
df_training = df.ix[training_set_mask]
fields = []
n_tumors = df['# of tumors']
n_tumors_training = n_tumors[training_set_mask]
def impute(X, df, name, model, postprocess = lambda x: x, maxval = None):
Y = df[name]
    missing = np.array(Y.isnull())
X_train = X[~(missing)]
Y_train = Y[~missing]
X_test = X[missing]
model.fit(X_train, Y_train)
Y_test = model.predict(X_test)
Y_test = postprocess(Y_test)
if maxval:
Y_test = np.minimum(Y_test, maxval)
Y_filled = Y.copy()
Y_filled[missing] = Y_test
df[name] = Y_filled
def impute_missing_features(df):
input_fields = df[[
'Brain Tumor Sx',
'RPA',
'ECOG',
        'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]]
X = np.array(input_fields)
missing = df['Extracranial Disease Status'].isnull()
impute(X, df, 'Extracranial Disease Status', LogisticRegression())
impute(X, df, 'K Score', LinearRegression(), lambda x: 10*(x.astype('int')/10), maxval = 100)
return df
def extract_features(df, binarize_categorical):
df = df.copy()
df['log_age']= np.log2(df['age'])
df = impute_missing_features(df)
df['# of tumors > 1'] = df['# of tumors'] > 1
df['# of tumors > 4'] = df['# of tumors'] > 4
df['# of tumors > 10'] = df['# of tumors'] > 10
df['age <45'] = df['age'] < 45
df['age 45-55'] = (df['age'] >= 45) & (df['age'] < 55)
df['age 55-65'] = (df['age'] >= 55) & (df['age'] < 65)
df['age 65-75'] = (df['age'] >= 65) & (df['age'] < 75)
df['age >=75'] = (df['age'] >= 75)
df['age <40'] = df['age'] < 40
df['age 40-50'] = (df['age'] >= 40) & (df['age'] < 50)
df['age 50-60'] = (df['age'] >= 50) & (df['age'] < 60)
df['age 50-70'] = (df['age'] >= 50) & (df['age'] < 70)
df['age 60-70'] = (df['age'] >= 60) & (df['age'] < 70)
df['age 70-80'] = (df['age'] >= 70) & (df['age'] < 80)
df['age >=80'] = (df['age'] >= 80)
df['age >=70'] =df['age'] >= 70
df['age 45-60'] = (df['age'] >= 45) & (df['age'] < 60)
df['Normalized K Score'] = df['K Score'] / 100.0
continuous_fields = [
'# of tumors > 1',
'age 50-70',
'age >=70',
'Normalized K Score',
]
binary_fields = [
'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]
    # 9, 12, 14, 15, 16, 18, 20, 22, 25
categorical_fields = [
'Extracranial Disease Status',
'cancer type',
'Brain Tumor Sx',
'RPA',
'ECOG',
]
vectors = []
for field in continuous_fields + binary_fields:
v = np.array(df[field]).astype('float')
vectors.append(v)
for field in categorical_fields:
values = df[field]
if binarize_categorical:
unique = np.unique(values)
print "Expanding %s into %d indicator variables: %s" % (field, len(unique), unique)
for i, v in enumerate(sorted(unique)):
print len(vectors), field, v, np.sum(values == v)
vec = np.zeros(len(values), dtype='float')
vec[np.array(values == v)] = 1
vectors.append(vec)
else:
vectors.append(categorical_indices(values))
X = np.vstack(vectors).T
print X.dtype, X.shape
return X
def make_dataset(df, binarize_categorical):
"""
Load dataset with continuous outputs
"""
dead = np.array(df['Dead'] == 1)
Y = np.array(np.array(df['SurvivalMonths']))
expert_predictions = get_expert_predictions(df)
test_set_mask = np.zeros(len(df), dtype=bool)
# training set is any data point for which we have no expert
# predictions
for expert_Y in expert_predictions.values():
test_set_mask |= ~expert_Y.isnull()
X = extract_features(df, binarize_categorical)
return X, Y, dead, expert_predictions, test_set_mask
def make_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
X, Y_continuous, dead, expert_predictions, test_set_mask = make_dataset(df, binarize_categorical)
# get rid of patients for whom we don't have a long enough history
mask = np.array(dead | (Y_continuous >= months_to_live))
X = X[mask]
Y = dead[mask] & (Y_continuous[mask] < months_to_live)
return X, Y
# TODO: fill in missing cancer types
def annotate_5year_survival(df):
five_year_survival = {
'breast': 25,
'nsclc': 4,
'sclc' : None,
'rcc' : 12.1,
'melanoma' : 16.1,
'carcinoid' : None,
'endometrial' : 17.5,
'sarcoma' : None,
'colon' : 12.9,
'rectal' : None,
'prostate' : 28,
'uterine' : None ,
'nasopharyngeal' : None,
'thyroid' : 54.7,
}
def load_dataset(filename = FILENAME, binarize_categorical = True):
df = load_dataframe(filename)
return make_dataset(df, binarize_categorical = binarize_categorical)
def load_labeled_dataset(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
df = load_dataframe(filename)
return make_labeled_dataset(df, months_to_live, binarize_categorical = binarize_categorical)
def split_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN, binarize_categorical = True, shuffle = True, verbose = True):
X, y = make_labeled_dataset(df, months_to_live = months_to_live, binarize_categorical = binarize_categorical)
if shuffle:
idx = np.arange(len(y))
np.random.shuffle(idx)
y = y[idx]
X = X[idx]
Xtrain = X[:n_train]
Ytrain = y[:n_train]
Xtest = X[n_train:]
Ytest = y[n_train:]
if verbose:
print Xtest[[0,1,2], :]
print Ytest[[0,1,2]]
print np.mean(Ytrain)
print np.mean(Ytest)
print Xtrain.shape
print Xtest.shape
return Xtrain, Ytrain, Xtest, Ytest
def load_dataset_splits(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN):
df = load_dataframe(filename)
    return split_labeled_dataset(df, months_to_live, n_train)
|
# -*- coding: utf-8 -*-
from rdflib import Namespace
ONTOLEX = Namespace("http://www.w3.org/ns/lemon/ontolex#")
LEXINFO = Namespace("http://www.lexinfo.net/ontology/2.0/lexinfo#")
DECOMP = Namespace("http://www.w3.org/ns/lemon/decomp#")
ISOCAT = Namespace("http://www.isocat.org/datcat/")
LIME = Namespace("http://www.w3.org/ns/lemon/lime#")
|
'''
'''
import sys
import os
import gzip
import regex
# 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def openFile(filename, mode="r", create_dir=False):
    '''open file called *filename* with mode *mode*.
gzip - compressed files are recognized by the
suffix ``.gz`` and opened transparently.
Note that there are differences in the file
like objects returned, for example in the
ability to seek.
Arguments
---------
filename : string
mode : string
File opening mode
create_dir : bool
If True, the directory containing filename
        will be created if it does not exist.
Returns
-------
File or file-like object in case of gzip compressed files.
'''
_, ext = os.path.splitext(filename)
if create_dir:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if ext.lower() in (".gz", ".z"):
if sys.version_info.major >= 3:
if mode == "r":
return gzip.open(filename, 'rt', encoding="ascii")
elif mode == "w":
return gzip.open(filename, 'wt', encoding="ascii")
else:
raise NotImplementedError(
"mode '{}' not implemented".format(mode))
else:
return gzip.open(filename, mode)
else:
return open(filename, mode)
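# Usage sketch (illustrative; 'counts.tsv.gz' is a hypothetical path):
#   with openFile('counts.tsv.gz', mode='w', create_dir=True) as outf:
#       outf.write('gene\tcount\n')
# The ".gz"/".z" suffix triggers transparent gzip (de)compression; any other
# suffix falls back to a plain open() call.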
def checkError(barcode, whitelist, limit=1):
near_matches = set()
comp_regex = regex.compile("(%s){e<=1}" % barcode)
comp_regex2 = regex.compile("(%s){e<=1}" % barcode[:-1])
b_length = len(barcode)
for whitelisted_barcode in whitelist:
w_length = len(whitelisted_barcode)
if barcode == whitelisted_barcode:
continue
if (max(b_length, w_length) > (min(b_length, w_length) + 1)):
continue
if comp_regex.match(whitelisted_barcode) or comp_regex2.match(whitelisted_barcode):
near_matches.add(whitelisted_barcode)
if len(near_matches) > limit:
return near_matches
return near_matches
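# Behaviour note (comment added for clarity): checkError collects whitelist
# barcodes within one error (mismatch, insertion or deletion) of `barcode`,
# also matching against the barcode truncated by one base, and returns early
# as soon as more than `limit` near-matches have been found.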
# partially 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def FastqIterator(infile):
'''iterate over contents of fastq file.'''
while 1:
line1 = infile.readline()
if not line1:
break
if not line1.startswith('@'):
raise ValueError("parsing error: expected '@' in line %s" % line1)
line2 = infile.readline()
line3 = infile.readline()
if not line3.startswith('+'):
raise ValueError("parsing error: expected '+' in line %s" % line3)
line4 = infile.readline()
# incomplete entry
if not line4:
raise ValueError("incomplete entry for %s" % line1)
read_id, seq, qualities = line1[:-1], line2[:-1], line4[:-1]
yield ("", read_id, seq, qualities)
|
from comics.aggregator.crawler import CrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "PartiallyClips"
    language = "en"
url = "http://partiallyclips.com/"
    start_date = "2002-01-01"
rights = "Robert T. Balder"
active = False
class Crawler(CrawlerBase):
def crawl(self, pub_date):
pass
|
#!/usr/bin/env python
"""Testing a sprite.
The ball should bounce off the sides of the window. You may resize the
window.
This test should just run without failing.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import unittest
from pyglet.gl import glClear
import pyglet.window
import pyglet.window.event
from pyglet import clock
from scene2d import Sprite, Image2d, FlatView
from scene2d.image import TintEffect
from scene2d.camera import FlatCamera
ball_png = os.path.join(os.path.dirname(__file__), 'ball.png')
class BouncySprite(Sprite):
def update(self):
# move, check bounds
p = self.properties
self.x += p['dx']
self.y += p['dy']
if self.left < 0:
self.left = 0
p['dx'] = -p['dx']
elif self.right > 320:
self.right = 320
p['dx'] = -p['dx']
if self.bottom < 0:
self.bottom = 0
p['dy'] = -p['dy']
elif self.top > 320:
self.top = 320
p['dy'] = -p['dy']
class SpriteOverlapTest(unittest.TestCase):
def test_sprite(self):
w = pyglet.window.Window(width=320, height=320)
image = Image2d.load(ball_png)
ball1 = BouncySprite(0, 0, 64, 64, image, properties=dict(dx=10, dy=5))
        ball2 = BouncySprite(288, 0, 64, 64, image,
                              properties=dict(dx=-10, dy=5))
view = FlatView(0, 0, 320, 320, sprites=[ball1, ball2])
view.fx, view.fy = 160, 160
clock.set_fps_limit(60)
e = TintEffect((.5, 1, .5, 1))
while not w.has_exit:
clock.tick()
w.dispatch_events()
ball1.update()
ball2.update()
if ball1.overlaps(ball2):
if 'overlap' not in ball2.properties:
ball2.properties['overlap'] = e
ball2.add_effect(e)
elif 'overlap' in ball2.properties:
ball2.remove_effect(e)
del ball2.properties['overlap']
view.clear()
view.draw()
w.flip()
w.close()
unittest.main()
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "itf.settings")
try:
from django.core.management import execute_from_command_line
    except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
            raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestScale(TestCase):
def testRegression(self):
inputSize = 1024
input = range(inputSize)
factor = 0.5
expected = [factor * n for n in input]
output = Scale(factor=factor, clipping=False)(input)
self.assertEqualVector(output, expected)
def testZero(self):
inputSize = 1024
input = [0] * inputSize
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testEmpty(self):
input = []
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testClipping(self):
inputSize = 1024
maxAbsValue= 10
factor = 1
input = [n + maxAbsValue for n in range(inputSize)]
expected = [maxAbsValue] * inputSize
output = Scale(factor=factor, clipping=True, maxAbsValue=maxAbsValue)(input)
self.assertEqualVector(output, expected)
def testInvalidParam(self):
self.assertConfigureFails(Scale(), { 'maxAbsValue': -1 })
suite = allTests(TestScale)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
# PyParticles : Particles simulation in python
# Copyright (C) 2012 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pyparticles.pset.boundary as bd
class ReboundBoundary( bd.Boundary ):
def __init__( self , bound=(-1,1) , dim=3 ):
self.set_boundary( bound , dim )
self.set_normals()
def set_normals( self ):
self.__N = np.zeros( ( 2*self.dim , self.dim ) )
#print( self.__N )
if self.dim >= 2 :
self.__N[0,:2] = np.array( [1,0] )
self.__N[1,:2] = np.array( [-1,0] )
self.__N[2,:2] = np.array( [0,1] )
self.__N[3,:2] = np.array( [0,-1] )
if self.dim == 3 :
self.__N[4,:] = np.array( [0,0,1] )
self.__N[5,:] = np.array( [0,0,-1] )
    def boundary( self , p_set ):
v_mi = np.zeros((3))
v_mx = np.zeros((3))
for i in range( self.dim ) :
j = 2*i
v_mi[:] = 0.0
v_mx[:] = 0.0
#delta = self.bound[i,1] - self.bound[i,0]
            b_mi = p_set.X[:,i] < self.bound[i,0]
b_mx = p_set.X[:,i] > self.bound[i,1]
v_mi[i] = self.bound[i,0]
v_mx[i] = self.bound[i,1]
p_set.X[b_mi,:] = p_set.X[b_mi,:] + 2.0 * self.__N[j,:] * ( v_mi - p_set.X[b_mi,:] )
p_set.X[b_mx,:] = p_set.X[b_mx,:] + 2.0 * self.__N[j,:] * ( v_mx - p_set.X[b_mx,:] )
p_set.V[b_mi,i] = -p_set.V[b_mi,i]
p_set.V[b_mx,i] = -p_set.V[b_mx,i]
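        # Geometry note (illustrative, assuming the default bound=(-1, 1)): a
        # particle that ends a step at x = 1.2 is folded back about the
        # violated face to x = 2 * 1.0 - 1.2 = 0.8, and the matching velocity
        # component has its sign flipped.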
|
, matricula , full_search = False ):
# Exibicao default de matricula/nome/curso/situacao/periodo/CRA
# full search para as demais informacoes
# Main url
self.aluno_online_url = 'https://www.alunoonline.uerj.br'
# parameters
self.matricula = matricula
self.full_search = full_search
# Main html
self.main_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'SinteseFormacao' } )
# Main data
self.nome = self._extract_nome()
self.cra = self._extract_cra()
self.curso = self._extract_curso()
self.situacao = self._extract_situacao()
self.periodo = self._extract_periodo()
# get and extract personal data
if ( self.full_search ):
            # contact data
self.dados_contato_html = self._get_aluno_online_html( '/recadastramento_dados_contato/recadastramento_dados_contato.php' )
self.telefone = self._extract_telefone()
self.email = self._extract_email()
self.endereco = self._extract_endereco()
self.cep = self._extract_cep()
            # personal data
self.dados_pessoais_html = self._get_aluno_online_html( '/recadastramento_dados_pessoais/recadastramento_dados_pessoais.php' )
self.nascimento = self._extract_nascimento()
self.sexo = self._extract_sexo()
self.estado_civil = self._extract_estado_civil()
self.naturalidade = self._extract_naturalidade()
self.nacionalidade = self._extract_nacionalidade()
self.pai = self._extract_pai()
self.mae = self._extract_mae()
self.cpf = self._extract_cpf()
            self.rg = self._extract_rg() # Number, Issuing Body, State (UF), Country, Issue Date, Expiry Date
            self.titulo_eleitor = self._extract_titulo_eleitor() # Number, Electoral Zone, Section, State (UF), Issue Date
            self.certificado_reservista = self._extract_certificado_reservista() # Number, Serial No., Issuing Body, Type, Issue Date, State (UF)
            self.ensino_medio = self._extract_ensino_medio() # School Name, Country, State (UF), Type of Education, Completion Date
            # courses taken (disciplinas)
self.disciplinas_realizadas_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'DisciplinasRealizadas' } )
self.disciplinas = self._extract_disciplinas()
    def _get_aluno_online_html( self , endpoint , parameters = None ):
        # default to None so calls do not share and mutate a single dict instance
        result = None
        try:
            parameters = dict( parameters ) if parameters else {}
            parameters.update( { 'matricula': self.matricula } )
data = urlencode( parameters )
request = Request( self.aluno_online_url + endpoint , data.encode( 'ascii' ) )
response = urlopen( request )
result = BeautifulSoup( response.read() , 'html.parser' )
except:
pass
return result
def _extract_nome( self ):
try:
nome = self.main_html.find( id = "table_cabecalho_rodape" ).find_all( 'font' )[2].string[15:]
except:
nome = ''
return nome
def _extract_cra( self ):
try:
cra = float( self.main_html.find_all( 'div' )[7].text[16:].replace( ',' , '.' ) )
except:
cra = ''
return cra
def _extract_curso( self ):
try:
curso = self.main_html.find_all( 'div' )[6].text[8:]
except:
curso = ''
return curso
def _extract_situacao( self ):
try:
            situacao = self.main_html.find_all( 'div' )[4].text[11:]
except:
situacao = ''
return situacao
    def _extract_periodo( self ):
        periodo = ''
        try:
            for element in self.main_html.select( 'div > b' ):
                if ( element.text == "Períodos Utilizados/Em Uso para Integralização Curricular:" ):
                    periodo = int( element.parent.text[59:] )
        except:
            periodo = ''
        return periodo
def _format_telefone( self , ddd , tel , ramal ):
return '({0}) {1} [{2}]'.format( ddd , tel[:4] + '-' + tel[4:] , ( 'Sem Ramal' if not ramal else ( 'Ramal ' + ramal ) ) )
def _extract_telefone( self ):
telefone = []
# Tel 1..2
for i in range( 1 , 3 ):
try:
ddd = self.dados_contato_html.find( 'input' , { 'name': 'num_ddd_' + str( i ) + '_pag' } ).get( 'value' )
tel = self.dados_contato_html.find( 'input' , { 'name': 'num_tel_' + str( i ) + '_pag' } ).get( 'value' )
ramal = self.dados_contato_html.find( 'input' , { 'name': 'num_ramal_' + str( i ) + '_pag' } ).get( 'value' )
telefone.append( self._format_telefone( ddd , tel , ramal ) )
except:
pass
return telefone
def _extract_email( self ):
try:
email = self.dados_contato_html.find( 'input' , { 'name': 'dsc_email_pag' } ).get( 'value' )
except:
email = ''
return email
def _extract_endereco( self ):
try:
endereco = self.dados_contato_html.find( 'input' , { 'name': 'txt_end_pag' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.find( 'input' , { 'name': 'cod_bairro_input' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_munic_pag"] option[selected]' )[0].text
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_uf_pag"] option[selected]' )[0].text
except:
endereco = ''
return endereco
def _extract_cep( self ):
try:
cep = self.dados_contato_html.find( 'input' , { 'name': 'num_cep_pag' } ).get( 'value' )
cep = cep[:5] + '-' + cep[5:]
except:
cep = ''
return cep
def _extract_nascimento( self ):
try:
nascimento = self.dados_pessoais_html.find_all( 'div' )[2].text[15:]
except:
nascimento = ''
return nascimento
def _extract_sexo( self ):
try:
sexo = self.dados_pessoais_html.find_all( 'div' )[3].text[6:]
except:
sexo = ''
return sexo
def _extract_estado_civil( self ):
try:
civil = self.dados_pessoais_html.find_all( 'div' )[4].text[12:]
except:
civil = ''
return civil
def _extract_naturalidade( self ):
try:
naturalidade = self.dados_pessoais_html.find_all( 'div' )[5].text[14:]
except:
naturalidade = ''
return naturalidade
def _extract_nacionalidade( self ):
try:
nacionalidade = self.dados_pessoais_html.find_all( 'div' )[6].text[15:]
except:
nacionalidade = ''
return nacionalidade
def _extract_pai( self ):
try:
pai = self.dados_pessoais_html.find_all( 'div' )[7].text[13:]
except:
pai = ''
return pai
def _extract_mae( self ):
try:
mae = self.dados_pessoais_html.find_all( 'div' )[8].text[13:]
except:
mae = ''
return mae
def _extract_cpf( self ):
try:
cpf = self.dados_pessoais_html.find_all( 'font' )[10].text
cpf = cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
except:
cpf = ''
return cpf
def _extract_dados_pessoais_divs( self , start , end , cut ):
arrayReturn = []
try:
array = self.dados_pessoais_html.find_all( 'div' )[start:end]
arrayReturn.append( array[0].text[cut:] )
for data in array[1:]:
text = data.text.strip()
if ( ( not 'Não Informado' in text ) and ( not '__/__/____' in text ) ):
arrayReturn.append( text )
except:
arrayReturn = ''
return arrayReturn
def _extract_rg( self ):
return self._extract_dados_pessoais_divs( 9 , 14 , 8 )
def _extract_titulo_eleitor( self ):
return self._extract_dados_pessoais_divs( 15 , 19 , 8 )
def _extract_certificado_reservista( self ):
return self._extract_dados_pessoais_divs( 20 , 25 , 8 )
def _extract_ensino_medio( self ):
return self._extract_dados_pessoais_divs( 26 , 31 , 25 )
def _extract_disciplinas( self ):
disciplinas = []
try:
for linha in self.disciplinas_realizadas_html.find_all( 'div' , style = re.compile( '^width:100%;font-size=12px;' ) ):
conteudoLinha = []
for coluna in linha.children:
conteudoColuna = coluna.string.strip()
if ( conteudoColuna and not re.match( '\\d{4}/\\d' , conteudoColuna ) ):
conteudoLinha.append( conteudoColuna )
disciplinas.append( ( '{0:60} {1:2} {2:3} {3:15} {4:10}' + ( ' {5:6} {6:15}' if ( len( conteudoLinha ) > 5 ) else '' ) ).format( *conteudoLinha ) )
except:
disciplinas = ''
return disciplinas
def _truncate( |
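# The fragment above is cut off at both ends: the class name, its import block and
# the body of _truncate fall outside the excerpt. The calls made in the methods
# shown would typically be covered by imports like these (assuming Python 3;
# BeautifulSoup comes from the third-party bs4 package):
import re
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

# Hypothetical usage, assuming the truncated class were named AlunoOnline:
# aluno = AlunoOnline( '201710012345' , full_search=True )
# print( aluno.nome , aluno.curso , aluno.cra )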