prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
u" " in_leaf = Leaf(token.NAME, u"in") in_leaf.prefix = u" " inner_args = [for_leaf, fp, in_leaf, it] if test: test.prefix = u" " if_leaf = Leaf(token.NAME, u"if") if_leaf.prefix = u" " inner_args.append(Node(syms.comp_if, [if_leaf, test])) inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) return Node(syms.atom, [Leaf(token.LBRACE, u"["), inner, Leaf(token.RBRACE, u"]")]) def FromImport(package_name, name_leafs): """ Return an import statement in the form: from package import name_leafs""" # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') #assert package_name == '.' or '.' not in package_name, "FromImport has "\ # "not been tested with dotted package names -- use at your own "\ # "peril!" for leaf in name_leafs: # Pull the leaves out of their old tree leaf.remove() children = [Leaf(token.NAME, u'from'), Leaf(token.NAME, package_name, prefix=u" "), Leaf(token.NAME, u'import', prefix=u" "), Node(syms.import_as_names, name_leafs)] imp = Node(syms.import_from, children) return imp ########################################################### ### Determine whether a node represents a given literal ########################################################### def is_tuple(node): """Does the node represent a tuple literal?""" if isinstance(node, Node) and node.children == [LParen(), RParen()]: return True return (isinstance(node, Node) and len(node.children) == 3 and isinstance(node.children[0], Leaf) and isinstance(node.children[1], Node) and isinstance(node.children[2], Leaf) and node.children[0].value == u"(" and node.children[2].value == u")") def is_list(node): """Does the node represent a list literal?""" return (isinstance(node, Node) and len(node.children) > 1 and isinstance(node.children[0], Leaf) and isinstance(node.children[-1], Leaf) and node.children[0].value == u"[" and node.children[-1].value == u"]") ########################################################### ### Misc 
########################################################### def parenthesize(node): return Node(syms.atom, [LParen(), node, RParen()]) consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum", "min", "max"]) def attr_chain(obj, attr): """Follow an attribute chain. If you have a chain of objects where a.foo -> b, b.foo-> c, etc, use this to iterate over all objects in the chain. Iteration is terminated by getattr(x, attr) is None. Args: obj: the starting object attr: the name of the chaining attribute Yields: Each successive object in the chain. """ next = getattr(obj, attr) while next: yield next next = getattr(next, attr) p0 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > """ p1 = """ power< ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | 'any' | 'all' | (any* trailer< '.' 'join' >) ) trailer< '(' node=any ')' > any* > """ p2 = """ power<
'sorted' trailer< '(' arglist<node=any any*> ')' > any* > "
"" pats_built = False def in_special_context(node): """ Returns true if node is in an environment where all that is required of it is being itterable (ie, it doesn't matter if it returns a list or an itterator). See test_map_nochange in test_fixers.py for some examples and tests. """ global p0, p1, p2, pats_built if not pats_built: p1 = patcomp.compile_pattern(p1) p0 = patcomp.compile_pattern(p0) p2 = patcomp.compile_pattern(p2) pats_built = True patterns = [p0, p1, p2] for pattern, parent in zip(patterns, attr_chain(node, "parent")): results = {} if pattern.match(parent, results) and results["node"] is node: return True return False def is_probably_builtin(node): """ Check that something isn't an attribute or function name etc. """ prev = node.prev_sibling if prev is not None and prev.type == token.DOT: # Attribute lookup. return False parent = node.parent if parent.type in (syms.funcdef, syms.classdef): return False if parent.type == syms.expr_stmt and parent.children[0] is node: # Assignment. return False if parent.type == syms.parameters or \ (parent.type == syms.typedargslist and ( (prev is not None and prev.type == token.COMMA) or parent.children[0] is node )): # The name of an argument. return False return True ########################################################### ### The following functions are to find bindings in a suite ########################################################### def make_suite(node): if node.type == syms.suite: return node node = node.clone() parent, node.parent = node.parent, None suite = Node(syms.suite, [node]) suite.parent = parent return suite def find_root(node): """Find the top level namespace.""" # Scamper up to the top level namespace while node.type != syms.file_input: assert node.parent, "Tree is insane! root found before "\ "file_input node was found." 
node = node.parent return node def does_tree_import(package, name, node): """ Returns true if name is imported from package at the top level of the tree which node belongs to. To cover the case of an import like 'import foo', use None for the package and 'foo' for the name. """ binding = find_binding(name, find_root(node), package) return bool(binding) def is_import(node): """Returns true if the node is an import statement.""" return node.type in (syms.import_name, syms.import_from) def touch_import(package, name, node): """ Works like `does_tree_import` but adds an import statement if it was not imported. """ def is_import_stmt(node): return node.type == syms.simple_stmt and node.children and \ is_import(node.children[0]) root = find_root(node) if does_tree_import(package, name, root): return # figure out where to insert the new import. First try to find # the first import and then skip to the last one. insert_pos = offset = 0 for idx, node in enumerate(root.children): if not is_import_stmt(node): continue for offset, node2 in enumerate(root.children[idx:]): if not is_import_stmt(node2): break insert_pos = idx + offset break # if there are no imports where we can insert, find the docstring. # if that also fails, we stick to the beginning of the file if insert_pos == 0: for idx, node in enumerate(root.children): if node.type == syms.simple_stmt and node.children and \ node.children[0].type == token.STRING: insert_pos = idx + 1 break if package is None: import_ = Node(syms.import_name, [ Leaf(token.NAME, u'import'), Leaf(token.NAME, name, prefix=u' ') ]) else: import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u' ')]) children = [import_, Newline()] root.insert_child(insert_pos, Node(syms.simple_stmt, children)) _def_syms = set([syms.classdef, syms.funcdef]) def find_binding(name, node, package=None): """ Returns the node which binds variable name, otherwise None. If optional argument package is supplied, only imports will be returned. 
See test cases for examples.""" for child in node.children: ret = None if child.type == syms.for_stmt: if _find
ould be placed in. Defaults to ``ALL``. task_class (class): Optional. The class to use for instantiating tasks. Defaults to ``Task``. backend_class (class): Optional. The class to use for instantiating the backend. Defaults to ``None`` (DSN detection). """ self.conn_string = conn_string self.queue_name = queue_name self.task_class = task_class self.backend_class = backend_class if not backend_class: self.backend = self.build_backend(self.conn_string) else: self.backend = backend_class(self.conn_string) def build_backend(self, conn_string): """ Given a DSN, returns an instantiated backend class. Ex:: backend = gator.build_backend('locmem://') # ...or... backend = gator.build_backend('redis://127.0.0.1:6379/0') Args: conn_string (str): A DSN for connecting to the queue. Passed along to the backend. Returns: Client: A backend `
`Client`` instance """ backend_name, _ = conn_string.split(":", 1) backend_path = "alligator.backends.{}_backend".format(backe
nd_name) client_class = import_attr(backend_path, "Client") return client_class(conn_string) def len(self): """ Returns the number of remaining queued tasks. Returns: int: A count of the remaining tasks """ return self.backend.len(self.queue_name) def push(self, task, func, *args, **kwargs): """ Pushes a configured task onto the queue. Typically, you'll favor using the ``Gator.task`` method or ``Gator.options`` context manager for creating a task. Call this only if you have specific needs or know what you're doing. If the ``Task`` has the ``is_async = False`` option, the task will be run immediately (in-process). This is useful for development and in testing. Ex:: task = Task(is_async=False, retries=3) finished = gator.push(task, increment, incr_by=2) Args: task (Task): A mostly-configured task func (callable): The callable with business logic to execute args (list): Positional arguments to pass to the callable task kwargs (dict): Keyword arguments to pass to the callable task Returns: Task: The fleshed-out ``Task`` instance """ task.to_call(func, *args, **kwargs) data = task.serialize() if task.is_async: task.task_id = self.backend.push( self.queue_name, task.task_id, data, delay_until=task.delay_until, ) else: self.execute(task) return task def pop(self): """ Pops a task off the front of the queue & runs it. Typically, you'll favor using a ``Worker`` to handle processing the queue (to constantly consume). However, if you need to custom-process the queue in-order, this method is useful. Ex:: # Tasks were previously added, maybe by a different process or # machine... finished_topmost_task = gator.pop() Returns: Task: The completed ``Task`` instance """ data = self.backend.pop(self.queue_name) if data: task = self.task_class.deserialize(data) return self.execute(task) def get(self, task_id): """ Gets a specific task, by ``task_id`` off the queue & runs it. 
Using this is not as performant (because it has to search the queue), but can be useful if you need to specifically handle a task *right now*. Ex:: # Tasks were previously added, maybe by a different process or # machine... finished_task = gator.get('a-specific-uuid-here') Args: task_id (str): The identifier of the task to process Returns: Task: The completed ``Task`` instance """ data = self.backend.get(self.queue_name, task_id) if data: task = self.task_class.deserialize(data) return self.execute(task) def cancel(self, task_id): """ Takes an existing task & cancels it before it is processed. Returns the canceled task, as that could be useful in creating a new task. Ex:: task = gator.task(add, 18, 9) # Whoops, didn't mean to do that. gator.cancel(task.task_id) Args: task_id (str): The identifier of the task to process Returns: Task: The canceled ``Task`` instance """ data = self.backend.get(self.queue_name, task_id) if data: task = self.task_class.deserialize(data) task.to_canceled() return task def execute(self, task): """ Given a task instance, this runs it. This includes handling retries & re-raising exceptions. Ex:: task = Task(is_async=False, retries=5) task.to_call(add, 101, 35) finished_task = gator.execute(task) Args: task_id (str): The identifier of the task to process Returns: Task: The completed ``Task`` instance """ try: return task.run() except Exception: if task.retries > 0: task.retries -= 1 task.to_retrying() if task.is_async: # Place it back on the queue. data = task.serialize() task.task_id = self.backend.push( self.queue_name, task.task_id, data ) else: return self.execute(task) else: raise def task(self, func, *args, **kwargs): """ Pushes a task onto the queue. This will instantiate a ``Gator.task_class`` instance, configure the callable & its arguments, then push it onto the queue. 
You'll typically want to use either this method or the ``Gator.options`` context manager (if you need to configure the ``Task`` arguments, such as retries, is_async, task_id, etc.) Ex:: on_queue = gator.task(increment, incr_by=2) Args: func (callable): The callable with business logic to execute args (list): Positional arguments to pass to the callable task kwargs (dict): Keyword arguments to pass to the callable task Returns: Task: The ``Task`` instance """ task = self.task_class() return self.push(task, func, *args, **kwargs) def options(self, **kwargs): """ Allows specifying advanced ``Task`` options to control how the task runs. This returns a context manager which will create ``Task`` instances with the supplied options. See ``Task.__init__`` for the available arguments. Ex:: def party_time(task, result): # Throw a party in honor of this task completing. # ... with gator.options(retries=2, on_success=party_time) as opts: opts.task(increment, incr_by=2678) Args: kwargs (dict): Keyword arguments to control the task execution Returns: Options: An ``Options`` context manager instance """ return Options(self, **kwargs) class Options(object): def __init__(self, gator, **kwargs): """ A context manager for specifying task execution options. Typically, you'd use ``Gator.options``, which creates this context manager for you. You probably don't want to directly use this. Args: gator (Gator): A configured ``Gator`` instance
from temboo.Library.Utilities.XML.GetValuesFromXML import Get
ValuesFromXML, GetValuesFromXMLInputSet, GetValuesFromXMLResultSet, GetValuesFromXMLChoreographyExecution from temboo.Library.Utilities.XML.RunXPathQuery import RunXPathQuery, RunXPathQueryInputSet, RunXPathQueryResultSet, RunXPathQueryChoreo
graphyExecution
# Copyright 2015 Andrea Frittoli <andrea.frittoli@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A small pub/sub HTTP API built on Flask-RESTful.

Subscribers manage subscriptions and poll for messages via
``/<topic>/<username>``; publishers post plain-text messages to
``/<topic>``.  The storage backend is chosen at startup through
``eowyn.model.managers``.
"""

# NOTE(review): ``ConfigParser`` is the Python 2 module name (renamed
# ``configparser`` in Python 3), so this module targets Python 2.
import ConfigParser
import flask
from flask import request
import flask_restful
import functools
import sys

import eowyn.exceptions as eowyn_exc
from eowyn.model import managers

app = flask.Flask(__name__)
api = flask_restful.Api(app)
# Global storage manager; assigned in main() before the app starts serving.
manager = None


def handle_validate(f):
    """A decorator to apply handle data validation errors"""
    @functools.wraps(f)
    def wrapper(self, *func_args, **func_kwargs):
        try:
            return f(self, *func_args, **func_kwargs)
        except eowyn_exc.InvalidDataException as ida:
            # Invalid topic/username/message data maps to HTTP 400.
            flask_restful.abort(400, message=str(ida))
    return wrapper


class Subscription(flask_restful.Resource):
    """Subscriber-facing resource: poll, subscribe and unsubscribe."""

    @handle_validate
    def get(self, topic, username):
        # Get next message on a topic
        try:
            message = manager.pop_message(topic=topic, username=username)
            return message, 200
        except eowyn_exc.NoMessageFoundException:
            # If no message is found simply return 204
            return '', 204
        except eowyn_exc.SubscriptionNotFoundException as snfe:
            flask_restful.abort(404, message=str(snfe))

    @handle_validate
    def post(self, topic, username):
        # Subscribe to a topic
        try:
            manager.create_subscription(topic=topic, username=username)
            return '', 200
        except eowyn_exc.SubscriptionAlreadyExistsException:
            # NOTE(andreaf) This is not specified, but it seemed a
            # reasonable code to return in this case
            return '', 201

    @handle_validate
    def delete(self, topic, username):
        # Unsubscribe from a topic
        try:
            manager.delete_subscription(topic=topic, username=username)
            return '', 200
        except eowyn_exc.SubscriptionNotFoundException as snfe:
            flask_restful.abort(404, message=str(snfe))


class Message(flask_restful.Resource):
    """Publisher-facing resource: post a plain-text message to a topic."""

    @handle_validate
    def post(self, topic):
        # Post a message to a topic
        # There's no content type set for messages, they're plain text.
        # Because of that we need to extract the text from the
        # ImmutableMultiDIct returned by flask
        # message = request.form.keys()[0]
        message = request.data
        try:
            manager.publish_message(topic, message)
        except eowyn_exc.TopicNotFoundException:
            # NOTE(andreaf) When no topic is not found it means no subscription
            # exists so the message is discarded right away. We still need to
            # capture this exception, and do nothing for now. We may have
            # logging or reporting logic in future here.
            pass
        return '', 200


# Handle Subscriber API (subscribe, un-subscribe and get message)
api.add_resource(Subscription, '/<string:topic>/<string:username>')
# Handle Publisher API (post message)
api.add_resource(Message, '/<string:topic>')


def main():
    """Configure the storage manager from an optional config file and serve.

    The first command line argument, when present, names an ini-style config
    file with a ``[default]`` section (``manager``, ``debug``) plus one
    section per manager type.  Without it, a local redis manager is used.
    """
    config = ConfigParser.ConfigParser()
    try:
        # Read config file as first command line parameter
        config_file = sys.argv[1]
        config.read(config_file)
        # Use the configured manager
        manager_type = config.get('default', 'manager')
        manager_configs = config.items(manager_type)
        # In case of duplicated configs, the last one wins
        manager_configs = {k: v for (k, v) in manager_configs}
        # Other configs
        # NOTE(review): config.get returns a string here, so any non-empty
        # value (including "False") is truthy when passed to app.run(debug=...)
        # -- confirm whether getboolean was intended.
        debug = config.get('default', 'debug')
    except IndexError:
        # Or else use defaults
        # NOTE(review): only the missing-argv case is handled; a malformed
        # config file would raise out of main().
        debug = False
        manager_type = 'redis'
        manager_configs = {'host': 'localhost', 'port': 6379}
    global manager
    manager = managers.get_manager(manager_type, **manager_configs)
    app.run(debug=debug)


if __name__ == '__main__':
    main()
""" @author: """ import bottle # this variable MUST be used as the name for the cookie used by this application COOKIE_NAME = 'sessionid' def check_login(db, usernick, password): """returns True if password matches stored""" def generate_session(db, usernick): """create a new session and add a cookie to the request object (bottle.request) user must be a valid user in the database, if not, return None There should only be one session per user at any time, if there is already a session active, use the existing sessionid in the cookie """ def delete_sessi
on(db, usernick): """remove all session table entries for this user""" def session_user
(db): """try to retrieve the user from the sessions table return usernick or None if no valid session is present"""
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import os
import subprocess
import sys
from textwrap import dedent

from twitter.common.contextutil import pushd

from pex.testing import temporary_content


def assert_entry_points(entry_points):
    """Build a tiny project with the given entry_points via ``bdist_pex``
    and assert the resulting .pex runs the console script.

    ``entry_points`` may be anything setuptools accepts (dict or ini-style
    string); it is embedded verbatim into the generated setup.py.
    """
    setup_py = dedent("""
        from setuptools import setup

        setup(
            name='my_app',
            version='0.0.0',
            zip_safe=True,
            packages=[''],
            entry_points=%(entry_points)r,
        )
    """ % dict(entry_points=entry_points))

    my_app = dedent("""
        def do_something():
            print("hello world!")
    """)

    with temporary_content({'setup.py': setup_py, 'my_app.py': my_app}) as project_dir:
        with pushd(project_dir):
            # Build the pex with the same interpreter running the tests.
            subprocess.check_call([sys.executable, 'setup.py', 'bdist_pex'])
            # Execute the built artifact and capture its stdout.
            process = subprocess.Popen(
                [os.path.join(project_dir, 'dist', 'my_app-0.0.0.pex')],
                stdout=subprocess.PIPE)
            stdout, _ = process.communicate()
            assert 0 == process.returncode
            assert stdout == b'hello world!\n'


def test_entry_points_dict():
    """entry_points given as a setuptools dict."""
    assert_entry_points({'console_scripts': ['my_app = my_app:do_something']})


def test_entry_points_ini_string():
    """entry_points given as an ini-style string."""
    assert_entry_points(dedent("""
        [console_scripts]
        my_app=my_app:do_something
    """))
# -*- coding: utf-8 -*-

"""This module defines functions generally useful in scikit-ci."""

import os

from .constants import SERVICES, SERVICES_ENV_VAR


def current_service():
    """Return the name of the CI service detected from the environment.

    A service is detected when its marker environment variable is set to
    'true' (case-insensitive).  Raises ``LookupError`` when no known
    service variable is set.
    """
    for service, env_var in SERVICES_ENV_VAR.items():
        marker = os.environ.get(env_var, 'false')
        if marker.lower() == 'true':
            return service
    raise LookupError(
        "unknown service: None of the environment variables {} are set "
        "to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
    )


def current_operating_system(service):
    """Return the operating system reported by *service*, or ``None``
    when the service has no associated OS environment variable."""
    os_env_var = SERVICES[service]
    if not os_env_var:
        return None
    return os.environ[os_env_var]


def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    If 'predicate' is provided, 'prefix' will only be added to the lines
    where 'predicate(line)' is True. If 'predicate' is not provided,
    it will default to adding 'prefix' to all non-empty lines that do not
    consist solely of whitespace characters.

    Backport of ``textwrap.indent`` from Python 3
    (cpython/cpython@a2d2bef).
    """
    if predicate is None:
        def predicate(line):
            return line.strip()

    prefixed = []
    for line in text.splitlines(True):
        prefixed.append(prefix + line if predicate(line) else line)
    return ''.join(prefixed)
"""Example shows how to send requests and get responses.""" import asyncio from obswsrc import OBSWS from obswsrc.requests import ResponseStatus, StartStreamingRequest from obswsrc.types import Stream, StreamSettings async def main(): async with OBSWS('localhost', 4444, "password") as obsws: # We can send an empty StartStreaming request (in that case the plugin # will use OBS configuration), but let's provide some settings as well stream_settings = StreamSettings( server="rtmp://example.org/my_application", key="secret_stream_key", use_auth=False
) stream = Stream( settings=stream_settings, type=
"rtmp_custom", ) # Now let's actually perform a request response = await obsws.require(StartStreamingRequest(stream=stream)) # Check if everything is OK if response.status == ResponseStatus.OK: print("Streaming has started") else: print("Couldn't start the stream! Reason:", response.error) loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close()
from json import loads

import codecs

import environ

# Directory named 'fixtures' next to this file.
FIXTURE_PATH = (environ.Path(__file__) - 1).path('fixtures')


def read_json(fpath):
    """Read the UTF-8 encoded JSON file at *fpath* and return the parsed
    Python object."""
    with codecs.open(fpath, 'rb', encoding='utf-8') as fp:
        raw = fp.read()
    return loads(raw)


def read_fixture(*subpath):
    """Load and parse the fixture located at *subpath* under
    ``FIXTURE_PATH``."""
    return read_json(str(FIXTURE_PATH.path(*subpath)))
"""Runners that extract the n-th best hypothesis from a beam search decoder."""

from typing import Callable, List, Dict, Optional

import numpy as np
from typeguard import check_argument_types

from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
                                                       SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
                                              ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN


class BeamSearchExecutable(Executable):
    """Executable that backtracks one ranked hypothesis out of the beam
    search output and turns it into token strings."""

    def __init__(self,
                 rank: int,
                 all_encoders: List[ModelPart],
                 bs_outputs: SearchStepOutput,
                 vocabulary: Vocabulary,
                 postprocess: Optional[Callable]) -> None:
        # rank is 1-based: rank 1 selects the highest-scoring hypothesis.
        self._rank = rank
        self._all_encoders = all_encoders
        self._bs_outputs = bs_outputs
        self._vocabulary = vocabulary
        self._postprocess = postprocess

        # Filled in by collect_results().
        self.result = None  # type: Optional[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        """Return the coders to feed and the fetches to evaluate."""
        return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}

    def collect_results(self, results: List[Dict]) -> None:
        """Select the rank-th final hypothesis and backtrack its tokens.

        Raises:
            ValueError: when more than one result set is supplied
                (ensembling is unsupported).
        """
        if len(results) > 1:
            raise ValueError("Beam search runner does not support ensembling.")

        evaluated_bs = results[0]['bs_outputs']
        # scores has shape (time, beam) -- presumably; confirmed only by the
        # indexing below.
        max_time = evaluated_bs.scores.shape[0]

        # pick the end of the hypothesis based on its rank
        # (argpartition puts the rank-th largest score at position rank-1)
        hyp_index = np.argpartition(
            -evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
        bs_score = evaluated_bs.scores[-1][hyp_index]

        # now backtrack: walk from the last step to the first, following
        # parent_ids to recover the token sequence in reverse order
        output_tokens = []  # type: List[str]
        for time in reversed(range(max_time)):
            token_id = evaluated_bs.token_ids[time][hyp_index]
            token = self._vocabulary.index_to_word[token_id]
            output_tokens.append(token)
            hyp_index = evaluated_bs.parent_ids[time][hyp_index]

        output_tokens.reverse()

        # Truncate the hypothesis at the first end-of-sentence token.
        before_eos_tokens = []  # type: List[str]
        for tok in output_tokens:
            if tok == END_TOKEN:
                break
            before_eos_tokens.append(tok)

        if self._postprocess is not None:
            decoded_tokens = self._postprocess([before_eos_tokens])
        else:
            decoded_tokens = [before_eos_tokens]

        # The hypothesis score is reported as the runner's "loss".
        self.result = ExecutionResult(
            outputs=decoded_tokens,
            losses=[bs_score],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=None)


class BeamSearchRunner(BaseRunner):
    """Runner that outputs the hypothesis of a fixed rank from a
    ``BeamSearchDecoder``."""

    def __init__(self,
                 output_series: str,
                 decoder: BeamSearchDecoder,
                 rank: int = 1,
                 postprocess: Optional[
                     Callable[[List[str]], List[str]]] = None) -> None:
        """Initialize the runner.

        Args:
            output_series: Name of the output series produced.
            decoder: The beam search decoder to read hypotheses from.
            rank: 1-based rank of the hypothesis to output; must lie
                within the decoder's beam size.
            postprocess: Optional series-level postprocess applied to the
                decoded tokens.

        Raises:
            ValueError: when ``rank`` is outside [1, beam_size].
        """
        super(BeamSearchRunner, self).__init__(output_series, decoder)
        check_argument_types()

        if rank < 1 or rank > decoder.beam_size:
            raise ValueError(
                ("Rank of output hypothesis must be between 1 and the beam "
                 "size ({}), was {}.").format(decoder.beam_size, rank))

        self._rank = rank
        self._postprocess = postprocess

    def get_executable(self,
                       compute_losses: bool = False,
                       summaries: bool = True) -> BeamSearchExecutable:
        """Create an executable for one evaluation of the decoder."""
        return BeamSearchExecutable(
            self._rank, self.all_coders, self._decoder.outputs,
            self._decoder.vocabulary, self._postprocess)

    @property
    def loss_names(self) -> List[str]:
        # The only "loss" reported is the beam search hypothesis score.
        return ["beam_search_score"]

    @property
    def decoder_data_id(self) -> Optional[str]:
        # Beam search has no reference data series.
        return None


def beam_search_runner_range(
        output_series: str,
        decoder: BeamSearchDecoder,
        max_rank: Optional[int] = None,
        postprocess: Optional[Callable[[List[str]], List[str]]] = None
) -> List[BeamSearchRunner]:
    """A list of beam search runners for a range of ranks from 1 to max_rank.

    This means there is max_rank output series where the n-th series contains
    the n-th best hypothesis from the beam search.

    Args:
        output_series: Prefix of output series.
        decoder: Beam search decoder shared by all runners.
        max_rank: Maximum rank of the hypotheses; defaults to the
            decoder's beam size.
        postprocess: Series-level postprocess applied on output.

    Returns:
        List of beam search runners getting hypotheses with rank from 1 to
        max_rank.

    Raises:
        ValueError: when ``max_rank`` exceeds the decoder's beam size.
    """
    check_argument_types()

    if max_rank is None:
        max_rank = decoder.beam_size

    if max_rank > decoder.beam_size:
        raise ValueError(
            ("The maximum rank ({}) cannot be "
             "bigger than beam size {}.").format(
                 max_rank, decoder.beam_size))

    # One runner per rank; series are named e.g. "out.rank001", "out.rank002".
    return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
                             decoder, r, postprocess)
            for r in range(1, max_rank + 1)]
class MyStuff(object):
    """Tiny demo class holding a lyric fragment and a printer method."""

    def __init__(self):
        # Lyric fragment stored as instance state.
        self.tangerine = "And now a thousand years between"

    def apple(self):
        """Print a short message to stdout."""
        # Fixed: the original used the Python 2-only `print` statement,
        # which is a SyntaxError on Python 3.  The parenthesized form is
        # the print() function on Python 3 and an identical single-argument
        # print statement on Python 2, so output is unchanged.
        print("I am classy apples!")
Fixture( net_helpers.OVSPortFixture(self.br, self.dst_namespace)).port # wait to add IPs until after anti-spoof rules to ensure ARP doesn't # happen before def collect_flows_and_ports(self, exc_info): nicevif = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['ofport', 'port_name', 'switch', 'vif_id', 'vif_mac']] nicedev = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['name', 'namespace']] + x.addr.list() details = {'flows': self.br.dump_all_flows(), 'vifs': map(nicevif, self.br.get_vif_ports()), 'src_ip': self.src_addr, 'dest_ip': self.dst_addr, 'sourt_port': nicedev(self.src_p), 'dest_port': nicedev(self.dst_p)} self.addDetail('arp-test-state', text_content(jsonutils.dumps(details, indent=5))) @common_base.no_skip_on_missing_deps def skip_without_arp_support(self): if not checks.arp_header_match_supported(): self.skipTest("ARP header matching not supported") def test_arp_spoof_doesnt_block_normal_traffic(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_mac_spoof_blocks_wrong_mac(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) # changing the allowed mac should stop the port from working self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr], mac='00:11:22:33:44:55') net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_doesnt_block_ipv6(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) 
self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_response(self): # this will prevent the destination from responding to the ARP # request for it's own address self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_icmpv6_neigh_advt(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' # this will prevent the destination from responding (i.e., icmpv6 # neighbour advertisement) to the icmpv6 neighbour solicitation # request for it's own address (2000::2) as spoofing rules added # below only allow '2000::3'. self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3']) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_request(self): # this will prevent the source from sending an ARP # request with its own address self._setup_arp_spoof_for_port(self.src_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) ns_ip_wrapper = ip_lib.IPWrapper(self.src_namespace) try: ns_ip_wrapper.netns.execute(['arping', '-I', self.src_p.name, '-c1', self.dst_addr]) tools.fail("arping should have failed. 
The arp request should " "have been blocked.") except RuntimeError: pass def test_arp_spoof_allowed_address_pairs(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3', self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr
, count=2) def test_arp_spoof_icmpv6_neigh_advt_allowed_address_pairs(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3', self.dst_addr]) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.a
ddr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_allowed_address_pairs_0cidr(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['9.9.9.9/0', '1.2.3.4']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'], psec=False) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_network_port(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port( self.dst_p.name, ['192.168.0.3'], device_owner=n_const.DEVICE_OWNER_ROUTER_GW) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def _setup_arp_spoof_for_port(self, port, addrs, psec=True, device_owner='nobody', mac=None): vif = next( vif for vif in self.br.get_vif_ports() if vif.port_name == port) ip_addr = addrs.pop() details = {'port_security_enabled': psec, 'fixed_ips': [{'ip_address': ip_addr}], 'device_owner': device_owner, 'allowed_address_pairs': [ dict(ip_address=ip) for ip in addrs]} if mac: vif.vif_mac = mac ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection( self.br_int, vif, details) class CanaryTableTestCase(OVSAgentTestBase): def test_canary_table(self): 
self.br_int.delete_flows() self.assertEqual(constants.OVS_RESTARTED, self.br_i
from app.app_and_db import app
from flask import Blueprint, jsonify, render_template
import datetime
import random
import requests

dashboard = Blueprint('dashboard', __name__)

# CUMTD "departures by stop" endpoint, pre-rendered for v2.2 / JSON.
cumtd_endpoint = 'https://developer.cumtd.com/api/{0}/{1}/{2}'.format(
    'v2.2', 'json', 'GetDeparturesByStop')

# Weather Underground hourly forecast for Champaign, IL.
wunderground_endpoint = 'http://api.wunderground.com/api/{0}/hourly/q/{1}/{2}.json'.format(
    app.config['WUNDERGROUND_API_KEY'], 'IL', 'Champaign')


@dashboard.route('/')
def index():
    """Render the dashboard with the current time and a random hero image."""
    now = datetime.datetime.now().time()
    return render_template(
        'pages/dashboard.html',
        image_number=random.randrange(1, 9),
        time=now.strftime('%I:%M').lstrip('0'),
    )


# Query no more than once a minute
@dashboard.route('/bus')
def bus_schedule():
    """Return upcoming eastbound departures for the GRN4TH stop as JSON."""
    query = {'key': app.config['CUMTD_API_KEY'],
             'stop_id': 'GRN4TH',
             'count': '5'}
    payload = requests.get(cumtd_endpoint, params=query).json()
    eastbound = [dep for dep in payload['departures']
                 if dep['trip']['direction'] == 'East']
    return jsonify(departures=eastbound)


# Query no more than once every three minutes
@dashboard.route('/weather')
def weather():
    """Proxy the Weather Underground hourly forecast as JSON."""
    payload = requests.get(wunderground_endpoint).json()
    return jsonify(payload)


app.register_blueprint(dashboard, url_prefix='/dashboard')
me = 'Segment %s content %d' % (segment_label, contentid) command_finish = 'Started %s segment with content %d and port %d at %s' % (segment_label, contentid, segment_port, segment_dir) runcommands(commands, thread_name, command_finish) @staticmethod def getRole(contentid): if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID: return 'master' else: return 'mirrorless' def run(self): startThreads = [] for segconfig in self.segconfigs: thread = threading.Thread(target=self.startThread, args=(segconfig,)) thread.start() startThreads.append(thread) for thread in startThreads: thread.join() class StopInstances(): ''' Stop all segments''' def __init__(self, cluster_config): self.clusterconfig = cluster_config self.segconfigs = cluster_config.get_seg_configs() def stopThread(self, segconfig): commands = []
segment_contentid = segconfig.content segment_dir = segconfig.fselocation if segment_contentid == GpSegmentConfiguration.MASTER_CONTENT_ID: segment_type = 'master' elif segconfig.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY: segment_type = 'primary' else: seg
ment_type = 'mirror' commands.append("pg_ctl -D %s stop" % segment_dir) thread_name = 'Segment %s content %d' % (segment_type, segment_contentid) command_finish = 'Stopped %s segment at %s' % (segment_type, segment_dir) runcommands(commands, thread_name, command_finish) def run(self): stopThreads = [] for segconfig in self.segconfigs: thread = threading.Thread(target=self.stopThread, args=(segconfig,)) thread.start() stopThreads.append(thread) for thread in stopThreads: thread.join() class DestroyMirrors(): ''' Destroy the WAL replication mirror segment ''' def __init__(self, cluster_config): self.clusterconfig = cluster_config self.segconfigs = cluster_config.get_seg_configs() def destroyThread(self, segconfig): commands = [] mirror_contentid = segconfig.content mirror_dir = segconfig.fselocation commands.append("pg_ctl -D %s stop" % mirror_dir) commands.append("rm -rf %s" % mirror_dir) thread_name = 'Mirror content %d' % mirror_contentid command_finish = 'Destroyed mirror at %s' % mirror_dir runcommands(commands, thread_name, command_finish, False) # Let FTS recognize that mirrors are gone. As a result, # primaries will be marked not-in-sync. If this step is # omitted, FTS will stop probing as soon as mirrors are # removed from catalog and primaries will be left "in-sync" # without mirrors. # # FIXME: enhance gp_remove_segment_mirror() to do this, so # that utility remains simplified. Remove this stopgap # thereafter. 
ForceFTSProbeScan(self.clusterconfig, GpSegmentConfiguration.STATUS_DOWN, GpSegmentConfiguration.NOT_IN_SYNC) commands = [] catalog_update_query = "select pg_catalog.gp_remove_segment_mirror(%d::int2)" % (mirror_contentid) commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query) command_finish = 'Removed mirror %s from catalog' % mirror_dir runcommands(commands, thread_name, command_finish, False) def run(self): destroyThreads = [] for segconfig in self.segconfigs: assert(segconfig.preferred_role == GpSegmentConfiguration.ROLE_MIRROR) thread = threading.Thread(target=self.destroyThread, args=(segconfig,)) thread.start() destroyThreads.append(thread) for thread in destroyThreads: thread.join() class GpSegmentConfiguration(): ROLE_PRIMARY = 'p' ROLE_MIRROR = 'm' STATUS_DOWN = 'd' STATUS_UP = 'u' NOT_IN_SYNC = 'n' IN_SYNC = 's' MASTER_CONTENT_ID = -1 def __init__(self, dbid, content, port, fselocation, role, preferred_role, status, mode): self.dbid = dbid self.content = content self.port = port self.fselocation = fselocation self.role = role self.preferred_role = preferred_role self.status = status self.mode = mode class ClusterConfiguration(): ''' Cluster configuration ''' def __init__(self, hostname, port, dbname, role = "all", status = "all", include_master = True): self.hostname = hostname self.port = port self.dbname = dbname self.role = role self.status = status self.include_master = include_master self._all_seg_configs = None self.refresh() def get_num_contents(self): return self.num_contents; def get_seg_configs(self): return self.seg_configs; def get_pair_port(self, input_config): for seg_config in self._all_seg_configs: if (seg_config.content == input_config.content and seg_config.role != input_config.role): return seg_config.port assert(input_config.role == GpSegmentConfiguration.ROLE_PRIMARY) ''' if not found then assume its mirror and hence return port at which mirror must be created ''' return 
input_config.port + 10000 def get_pair_dir(self, input_config): for seg_config in self._all_seg_configs: if (seg_config.content == input_config.content and seg_config.role != input_config.role): return seg_config.fselocation assert(input_config.role == GpSegmentConfiguration.ROLE_PRIMARY) ''' if not found then assume its mirror and hence return location at which mirror must be created ''' return input_config.fselocation.replace('dbfast', 'dbfast_mirror') def get_gp_segment_ids(self): ids = [] for seg_config in self.seg_configs: ids.append(str(seg_config.content)) return ','.join(ids) def refresh(self): query = ("SELECT dbid, content, port, fselocation, role, preferred_role, status, mode " "FROM gp_segment_configuration s, pg_filespace_entry f " "WHERE s.dbid = fsedbid") print '%s: fetching cluster configuration' % (datetime.datetime.now()) dburl = dbconn.DbURL(self.hostname, self.port, self.dbname) print '%s: fetched cluster configuration' % (datetime.datetime.now()) try: with dbconn.connect(dburl, utility=True) as conn: resultsets = dbconn.execSQL(conn, query).fetchall() except Exception, e: print e sys.exit(1) self._all_seg_configs = [] self.seg_configs = [] self.num_contents = 0 for result in resultsets: seg_config = GpSegmentConfiguration(result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7]) self._all_seg_configs.append(seg_config) append = True if (self.status != "all" and self.status != seg_config.status): append = False if (self.role != "all" and self.role != seg_config.role): append = False if (not self.include_master and seg_config.content == GpSegmentConfiguration.MASTER_CONTENT_ID): append = False if append: self.seg_configs.append(seg_config) # Count primary segments if (seg_config.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY and seg_config.content != GpSegmentConfiguration.MASTER_CONTENT_ID): self.num_contents += 1 def check_status_and_mode(self, expected_status, expected_mode): ''' Check if all the instance 
reached the expected_state and expected_mode ''' for seg_config in self.seg_con
# This file is part of the dionaea honeypot
#
# SPDX-FileCopyrightText: 2009 Paul Baecher & Markus Koetter & Mark Schloesser
#
# SPDX-License-Identifier: GPL-2.0-or-later

from dionaea.core import connection


class echo(connection):
    """Demo service that answers each incoming payload with its reverse.

    Exercises the dionaea ``connection`` callback API: construction,
    timeouts, mirrored (origin) connections, and raw I/O handlers.
    """

    def __init__ (self, proto=None):
        # Announce construction, then defer to the base connection init.
        print("echo init")
        connection.__init__(self,proto)
        # Nudge idle sessions after 5s; cap total session lifetime at 10s.
        self.timeouts.idle = 5.
        self.timeouts.sustain = 10.

    def handle_origin(self, parent):
        # Invoked when this connection mirrors an existing one; log both ends.
        print("origin!")
        print("parent {:s} {:s}:{:d}".format(
            parent.protocol, parent.local.host,parent.local.port))
        print("self {:s} {:s}:{:d} -> {:s}:{:d}".format(self.protocol,
            self.local.host,self.local.port,
            self.remote.host,self.remote.port))

    def handle_established(self):
        # A client connected: greet it.
        print("new connection to serve!")
        self.send('welcome to reverse world!\n')

    def handle_timeout_idle(self):
        # Idle timeout fired; returning True keeps the connection alive.
        self.send("you are idle!\n")
        return True

    def handle_timeout_sustain(self):
        # Sustain timeout fired; returning False lets the connection close.
        self.send("your sustain timeouted!\n")
        return False

    def handle_disconnect(self):
        self.send("disconnecting you!\n")

    def handle_io_in(self,data):
        # Echo the payload reversed; ``[::-1][1:]`` reverses and then drops
        # what was the payload's last byte — presumably the client's trailing
        # newline, replaced by the b'\n' appended here (TODO confirm intent).
        # Returning len(data) tells the core the whole buffer was consumed.
        print('py_io_in\n')
        self.send(data[::-1][1:] + b'\n')
        return len(data)


# Example standalone usage (kept for reference):
#e = echo(proto='tcp')
#e.bind('0.0.0.0',4713,'')
#e.listen()
from __future__ import absolute_import

from sqlalchemy import types
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql.base import ischema_names, PGTypeCompiler
from sqlalchemy.sql import expression

from ..primitives import Ltree
from .scalar_coercible import ScalarCoercible


class LtreeType(types.Concatenable, types.UserDefinedType, ScalarCoercible):
    """Postgresql LtreeType type.

    The LtreeType datatype can be used for representing labels of data
    stored in hierarchical tree-like structure. For more detailed
    information please refer to
    http://www.postgresql.org/docs/current/static/ltree.html

    ::

        from sqlalchemy_utils import LtreeType


        class DocumentSection(Base):
            __tablename__ = 'document_section'
            id = sa.Column(sa.Integer, autoincrement=True)
            path = sa.Column(LtreeType)


        section = DocumentSection(name='Countries.Finland')
        session.add(section)
        session.commit()

        section.path  # Ltree('Countries.Finland')


    .. note::
        Using :class:`LtreeType`, :class:`LQUERY` and :class:`LTXTQUERY` types
        may require installation of Postgresql ltree extension on the server
        side. Please visit http://www.postgresql.org for details.
    """

    class comparator_factory(types.Concatenable.Comparator):
        # Exposes the ltree operators (@>, <@, ~, ?, @) as query methods.

        def ancestor_of(self, other):
            # ``@>``: the left operand is an ancestor of the right one.
            # A list operand is cast to an ltree[] array literal.
            if isinstance(other, list):
                return self.op('@>')(expression.cast(other, ARRAY(LtreeType)))
            else:
                return self.op('@>')(other)

        def descendant_of(self, other):
            # ``<@``: the left operand is a descendant of the right one.
            if isinstance(other, list):
                return self.op('<@')(expression.cast(other, ARRAY(LtreeType)))
            else:
                return self.op('<@')(other)

        def lquery(self, other):
            # Single pattern: ``~`` (match lquery); list of patterns: ``?``
            # (match any lquery in the array).
            if isinstance(other, list):
                return self.op('?')(expression.cast(other, ARRAY(LQUERY)))
            else:
                return self.op('~')(other)

        def ltxtquery(self, other):
            # ``@``: full-text-style match against an ltxtquery.
            return self.op('@')(other)

    def bind_processor(self, dialect):
        # Outbound: send the Ltree's dotted path string to the DB.
        # NOTE(review): falsy values (None, empty) are passed through as
        # None implicitly — confirm empty paths cannot occur here.
        def process(value):
            if value:
                return value.path
        return process

    def result_processor(self, dialect, coltype):
        # Inbound: coerce the raw ltree string back into an Ltree object.
        def process(value):
            return self._coerce(value)
        return process

    def literal_processor(self, dialect):
        # Render an inline SQL literal, escaping embedded single quotes.
        def process(value):
            value = value.replace("'", "''")
            return "'%s'" % value
        return process

    __visit_name__ = 'LTREE'

    def _coerce(self, value):
        # Falsy (None/empty) values coerce to None.
        if value:
            return Ltree(value)


class LQUERY(types.TypeEngine):
    """Postgresql LQUERY type.

    See :class:`LTREE` for details.
    """
    __visit_name__ = 'LQUERY'


class LTXTQUERY(types.TypeEngine):
    """Postgresql LTXTQUERY type.

    See :class:`LTREE` for details.
    """
    __visit_name__ = 'LTXTQUERY'


# Register the types so reflection maps ltree columns back to these classes.
ischema_names['ltree'] = LtreeType
ischema_names['lquery'] = LQUERY
ischema_names['ltxtquery'] = LTXTQUERY


def visit_LTREE(self, type_, **kw):
    return 'LTREE'


def visit_LQUERY(self, type_, **kw):
    return 'LQUERY'


def visit_LTXTQUERY(self, type_, **kw):
    return 'LTXTQUERY'


# Teach the PG type compiler how to render the DDL names for these types.
PGTypeCompiler.visit_LTREE = visit_LTREE
PGTypeCompiler.visit_LQUERY = visit_LQUERY
PGTypeCompiler.visit_LTXTQUERY = visit_LTXTQUERY
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required, user_passes_test

# Static template views for the public website pages.
_termos_view = TemplateView.as_view(template_name='website/termos_de_uso.html')
_sobre_view = TemplateView.as_view(template_name='website/sobre.html')
# The reports page is restricted to authenticated users.
_relatorios_view = login_required(
    TemplateView.as_view(template_name='website/relatorios.html'))

urlpatterns = patterns(
    '',
    url(r'^$', 'website.views.index', name='website_index'),
    url(r'^termos/$', _termos_view, name='website_termos'),
    url(r'^sobre/$', _sobre_view, name='website_sobre'),
    url(r'^relatorios/$', _relatorios_view, name='website_relatorios'),
)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-17 09:22
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Make ``Contact.artist`` optional (blank/null allowed).

    Auto-generated; alters the FK to ``impart.Artist`` while keeping
    CASCADE deletion behavior.
    """

    dependencies = [("impart", "0004_auto_20170117_0916")]

    operations = [
        migrations.AlterField(
            model_name="contact",
            name="artist",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="impart.Artist",
            ),
        )
    ]
from django.conf import settings
import factory

from pgallery.models import Gallery, Photo


class UserFactory(factory.django.DjangoModelFactory):
    """Build users with sequence-derived, unique usernames and emails."""

    class Meta:
        model = settings.AUTH_USER_MODEL

    username = factory.Sequence(lambda counter: "user_%d" % counter)
    email = factory.Sequence(lambda counter: "user_%d@example.com" % counter)


class GalleryFactory(factory.django.DjangoModelFactory):
    """Build galleries, each owned by a freshly created user."""

    class Meta:
        model = Gallery

    author = factory.SubFactory(UserFactory)
    slug = factory.Sequence(lambda counter: "gallery_%d" % counter)


class PhotoFactory(factory.django.DjangoModelFactory):
    """Build photos whose author matches the owning gallery's author."""

    class Meta:
        model = Photo

    gallery = factory.SubFactory(GalleryFactory)
    author = factory.LazyAttribute(lambda photo: photo.gallery.author)
    image = factory.django.ImageField(width=1024, height=768)
from utils import *
import sys

# Readings below these floors are treated as sensor noise.
TEMP_FLOOR = -60  # degrees: no plausible temperature reading is this low
KWH_FLOOR = -5    # kWh: no plausible meter reading is this negative


def clean_rec(d, temp_floor=TEMP_FLOOR, kwh_floor=KWH_FLOOR):
    """Zero out implausibly low readings in a record, in place.

    ``d`` maps "temps" and "kwhs" to ``(values, original_flags)`` pairs of
    equal-length lists.  Any value below the corresponding floor is reset
    to 0 and its flag cleared, marking the reading as non-original.

    The floors are keyword parameters so callers can tighten or loosen the
    noise thresholds; the defaults preserve the historical behavior
    (-60 for temps, -5 for kwhs).
    """
    kwhs, kwhs_oriflag = d["kwhs"]
    temps, temps_oriflag = d["temps"]

    for i, t in enumerate(temps):
        if t < temp_floor:
            temps_oriflag[i] = False  # Ain't no way that reading's real
            temps[i] = 0

    for i, k in enumerate(kwhs):
        if k < kwh_floor:
            kwhs_oriflag[i] = False  # Ain't no way that reading's real
            kwhs[i] = 0

    d["temps"] = (temps, temps_oriflag)
    d["kwhs"] = (kwhs, kwhs_oriflag)


if __name__ == "__main__":
    args = sys.argv
    if len(args) > 1:
        the_year = int(args[1])
        brecs, desc = qload("state_b_records_" + str(the_year) +
                            "_updated_with_temps.pkl")
        for d in brecs:
            clean_rec(d)
        qdump((brecs, desc + "(Plus we cleaned out curiously low values (noise))"),
              "state_b_records_" + str(the_year) + "_with_temps_cleaned.pkl")
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core TensorFlow types."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# TODO(mdan): Consider adding ABC once the dependence on isinstance is reduced.
# TODO(mdan): Add type annotations.


class Tensor(object):
    """The base class of all dense Tensor objects.

    A dense tensor has a static data type (dtype), and may have a static rank
    and shape. Tensor objects are immutable. Mutable objects may be backed by
    a Tensor which holds the unique handle that identifies the mutable object.
    """

    @property
    def dtype(self):
        # Stub; concrete subclasses provide the real dtype. The base
        # implementation implicitly returns None.
        pass

    @property
    def shape(self):
        # Stub; concrete subclasses provide the real shape.
        pass


class Symbol(Tensor):
    """Symbolic "graph" Tensor.

    These objects represent the output of an op definition and do not carry a
    value.
    """
    # Marker subclass only — no additional behavior.
    pass


class Value(Tensor):
    """Tensor that can be associated with a value (aka "eager tensor").

    These objects represent the (usually future) output of executing an op
    immediately.
    """

    def numpy(self):
        # Stub; concrete subclasses return the tensor's value as a NumPy
        # array.
        pass
ns__(self, name): return name in self.__dict__ def __repr__(self): return "Event({0})".format(self.__dict__) def to_series(self, index=None): return pd.Series(self.__dict__, index=index) class Order(Event): pass class Portfolio(object): def __init__(self): self.capital_used = 0.0 self.starting_cash = 0.0 self.portfolio_value = 0.0 self.pnl = 0.0 self.returns = 0.0 self.cash = 0.0 self.positions = Positions() self.start_date = None self.positions_value = 0.0 def __getitem__(self, key): return self.__dict__[key] def __repr__(self): return "Portfolio({0})".format(self.__dict__) class Account(object): ''' The account object tracks information about the trading account. The values are updated as the algorithm runs and its keys remain unchanged. If connected to a broker, one can update these values with the trading account values as reported by the broker. ''' def __init__(self): self.settled_cash = 0.0 self.accrued_interest = 0.0 self.buying_power = float('inf') self.equity_with_loan = 0.0 self.total_positions_value = 0.0 self.regt_equity = 0.0 self.regt_margin = float('inf') self.initial_margin_requirement = 0.0 self.maintenance_margin_requirement = 0.0 self.available_funds = 0.0 self.excess_liquidity = 0.0 self.cushion = 0.0 self.day_trades_remaining = float('inf') self.leverage = 0.0 self.net_liquidation = 0.0 def __getitem__(self, key): return self.__dict__[key] def __repr__(self): return "Account({0})".format(self.__dict__) def _get_state(self): return 'Account', self.__dict__ def _set_state(self, saved_state): self.__dict__.update(saved_state) class Position(object): def __init__(self, sid): self.sid = sid self.amount = 0 self.cost_basis = 0.0 # per share self.last_sale_price = 0.0 def __getitem__(self, key): return self.__dict__[key] def __repr__(self): return "Position({0})".format(self.__dict__) class Positions(dict): def __missing__(self, key): pos = Position(key) self[key] = pos return pos class SIDData(object): # Cache some data on the class so that this is 
shared for all instances of # siddata. # The dt where we cached the history. _history_cache_dt = None # _history_cache is a a dict mapping fields to pd.DataFrames. This is the # most data we have for a given field for the _history_cache_dt. _history_cache = {}
# This is the cache that is used for returns. This will have a different # structure than the other history cache as this is always daily. _returns_cache_dt = None _returns_cache = None # The last dt that we needed to cache the number of minutes. _minute_bar_cache_dt = None # If we are in minute mode, there is some cost associate
d with computing # the number of minutes that we need to pass to the bar count of history. # This will remain constant for a given bar and day count. # This maps days to number of minutes. _minute_bar_cache = {} def __init__(self, sid, initial_values=None): self._sid = sid self._freqstr = None # To check if we have data, we use the __len__ which depends on the # __dict__. Because we are foward defining the attributes needed, we # need to account for their entrys in the __dict__. # We will add 1 because we need to account for the _initial_len entry # itself. self._initial_len = len(self.__dict__) + 1 if initial_values: self.__dict__.update(initial_values) @property def datetime(self): """ Provides an alias from data['foo'].datetime -> data['foo'].dt `datetime` was previously provided by adding a seperate `datetime` member of the SIDData object via a generator that wrapped the incoming data feed and added the field to each equity event. This alias is intended to be temporary, to provide backwards compatibility with existing algorithms, but should be considered deprecated, and may be removed in the future. """ return self.dt def get(self, name, default=None): return self.__dict__.get(name, default) def __getitem__(self, name): return self.__dict__[name] def __setitem__(self, name, value): self.__dict__[name] = value def __len__(self): return len(self.__dict__) - self._initial_len def __contains__(self, name): return name in self.__dict__ def __repr__(self): return "SIDData({0})".format(self.__dict__) def _get_buffer(self, bars, field='price'): """ Gets the result of history for the given number of bars and field. This will cache the results internally. """ cls = self.__class__ algo = get_algo_instance() now = algo.datetime if now != cls._history_cache_dt: # For a given dt, the history call for this field will not change. # We have a new dt, so we should reset the cache. 
cls._history_cache_dt = now cls._history_cache = {} if field not in self._history_cache \ or bars > len(cls._history_cache[field].index): # If we have never cached this field OR the amount of bars that we # need for this field is greater than the amount we have cached, # then we need to get more history. hst = algo.history( bars, self._freqstr, field, ffill=True, ) # Assert that the column holds ints, not security objects. if not isinstance(self._sid, str): hst.columns = hst.columns.astype(int) self._history_cache[field] = hst # Slice of only the bars needed. This is because we strore the LARGEST # amount of history for the field, and we might request less than the # largest from the cache. return cls._history_cache[field][self._sid][-bars:] def _get_bars(self, days): """ Gets the number of bars needed for the current number of days. Figures this out based on the algo datafrequency and caches the result. This caches the result by replacing this function on the object. This means that after the first call to _get_bars, this method will point to a new function object. """ def daily_get_bars(days): return days @with_environment() def minute_get_bars(days, env=None): cls = self.__class__ now = get_algo_instance().datetime if now != cls._minute_bar_cache_dt: cls._minute_bar_cache_dt = now cls._minute_bar_cache = {} if days not in cls._minute_bar_cache: # Cache this calculation to happen once per bar, even if we # use another transform with the same number of days. prev = env.previous_trading_day(now) ds = env.days_in_range( env.add_trading_days(-days + 2, prev), prev, ) # compute the number of minutes in the (days - 1) days before # today. # 210 minutes in a an early close and 390 in a full day. ms = sum(210 if d in env.early_closes else 390 for d in ds) # Add the number of minutes for today. 
ms += int( (now - env.get_open_and_close(now)[0]).total_seconds() / 60 ) cls._minute_bar_cache[days] = ms + 1 # Account for this minute return cls._minute_bar_cache[days] if get_algo_instance().sim_params.data_frequency == 'daily': self._freqstr = '1d' # update this method to point to the
import alsaaudio
from math import pi, sin, pow
import getch

SAMPLE_RATE = 44100
FORMAT = alsaaudio.PCM_FORMAT_U8
PERIOD_SIZE = 512
N_SAMPLES = 1024

# Map note names to frequencies starting from A440.
# NOTE(review): the step ratio here is sqrt(2) (a tritone). An equal-tempered
# semitone would be pow(2, 1/12), and the letters a-g are not evenly spaced
# semitones anyway. This table is currently unused — confirm the intended
# tuning before relying on it.
notes = "abcdefg"
frequencies = {}
for i, note in enumerate(notes):
    frequencies[note] = 440 * pow(pow(2, 1/2), i)

# One cycle of each waveform as signed samples in roughly [-128, 127];
# they are offset to unsigned 8-bit (centered at 128) later, in main().
sine_wave = [int(sin(x * 2*pi/N_SAMPLES) * 127) for x in range(0, N_SAMPLES)]
square_wave = []
sawtooth_wave = []
triangle_wave = []
for i in range(0, N_SAMPLES):
    # BUG FIX: the original wrote ``... % 2*pi``, which parses as
    # ``(... % 2) * pi`` because % and * share precedence left-to-right.
    # That warped the phase and distorted every generated table.
    # Parenthesize to get a true phase in [0, 2*pi).
    phase = (i * 2*pi / N_SAMPLES) % (2*pi)
    # Square: high for the first half cycle, low for the second.
    if phase < pi:
        square_wave.append(127)
    else:
        square_wave.append(-128)
    # Sawtooth: linear ramp from 127 down to about -127 over one cycle.
    sawtooth_wave.append(int(127 - (127 // pi * phase)))
    # Triangle: ramp up over the first half cycle, back down over the second.
    if phase < pi:
        triangle_wave.append(int(-127 + (2 * 127 * phase // pi)))
    else:
        triangle_wave.append(int(3 * 127 - (2 * 127 * phase // pi)))


def main():
    """Interactive synth: keys 1-5 pick a waveform, 'q' quits.

    Streams unsigned 8-bit mono PCM to the default ALSA playback device.
    """
    buf = bytearray(PERIOD_SIZE)

    # alsaaudio setup: mono, 44.1 kHz, unsigned 8-bit samples.
    dev = alsaaudio.PCM(type=alsaaudio.PCM_PLAYBACK)
    dev.setchannels(1)
    dev.setrate(SAMPLE_RATE)
    dev.setformat(FORMAT)
    dev.setperiodsize(PERIOD_SIZE)

    f = 440
    # Half-amplitude sine, offset to unsigned 8-bit (centered at 128).
    w_half = [x//2 + 128 for x in make_wave(sine_wave, f)]

    # Start on a full-amplitude square wave.
    w = [x + 128 for x in make_wave(square_wave, 440)]
    buf = bytearray(w)
    char = getch.getch()
    last = 'q'
    while char != 'q':
        if char != last:
            # Rebuild the playback buffer only when the selection changes.
            if char == '1':
                w = [x//2 + 128 for x in make_wave(sine_wave, 440)]
                buf = bytearray(w)
            elif char == '2':
                w = [x//2 + 128 for x in make_wave(square_wave, 440)]
                buf = bytearray(w)
            elif char == '3':
                w = [x//2 + 128 for x in make_wave(sawtooth_wave, 440)]
                buf = bytearray(w)
            elif char == '4':
                w = [x//2 + 128 for x in make_wave(triangle_wave, 440)]
                buf = bytearray(w)
            elif char == '5':
                buf = bytearray(w_half)
        # Push a few periods so the tone is audible between keypresses.
        dev.write(buf)
        dev.write(buf)
        dev.write(buf)
        last = char
        char = getch.getch()
    return 0


def make_wave(wave, frequency):
    """Resample one cycle of ``wave`` into a PERIOD_SIZE buffer at ``frequency``."""
    step = N_SAMPLES * frequency // SAMPLE_RATE
    w = []
    for i in range(0, PERIOD_SIZE):
        w.append(wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES])
    return w


if __name__ == '__main__':
    main()
#!/usr/bin/env python

from distutils.core import setup

# Package metadata collected in one place and unpacked into setup().
_metadata = dict(
    name='check_iftraffic_nrpe',
    version='0.12.1',
    description='Nagios NRPE plugin to check Linux network traffic',
    scripts=['check_iftraffic_nrpe.py'],
    author='Samuel Krieg',
    author_email='samuel.krieg+github@gmail.com',
    url='https://github.com/SamK/check_iftraffic_nrpe.py',
    download_url='https://github.com/SamK/check_iftraffic_nrpe.py/tarball/0.12.1',
    keywords=['nagios', 'traffic', 'nrpe', 'monitoring'],
)

setup(**_metadata)
#!/usr/bin/env python2 from dbutil import * def createTables(): """ Populate the array
with names of sql DDL files """ for sqlFileName in ["Address.sql", "Electricity.sql", "CodeViolationsReport.sql", "FireRescueEMSResponse.sql", "NaturalGasReport.sql", "WaterReport.sql"]: try: runSqlFile("create/" + sqlFileName) print "Created table '{}'".format(sqlFileName.s
plit(".sql")[0]) except Exception as e: pass createTables()
.dot(x, x) - Delta**2 if c > 0: raise ValueError("`x` is not within the trust region.") d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant. # Computations below avoid loss of significance, see "Numerical Recipes". q = -(b + copysign(d, b)) t1 = q / a t2 = c / q if t1 < t2: return t1, t2 else: return t2, t1 def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None, rtol=0.01, max_iter=10): """Solve a trust-region problem arising in least-squares minimization. This function implements a method described by J. J. More [1]_ and used in MINPACK, but it relies on a single SVD of Jacobian instead of series of Cholesky decompositions. Before running this function, compute: ``U, s, VT = svd(J, full_matrices=False)``. Parameters ---------- n : int Number of variables. m : int Number of residuals. uf : ndarray Computed as U.T.dot(f). s : ndarray Singular values of J. V : ndarray Transpose of VT. Delta : float Radius of a trust region. initial_alpha : float, optional Initial guess for alpha, which might be available from a previous iteration. If None, determined automatically. rtol : float, optional Stopping tolerance for the root-finding procedure. Namely, the solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``. max_iter : int, optional Maximum allowed number of iterations for the root-finding procedure. Returns ------- p : ndarray, shape (n,) Found solution of a trust-region problem. alpha : float Positive value such that (J.T*J + alpha*I)*p = -J.T*f. Sometimes called Levenberg-Marquardt parameter. n_iter : int Number of iterations made by root-finding procedure. Zero means that Gauss-Newton step was selected as the solution. References ---------- .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. """ def phi_and_derivative(alpha, suf, s, Delta): """Function of which to find zero. 
It is defined as "norm of regularized (by alpha) least-squares solution minus `Delta`". Refer to [1]_. """ denom = s**2 + alpha p_norm = norm(suf / denom) phi = p_norm - Delta phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm return phi, phi_prime suf = s * uf # Check if J has full rank and try Gauss-Newton step. if m >= n: threshold = EPS * m * s[0] full_rank = s[-1] > threshold else: full_rank = False if full_rank: p = -V.dot(uf / s) if norm(p) <= Delta: return p, 0.0, 0 alpha_upper = norm(suf) / Delta if full_rank: phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta) alpha_lower = -phi / phi_prime else: alpha_lower = 0.0 if initial_alpha is None or not full_rank and initial_alpha == 0: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) else: alpha = initial_alpha for it in range(max_iter): if alpha < alpha_lower or alpha > alpha_upper: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta) if phi < 0: alpha_upper = alpha ratio = phi / phi_prime alpha_lower = max(alpha_lower, alpha - ratio) alpha -= (phi + Delta) * ratio / Delta if np.abs(phi) < rtol * Delta: break p = -V.dot(suf / (s**2 + alpha)) # Make the norm of p equal to Delta, p is changed only slightly during # this. It is done to prevent p lie outside the trust region (which can # cause problems later). p *= Delta / norm(p) return p, alpha, it + 1 def solve_trust_region_2d(B, g, Delta): """Solve a general trust-region problem in 2 dimensions. The problem is reformulated as a 4-th order algebraic equation, the solution of which is found by numpy.roots. Parameters ---------- B : ndarray, shape (2, 2) Symmetric matrix, defines a quadratic term of the function. g : ndarray, shape (2,) Defines a linear term of the function. Delta : float Radius of a trust region. Returns ------- p : ndarray, shape (2,) Found solution. newton_step : bool Whether the returned solution is the Newton step which lies within the trust region. 
""" try: R, lower = cho_factor(B) p = -cho_solve((R, lower), g) if np.dot(p, p) <= Delta**2: return p, True except LinAlgError: pass a = B[0, 0] * Delta**2 b = B[0, 1] * Delta**2 c = B[1, 1] * Delta**2 d = g[0] * Delta f = g[1] * Delta coeffs = np.array( [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d]) t = np.roots(coeffs) # Can handle leading zeros. t = np.real(t[np.isreal(t)]) p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2))) value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p) i = np.argmin(value) p = p[:, i] return p, False def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit): """Update the radius of a trust region based on the cost reduction. Returns ------- Delta : float New radius. ratio : float Ratio between actual and predicted reductions. Zero if predicted reduction is zero. """ if predicted_reduction > 0: ratio = actual_reduction / predicted_reduction else: ratio = 0 if ratio < 0.25: Delta = 0.25 * step_norm elif ratio > 0.75 and bound_hit: Delta *= 2.0 return Delta, ratio # Construction and minimization of quadratic fu
nctions. def build_quadratic_1d(J, g, s, diag=None, s0=None): """Parameterize
a multivariate quadratic function along a line. The resulting univariate quadratic function is given as follows: :: f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + g.T * (s0 + s*t) Parameters ---------- J : ndarray, sparse matrix or LinearOperator shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (n,) Direction vector of a line. diag : None or ndarray with shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. s0 : None or ndarray with shape (n,), optional Initial point. If None, assumed to be 0. Returns ------- a : float Coefficient for t**2. b : float Coefficient for t. c : float Free term. Returned only if `s0` is provided. """ v = J.dot(s) a = np.dot(v, v) if diag is not None: a += np.dot(s * diag, s) a *= 0.5 b = np.dot(g, s) if s0 is not None: u = J.dot(s0) b += np.dot(u, v) c = 0.5 * np.dot(u, u) + np.dot(g, s0) if diag is not None: b += np.dot(s0 * diag, s) c += 0.5 * np.dot(s0 * diag, s0) return a, b, c else: return a, b def minimize_quadratic_1d(a, b, lb, ub, c=0): """Minimize a 1-d quadratic function subject to bounds. The free term `c` is 0 by default. Bounds must be finite. Returns ------- t : float Minimum point. y : float Minimum value. """ t = [lb, ub] if a != 0: extremum = -0.5 * b / a if lb < extremum < ub: t.append(extremum) t = np.asarray(t) y = a * t**2 + b * t + c min_index = np.argmin(y) return t[min_index], y[min_index] def evaluate_quadratic(J, g
view': 'modelview <selection>',
    'upgrade': 'upgrade [store-dir] ...',
    'addref': 'addref [store-dir] ... <filename> ...',
    'qc': 'qc [store-dir]',
    'report': 'report <subcommand> <arguments>... [options]'
}

# Names of all known subcommands.
subcommands = subcommand_descriptions.keys()

program_name = 'fomosto'

usage = program_name + ''' <subcommand> <arguments> ... [options]

Subcommands:

    init          %(init)s
    build         %(build)s
    stats         %(stats)s
    check         %(check)s
    decimate      %(decimate)s
    redeploy      %(redeploy)s
    view          %(view)s
    extract       %(extract)s
    import        %(import)s
    export        %(export)s
    ttt           %(ttt)s
    tttview       %(tttview)s
    tttextract    %(tttextract)s
    tttlsd        %(tttlsd)s
    server        %(server)s
    download      %(download)s
    modelview     %(modelview)s
    upgrade       %(upgrade)s
    addref        %(addref)s
    qc            %(qc)s
    report        %(report)s

To get further help and a list of available options for any subcommand run:

    fomosto <subcommand> --help

''' % d2u(subcommand_descriptions)


def add_common_options(parser):
    # Options shared by every subcommand.
    parser.add_option(
        '--loglevel',
        action='store',
        dest='loglevel',
        type='choice',
        choices=('critical', 'error', 'warning', 'info', 'debug'),
        default='info',
        help='set logger level to '
             '"critical", "error", "warning", "info", or "debug". '
             'Default is "%default".')


def process_common_options(options):
    # Apply the effects of the common options parsed above.
    util.setup_logging(program_name, options.loglevel)


def cl_parse(command, args, setup=None, details=None):
    # Build an OptionParser for `command`, parse `args`, apply common
    # option handling.  `setup` may add per-command options; `details`
    # is appended to the description.  Returns (parser, options, args).
    usage = subcommand_usages[command]
    descr = subcommand_descriptions[command]

    if isinstance(usage, str):
        usage = [usage]

    susage = '%s %s' % (program_name, usage[0])
    for s in usage[1:]:
        susage += '\n%s%s %s' % (' '*7, program_name, s)

    description = descr[0].upper() + descr[1:] + '.'

    if details:
        description = description + ' %s' % details

    parser = OptionParser(usage=susage, description=description)
    # Keep the description verbatim; optparse would otherwise re-wrap it.
    parser.format_description = lambda formatter: description

    if setup:
        setup(parser)

    add_common_options(parser)
    (options, args) = parser.parse_args(args)
    process_common_options(options)
    return parser, options, args


def die(message, err=''):
    # Print an error message and terminate with non-zero exit status.
    sys.exit('%s: error: %s \n %s' % (program_name, message, err))


def fomo_wrapper_module(name):
    # Resolve a modelling-code wrapper module by name.  `name` may carry a
    # '.variant' suffix.  Returns (wrapper_module, variant) or dies.
    try:
        if not re.match(gf.meta.StringID.pattern, name):
            raise ValueError('invalid name')

        words = name.split('.', 1)
        if len(words) == 2:
            name, variant = words
        else:
            name = words[0]
            variant = None

        name_clean = re.sub(r'[.-]', '_', name)
        modname = '.'.join(['pyrocko', 'fomosto', name_clean])
        mod = __import__(modname, level=0)
        return getattr(mod.fomosto, name_clean), variant

    except ValueError:
        die('invalid modelling code wrapper name')

    except ImportError:
        die('''modelling code wrapper "%s" not available or not installed
(module probed: "%s")''' % (name, modname))


def command_init(args):
    details = '''

Available modelling backends:
%s

More information at
https://pyrocko.org/docs/current/apps/fomosto/backends.html
''' % '\n'.join([' * %s' % b for b in fomosto.AVAILABLE_BACKENDS])

    parser, options, args = cl_parse(
        'init', args, details=details)

    if len(args) == 0:
        sys.exit(parser.format_help())

    if args[0] == 'redeploy':
        # Variant 1: "init redeploy <source> <dest>" — create an editable
        # copy of an existing store's configuration.
        if len(args) != 3:
            parser.error('incorrect number of arguments')

        source_dir, dest_dir = args[1:]

        try:
            source = gf.Store(source_dir)
        except gf.StoreError as e:
            die(e)

        config = copy.deepcopy(source.config)
        config.derived_from_id = source.config.id
        try:
            config_filenames = gf.store.Store.create_editables(
                dest_dir, config=config)

        except gf.StoreError as e:
            die(e)

        try:
            dest = gf.Store(dest_dir)
        except gf.StoreError as e:
            die(e)

        # Copy auxiliary files (extra keys) from source to destination.
        for k in source.extra_keys():
            source_fn = source.get_extra_path(k)
            dest_fn = dest.get_extra_path(k)
            shutil.copyfile(source_fn, dest_fn)

        logger.info(
            '(1) configure settings in files:\n  %s'
            % '\n  '.join(config_filenames))

        logger.info(
            '(2) run "fomosto redeploy <source> <dest>", as needed')

    else:
        # Variant 2: "init <modelling_code_id> <store-dir>" — create a
        # fresh store skeleton for the given backend.
        if len(args) != 2:
            parser.error('incorrect number of arguments')

        (modelling_code_id, store_dir) = args

        module, variant = fomo_wrapper_module(modelling_code_id)
        try:
            config_filenames = module.init(store_dir, variant)
        except gf.StoreError as e:
            die(e)

        logger.info('(1) configure settings in files:\n  %s'
                    % '\n  '.join(config_filenames))
        logger.info('(2) run "fomosto ttt" in directory "%s"' % store_dir)
        logger.info('(3) run "fomosto build" in directory "%s"' % store_dir)


def get_store_dir(args):
    # One optional positional argument: the store directory (default: cwd).
    if len(args) == 1:
        store_dir = op.abspath(args.pop(0))
    else:
        store_dir = op.abspath(op.curdir)

    if not op.isdir(store_dir):
        die('not a directory: %s' % store_dir)

    return store_dir


def get_store_dirs(args):
    # Zero or more store directories (default: cwd); all must exist.
    if len(args) == 0:
        store_dirs = [op.abspath(op.curdir)]
    else:
        store_dirs = [op.abspath(x) for x in args]

    for store_dir in store_dirs:
        if not op.isdir(store_dir):
            die('not a directory: %s' % store_dir)

    return store_dirs


def command_build(args):
    def setup(parser):
        parser.add_option(
            '--force', dest='force', action='store_true',
            help='overwrite existing files')

        parser.add_option(
            '--nworkers', dest='nworkers', type='int', metavar='N',
            help='run N worker processes in parallel')

        parser.add_option(
            '--continue', dest='continue_', action='store_true',
            help='continue suspended build')

        # NOTE(review): the help text of '--step' duplicates that of
        # '--block' ("process block number IBLOCK") although it controls
        # the build step — confirm and reword.
        parser.add_option(
            '--step', dest='step', type='int', metavar='I',
            help='process block number IBLOCK')

        parser.add_option(
            '--block', dest='iblock', type='int', metavar='I',
            help='process block number IBLOCK')

    parser, options, args = cl_parse('build', args, setup=setup)

    store_dir = get_store_dir(args)
    try:
        # Convert 1-based user-facing indices to 0-based internal indices.
        if options.step is not None:
            step = options.step - 1
        else:
            step = None

        if options.iblock is not None:
            iblock = options.iblock - 1
        else:
            iblock = None

        store = gf.Store(store_dir)
        module, _ = fomo_wrapper_module(store.config.modelling_code_id)
        module.build(
            store_dir,
            force=options.force,
            nworkers=options.nworkers, continue_=options.continue_,
            step=step,
            iblock=iblock)

    except gf.StoreError as e:
        die(e)


def command_stats(args):
    parser, options, args = cl_parse('stats', args)
    store_dir = get_store_dir(args)

    try:
        store = gf.Store(store_dir)
        s = store.stats()

    except gf.StoreError as e:
        die(e)

    for k in store.stats_keys:
        print('%s: %s' % (k, s[k]))


def command_check(args):
    parser, options, args = cl_parse('check', args)
    store_dir = get_store_dir(args)

    try:
        store = gf.Store(store_dir)
        problems = store.check(show_progress=True)
        if problems:
            die('problems detected with gf store: %s' % store_dir)

    except gf.StoreError as e:
        die(e)


def load_config(fn):
    # Load and validate a Green's function store config file, or die.
    try:
        config = gf.meta.load(filename=fn)
        assert isinstance(config, gf.Config)

    except Exception:
        die('cannot load gf config from file: %s' % fn)

    return config


def command_decimate(args):
    def
import logging
from logging import config
import os

import paramiko

from read_config import *


class FileDownloader(object):
    """Recursively download a remote directory tree to a local directory
    over SFTP.

    Connection parameters and the source/destination directories are read
    from the config object passed to the constructor.  Usage: connect(),
    then download(), then close().
    """

    def __init__(self, config):
        logging.config.fileConfig('../config/logging.conf')
        self.logger = logging.getLogger('fileLogger')
        self.ip = config.get_serverip()
        self.port = config.get_serverport()
        self.user = config.get_username()
        self.password = config.get_password()
        self.remote_file_path = config.get_srcdir()
        self.local_file_path = config.get_dstdir()
        self.ssh = None
        # Absolute remote paths collected by list_remote_file().  This must
        # be per-instance state: the original class-level list was shared
        # by every instance, so repeated downloads accumulated stale paths.
        self.abs_file_list = []

    def download(self):
        """Fetch every remote file, recreating the directory layout locally."""
        sftp = self.ssh.open_sftp()
        self.list_remote_file(self.remote_file_path)
        for remote_file in self.abs_file_list:
            # Path of the file relative to the remote source directory.
            sub_path_name = remote_file[
                remote_file.index(self.remote_file_path)
                + len(self.remote_file_path):]
            local_file = self.local_file_path + sub_path_name
            dir_name = os.path.dirname(local_file)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            sftp.get(remote_file, local_file)
            self.logger.info('Download ' + remote_file + ' successfully.')

    def list_remote_file(self, remote_folder):
        """Recursively collect absolute paths of all regular files under
        remote_folder into self.abs_file_list."""
        sftp = self.ssh.open_sftp()
        for entry in sftp.listdir(remote_folder):
            entry_path = remote_folder + '/' + entry
            # Decide directory vs. file with the remote `file` command.
            # NOTE(review): the path is interpolated into a shell command;
            # quote it (or use sftp.stat) if names may contain shell
            # metacharacters or spaces.
            cmd = 'file ' + entry_path + '|grep directory|wc -l'
            stdin, stdout, stderr = self.ssh.exec_command(cmd)
            if stdout.readline().strip() == "1":
                self.list_remote_file(entry_path)
            else:
                self.abs_file_list.append(entry_path)

    def connect(self):
        """Open the SSH connection used for SFTP and remote commands."""
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(self.ip, int(self.port), self.user, self.password)
        self.logger.info('Connect to ' + self.ip + ' successfully.')

    def close(self):
        """Close the SSH connection."""
        self.ssh.close()
        self.logger.info('Disconnect to ' + self.ip + ' successfully.')


if __name__ == '__main__':
    # `loaded_config` (not `config`) to avoid shadowing logging.config.
    loaded_config = ConfigLoader().load_config('../config/product_config.json')
    downloader = FileDownloader(loaded_config)
    downloader.connect()
    downloader.download()
    downloader.close()
, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'rule': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Rule']" } ),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], { 'default': '0' }) }, 'sentry.groupseen': { 'Meta': { 'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'db_index': 'False' } ) }, 'sentry.grouptagkey': { 'Meta': { 'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.grouptagvalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'" }, 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 
'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']" } ), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.helppage': { 'Meta': { 'object_name': 'HelpPage' }, 'content': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_visible': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'key': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'unique': 'True', 'null': 'True' } ), 'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50' }), 'title': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.lostpasswordhash': { 'Meta': { 'object_name': 'LostPasswordHash' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'unique': 'True' } ) }, 'sentry.option': { 'Meta': { 'object_name': 'Option' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '64' }), 'last_updated': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'value': 
('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': { 'object_name': 'Organization' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'members': ( 'django.db.models.fields.related.ManyToManyField', [], { 'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']" } ), 'name': ('dj
"""Admin-site configuration for the site-management tool."""
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin

from .models import Category, GroupServicePermission, Service, TopBanner


class CategoryAdmin(TranslationAdmin):
    """Custom admin for the :class:`Category` model.

    Inherits :class:`TranslationAdmin` from `django-modeltranslation` so
    that the multilingual fields are edited in a user-friendly way.
    """
    pass


class ServiceAdmin(TranslationAdmin):
    """Custom admin for the `Service` model.

    Inherits :class:`TranslationAdmin` from `django-modeltranslation` so
    that the multilingual fields are edited in a user-friendly way.
    """
    pass


class TopBannerAdmin(TranslationAdmin):
    """Custom admin for the `TopBanner` model.

    Inherits :class:`TranslationAdmin` from `django-modeltranslation` so
    that the multilingual fields are edited in a user-friendly way.
    """
    pass


for model, model_admin in (
        (Category, CategoryAdmin),
        (Service, ServiceAdmin),
        (TopBanner, TopBannerAdmin)):
    admin.site.register(model, model_admin)

admin.site.register(GroupServicePermission)
from sklearn import datasets
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# Fit an ordinary least-squares model to the Boston housing data and
# report its parameters and in-sample fit.
boston = datasets.load_boston()
features = boston.data
target = boston.target

regressor = LinearRegression()
regressor.fit(features, target)

# Predictions for the first four samples next to the true values.
print(regressor.predict(features[:4, :]))
print(target[:4])

# Learned parameters and coefficient of determination (R^2).
print(regressor.coef_)
print(regressor.intercept_)
print(regressor.score(features, target))

# Alternative: synthetic regression data with a scatter plot.
#X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=20)
#plt.scatter(X,y)
#plt.show()
"""
Django settings for web project.

For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a(g^11#$6jyys)0mjl3zv4=r029or=v*ldq=)44866(re!nmg)'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): the hard-coded secret key and DEBUG=True are acceptable for
# development only; move both to environment variables before deploying.
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'MutationInfoApp',  # project application
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'web.urls'

WSGI_APPLICATION = 'web.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

# Development database: SQLite file next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
from invoke import task, Collection
from invocations.checks import blacken
from invocations.packaging import release
from invocations import docs, pytest as pytests, travis


@task
def coverage(c, html=True):
    """
    Run coverage with coverage.py.
    """
    # Coverage has to drive the process itself rather than pytest-cov:
    # pytest loads its plugins before pytest-cov can start coverage.py, so
    # all module-level code (constants, 'def' lines, etc.) would always be
    # skipped.  Running coverage as the outer layer sidesteps that, hence
    # no pytest-cov here.
    # (This caveat is pytest-specific; modules *using* pytest-relaxed may
    # happily keep using pytest-cov for their own non-pytest code.)
    c.run("coverage run --source=pytest_relaxed -m pytest")
    if html:
        c.run("coverage html")
        c.run("open htmlcov/index.html")


# TODO: good candidate for builtin-to-invoke "just wrap <other task> with a
# tiny bit of behavior", and/or args/kwargs style invocations
@task
def test(
    c,
    verbose=True,
    color=True,
    capture="sys",
    opts="",
    x=False,
    k=None,
    module=None,
):
    """
    Run pytest with given options.

    Wraps ``invocations.pytests.test``. See its docs for details.
    """
    # Disable our own plugin while testing it; given pytest's plugin setup
    # options, this is both easier and simpler than having the entire test
    # suite manually enable it.
    pytests.test(
        c,
        verbose=verbose,
        color=color,
        capture=capture,
        opts=opts + " -p no:relaxed",
        x=x,
        k=k,
        module=module,
    )


ns = Collection(blacken, coverage, docs, test, travis, release)
ns.configure({"blacken": {"find_opts": "-and -not -path './build*'"}})
neth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""
Provides temporary file handling centered around a single top-level
securely created temporary directory.

The public interface of this module is thread-safe.
"""

import os
import threading
import tempfile

from duplicity import log
from duplicity import util
from duplicity import globals

# Set up state related to managing the default temporary directory
# instance
_defaultLock = threading.Lock()
_defaultInstance = None


def default():
    """
    Obtain the global default instance of TemporaryDirectory, creating it
    first if necessary. Failures are propagated to caller. Most callers
    are expected to use this function rather than instantiating
    TemporaryDirectory directly, unless they explicitly desire to have
    their "own" directory for some reason.

    This function is thread-safe.
    """
    global _defaultLock
    global _defaultInstance

    _defaultLock.acquire()
    try:
        # Re-create the default instance if it was never made or if its
        # backing directory has already been cleaned up.
        if _defaultInstance is None or _defaultInstance.dir() is None:
            _defaultInstance = TemporaryDirectory(temproot = globals.temproot)
        return _defaultInstance
    finally:
        _defaultLock.release()


class TemporaryDirectory:
    """
    A temporary directory.

    An instance of this class is backed by a directory in the file
    system created securely by the use of tempfile.mkdtemp(). Said
    instance can be used to obtain unique filenames inside of this
    directory for cases where mktemp()-like semantics is desired, or
    (recommended) an fd,filename pair for mkstemp()-like semantics.

    See further below for the security implications of using it.

    Each instance will keep a list of all files ever created by it, to
    facilitate deletion of such files and rmdir() of the directory
    itself. It does this in order to be able to clean out the directory
    without resorting to a recursive delete (ala rm -rf), which would be
    risky. Calling code can optionally (recommended) notify an instance
    of the fact that a tempfile was deleted, and thus need not be kept
    track of anymore.

    This class serves two primary purposes:

    Firstly, it provides a convenient single top-level directory in
    which all the clutter ends up, rather than cluttering up the root of
    the system temp directory itself with many files.

    Secondly, it provides a way to get mktemp() style semantics for
    temporary file creation, with most of the risks gone. Specifically,
    since the directory itself is created securely, files in this
    directory can be (mostly) safely created non-atomically without the
    usual mktemp() security implications. However, in the presence of
    tmpwatch, tmpreaper, or similar mechanisms that will cause files in
    the system tempdir to expire, a security risk is still present
    because the removal of the TemporaryDirectory managed directory
    removes all protection it offers.

    For this reason, use of mkstemp() is greatly preferred above use of
    mktemp().

    In addition, since cleanup is in the form of deletion based on a
    list of filenames, completely independently of whether someone else
    already deleted the file, there exists a race here as well. The
    impact should however be limited to the removal of an 'attackers'
    file.
    """
    def __init__(self, temproot = None):
        """
        Create a new TemporaryDirectory backed by a unique and
        securely created file system directory.

        tempbase - The temp root directory, or None to use system
        default (recommended).
        """
        # NOTE(review): the docstring names the parameter "tempbase" but
        # the signature says "temproot" — reconcile.
        self.__dir = tempfile.mkdtemp("-tempdir", "duplicity-", temproot)

        log.Info(_("Using temporary directory %s") % util.ufn(self.__dir))

        # number of mktemp()/mkstemp() calls served so far
        self.__tempcount = 0

        # dict of paths pending deletion; use dict even though we are
        # not concerned with association, because it is unclear whether
        # sets are O(1), while dictionaries are.
        self.__pending = {}

        self.__lock = threading.Lock() # protect private resources *AND* mktemp/mkstemp calls

    def dir(self):
        """
        Returns the absolute pathname of the temp folder.
        """
        return self.__dir

    def __del__(self):
        """
        Perform cleanup.
        """
        # NOTE(review): this guards on the module-level default instance,
        # not on `self` — confirm whether non-default instances should
        # also be cleaned up here.
        global _defaultInstance
        if _defaultInstance is not None:
            self.cleanup()

    def mktemp(self):
        """
        Return a unique filename suitable for use for a temporary
        file. The file is not created.

        Subsequent calls to this method are guaranteed to never return
        the same filename again. As a result, it is safe to use under
        concurrent conditions.

        NOTE: mkstemp() is greatly preferred.
        """
        filename = None

        self.__lock.acquire()
        try:
            self.__tempcount = self.__tempcount + 1
            suffix = "-%d" % (self.__tempcount,)
            filename = tempfile.mktemp(suffix, "mktemp-", self.__dir)

            log.Debug(_("Registering (mktemp) temporary file %s") % util.ufn(filename))
            self.__pending[filename] = None
        finally:
            self.__lock.release()

        return filename

    def mkstemp(self):
        """
        Returns a filedescriptor and a filename, as per os.mkstemp(),
        but located in the temporary directory and subject to tracking
        and automatic cleanup.
        """
        fd = None
        filename = None

        self.__lock.acquire()
        try:
            self.__tempcount = self.__tempcount + 1
            suffix = "-%d" % (self.__tempcount,)
            fd, filename = tempfile.mkstemp(suffix, "mkstemp-", self.__dir)

            log.Debug(_("Registering (mkstemp) temporary file %s") % util.ufn(filename))
            self.__pending[filename] = None
        finally:
            self.__lock.release()

        return fd, filename

    def mkstemp_file(self):
        """
        Convenience wrapper around mkstemp(), with the file descriptor
        converted into a file object.
        """
        fd, filename = self.mkstemp()

        return os.fdopen(fd, "r+"), filename

    def forget(self, fname):
        """
        Forget about the given filename previously obtained through
        mktemp() or mkstemp(). This should be called *after* the file
        has been deleted, to stop a future cleanup() from trying to
        delete it.

        Forgetting is only needed for scaling purposes; that is, to
        avoid n temp file creations from implying that n filenames are
        kept in memory. Typically this would never matter in duplicity,
        but for niceness sake callers are recommended to use this
        method whenever possible.
        """
        self.__lock.acquire()
        try:
            if fname in self.__pending:
                log.Debug(_("Forgetting temporary file %s") % util.ufn(fname))
                del(self.__pending[fname])
            else:
                log.Warn(_("Attempt to forget unknown tempfile %s - this is probably a bug.") % util.ufn(fname))
                pass
        finally:
            self.__lock.release()

    def cleanup(self):
        """
        Cleanup any files created in the temporary directory (that have
        not been forgotten), and clean up the temporary directory
        itself.

        On failure they are logged, but this method will not raise an
        else:
            # newer zfcp sysfs interface with auto port scan
            raise ValueError, _("WWPN %(wwpn)s not found at zFCP device "
                                "%(devnum)s.") \
                % {'wwpn': self.wwpn, 'devnum': self.devnum}
        else:
            if os.path.exists(portadd):
                # older zfcp sysfs interface
                # NOTE(review): the '%' here applies to the *return value*
                # of log.info() (None), not to the format string — this is
                # a latent TypeError; the mapping should be an argument of
                # log.info() instead.
                log.info("WWPN %(wwpn)s at zFCP device %(devnum)s already "
                         "there.") % {'wwpn': self.wwpn,
                                      'devnum': self.devnum}

        # Add the LUN under the WWPN unless it already exists.
        if not os.path.exists(unitdir):
            try:
                loggedWriteLineToFile(unitadd, self.fcplun)
                udev_settle()
            except IOError as e:
                raise ValueError, _("Could not add LUN %(fcplun)s to WWPN "
                                    "%(wwpn)s on zFCP device %(devnum)s "
                                    "(%(e)s).") \
                    % {'fcplun': self.fcplun, 'wwpn': self.wwpn,
                       'devnum': self.devnum, 'e': e}
        else:
            raise ValueError, _("LUN %(fcplun)s at WWPN %(wwpn)s on zFCP "
                                "device %(devnum)s already configured.") \
                % {'fcplun': self.fcplun, 'wwpn': self.wwpn,
                   'devnum': self.devnum}

        # Check the 'failed' sysfs attribute; roll back on failure.
        fail = "0"
        try:
            f = open(failed, "r")
            fail = f.readline().strip()
            f.close()
        except IOError as e:
            raise ValueError, _("Could not read failed attribute of LUN "
                                "%(fcplun)s at WWPN %(wwpn)s on zFCP device "
                                "%(devnum)s (%(e)s).") \
                % {'fcplun': self.fcplun, 'wwpn': self.wwpn,
                   'devnum': self.devnum, 'e': e}
        if fail != "0":
            self.offlineDevice()
            raise ValueError, _("Failed LUN %(fcplun)s at WWPN %(wwpn)s on "
                                "zFCP device %(devnum)s removed again.") \
                % {'fcplun': self.fcplun, 'wwpn': self.wwpn,
                   'devnum': self.devnum}

        return True

    def offlineSCSIDevice(self):
        # Find and delete the SCSI device that belongs to this zFCP
        # device/WWPN/LUN triple by scanning /proc/scsi/scsi.
        f = open("/proc/scsi/scsi", "r")
        lines = f.readlines()
        f.close()

        # alternatively iterate over /sys/bus/scsi/devices/*:0:*:*/

        for line in lines:
            if not line.startswith("Host"):
                continue
            scsihost = string.split(line)
            host = scsihost[1]
            channel = "0"
            id = scsihost[5]
            lun = scsihost[7]
            scsidev = "%s:%s:%s:%s" % (host[4:], channel, id, lun)
            fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
            scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)

            f = open("%s/hba_id" %(fcpsysfs), "r")
            fcphbasysfs = f.readline().strip()
            f.close()

            f = open("%s/wwpn" %(fcpsysfs), "r")
            fcpwwpnsysfs = f.readline().strip()
            f.close()

            f = open("%s/fcp_lun" %(fcpsysfs), "r")
            fcplunsysfs = f.readline().strip()
            f.close()

            # Delete only the SCSI device matching our triple.
            if fcphbasysfs == self.devnum \
                    and fcpwwpnsysfs == self.wwpn \
                    and fcplunsysfs == self.fcplun:
                loggedWriteLineToFile(scsidel, "1")
                udev_settle()
                return

        log.warn("no scsi device found to delete for zfcp %s %s %s"
                 %(self.devnum, self.wwpn, self.fcplun))

    def offlineDevice(self):
        # Take the LUN (and, if then unused, the port and the device)
        # offline via the zfcp sysfs interface.
        offline = "%s/%s/online" %(zfcpsysfs, self.devnum)
        portadd = "%s/%s/port_add" %(zfcpsysfs, self.devnum)
        portremove = "%s/%s/port_remove" %(zfcpsysfs, self.devnum)
        unitremove = "%s/%s/%s/unit_remove" %(zfcpsysfs, self.devnum,
                                              self.wwpn)
        portdir = "%s/%s/%s" %(zfcpsysfs, self.devnum, self.wwpn)
        devdir = "%s/%s" %(zfcpsysfs, self.devnum)

        try:
            self.offlineSCSIDevice()
        except IOError as e:
            raise ValueError, _("Could not correctly delete SCSI device of "
                                "zFCP %(devnum)s %(wwpn)s %(fcplun)s "
                                "(%(e)s).") \
                % {'devnum': self.devnum, 'wwpn': self.wwpn,
                   'fcplun': self.fcplun, 'e': e}

        try:
            loggedWriteLineToFile(unitremove, self.fcplun)
        except IOError as e:
            raise ValueError, _("Could not remove LUN %(fcplun)s at WWPN "
                                "%(wwpn)s on zFCP device %(devnum)s "
                                "(%(e)s).") \
                % {'fcplun': self.fcplun, 'wwpn': self.wwpn,
                   'devnum': self.devnum, 'e': e}

        if os.path.exists(portadd):
            # only try to remove ports with older zfcp sysfs interface
            for lun in os.listdir(portdir):
                if lun.startswith("0x") and \
                        os.path.isdir(os.path.join(portdir, lun)):
                    log.info("Not removing WWPN %s at zFCP device %s since port still has other LUNs, e.g. %s." %(self.wwpn, self.devnum, lun))
                    return True

            try:
                loggedWriteLineToFile(portremove, self.wwpn)
            except IOError as e:
                raise ValueError, _("Could not remove WWPN %(wwpn)s on zFCP "
                                    "device %(devnum)s (%(e)s).") \
                    % {'wwpn': self.wwpn, 'devnum': self.devnum, 'e': e}

        if os.path.exists(portadd):
            # older zfcp sysfs interface
            for port in os.listdir(devdir):
                if port.startswith("0x") and \
                        os.path.isdir(os.path.join(devdir, port)):
                    log.info("Not setting zFCP device %s offline since it still has other ports, e.g. %s." %(self.devnum, port))
                    return True
        else:
            # newer zfcp sysfs interface with auto port scan
            import glob
            luns = glob.glob("%s/0x????????????????/0x????????????????"
                             %(devdir,))
            if len(luns) != 0:
                log.info("Not setting zFCP device %s offline since it still has other LUNs, e.g. %s." %(self.devnum, luns[0]))
                return True

        try:
            loggedWriteLineToFile(offline, "0")
        except IOError as e:
            raise ValueError, _("Could not set zFCP device %(devnum)s "
                                "offline (%(e)s).") \
                % {'devnum': self.devnum, 'e': e}

        return True


class ZFCP:
    """ ZFCP utility class.

        This class will automatically online to ZFCP drives configured in
        /tmp/fcpconfig when the startup() method gets called. It can also
        be used to manually configure ZFCP devices through the addFCP()
        method.

        As this class needs to make sure that /tmp/fcpconfig configured
        drives are only onlined once and as it keeps a global list of all
        ZFCP devices it is implemented as a Singleton.
    """

    def __init__(self):
        self.fcpdevs = []
        self.hasReadConfig = False
        self.down = True

    # So that users can write zfcp() to get the singleton instance
    def __call__(self):
        return self

    def readConfig(self):
        try:
            f = open("/tmp/fcpconfig", "r")
        except IOError:
            log.info("no /tmp/fcpconfig; not configuring zfcp")
            return

        lines = f.readlines()
        f.close()

        for line in lines:
            # each line is a string separated list of values to describe a dev
            # there are two valid formats for the line:
            #   devnum scsi
import sys sys.path.insert(1, "../../") import h2o def vec_show(i
p,port): # Connect to h2o h2o.init(ip,port) iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv")) print "iris:" iris.show() ################################################################### res = 2 - iris res2 = res[0] print "res2:" res2.show() res3 = res[1] print "res3:" res3.show() iris[2].sho
w() if __name__ == "__main__": h2o.run_test(sys.argv, vec_show)
# -*- coding: utf-8 -*- # Copyright (C) 2010 Axel Tillequin (bdcht3@gmail.com) # This code is part of Masr # published under GPLv2 license import gtk from grandalf.graphs import Vertex,Edge,Graph from grandalf.layouts import SugiyamaLayout from grandalf.routing import * from grandalf.utils import median_wh,Dot from .items import * # start is called when Masr is 'run', to modify GUI/Canvas elements # with plugin-specific menus, keybindings, canvas options, etc. def start(pfunc,app,**kargs): app.screen.gui.message("plugin graph started by %s"%pfunc) al = kargs['args'] sg = comp = 0 step = False cons = False N=1 for i,arg in enumerate(al): if arg.endswith(Session.filetype): if not app.session: app.session = Session(arg,app) if arg == '-sg': sg = int(al[i+1]) if arg == '-c': comp = int(al[i+1]) if arg == '-s': step = True if arg == '-N': N = int(al[i+1]) if arg == '-ce': cons=True if app.session: assert sg<len(app.session.L) app.session.g = ast2Graph(app.session.L[sg]) assert comp<len(app.session.g.C) app.session.cg = CGraph(app.screen.canvas,app.session.g.C[comp]) app.session.cg.Draw(N,stepflag=step,constrained=cons) def end(pfunc,app,**kargs): pass # Session class allows Masr GUIs' File menu to Open a file with matching # extensions for a new plugin session on this file's data. class Session(object): filetype = ('.dot',) def __init__(self,filename,app): self.app = app self.filename = filename self.dot = Dot() self.L = self.dot.read(filename) self.scene = None def info(self): for s in self.L: print s def ast2Graph(ast): V={} E=[] # create Vertex and Vertex.view for each node in ast : for k,x in ast.nodes.iteritems(): try: label = x.attr['label'] except (KeyError,AttributeError): label = x.name v = dotnode(label.strip('"\n')) V[x.name] = v edgelist = [] # create Edge and Edge_basic for each edge in ast: for e in ast.edges: edgelist.append(e) f
or edot in edgelist: v1 = V[edot.n1.name] v2 = V[edot.n2.name] e = Edge(v1,v2) e.view = Edge_basic(v1.view,v2.view,head=True) e.view.set_properties(line_width = 2) E.append(e) return Graph(V.values(),E) def dotnode(seq): _start = Vertex(seq) v = _start.view = Node_codeblock(_start.data.replace('\l','\n')) v.w,v.h = v.get_wh() return _start #---------------------
--------------------------------------------------------- # CGraph is simply a SugiyamaLayout extended with adding nodes and edges views # on the current canvas and dealing with mouse/keyboard events. class CGraph(SugiyamaLayout): def __init__(self,c,g): self.parent = c SugiyamaLayout.__init__(self,g) self.route_edge = route_with_lines self.dx,self.dy = 5,5 self.dirvh=0 c.parent.connect_object("button-press-event",CGraph.eventhandler,self) c.parent.connect_object("button-release-event",CGraph.eventhandler,self) c.parent.connect_object("key-press-event",CGraph.eventhandler,self) c.parent.connect_object("key-release-event",CGraph.eventhandler,self) def Draw(self,N=1,stepflag=False,constrained=False): self.init_all(cons=constrained) if stepflag: self.drawer=self.draw_step() self.greens=[] else: self.draw(N) for e in self.alt_e: e.view.set_properties(stroke_color='red') for v in self.g.sV: self.connect_add(v.view) for e in self.g.sE: self.parent.root.add_child(e.view) # move edge start/end to CX points: e.view.update_points() def connect_add(self,item): self.parent.root.add_child(item) def disconnect(self): self.parent.parent.disconnect_by_func(CGraph.eventhandler) def remove(self,item): #import gc #gc.set_debug(gc.DEBUG_LEAK) #gc.collect() Blit.remove(self,item) for e in item.cx.registered[:]: for cx in e.cx: cx.unregister(e) self.c.root.remove(self,e) def clean(self): for v in self.g.sV: self.c.root.remove(v.view) # Scene-Wide (default) event handler on items events: def eventhandler(self,e): if e.type == gtk.gdk.KEY_PRESS: if e.keyval == ord('p'): for l in self.layers: for v in l: v.view.xy = (self.grx[v].x[self.dirvh],v.view.xy[1]) self.draw_edges() self.dirvh = (self.dirvh+1)%4 if e.keyval == ord('W'): self.xspace += 1 self.setxy() self.draw_edges() if e.keyval == ord('w'): self.xspace -= 1 self.setxy() self.draw_edges() if e.keyval == ord('H'): self.yspace += 1 self.setxy() self.draw_edges() if e.keyval == ord('h'): self.yspace -= 1 self.setxy() self.draw_edges() 
if e.keyval == ord(' '): try: s,mvmt = self.drawer.next() print s,len(mvmt) for x in self.greens: x.view.shadbox.set_properties(fill_color='grey44') self.greens=[] for x in mvmt: if hasattr(x.view,'shadbox'): x.view.shadbox.set_properties(fill_color='green') self.greens.append(x) except StopIteration: print 'drawer terminated' del self.drawer del self.greens except AttributeError: print 'drawer created' self.drawer=self.draw_step() self.greens=[]
p, remote = self.socket.recvfrom(1500) p = DNS(raw(p)) # check received packet for correctness assert(p is not None) assert(p[DNS].qr == 0) assert(p[DNS].opcode == 0) # has two queries assert(p[DNS].qdcount == TEST_QDCOUNT) qdcount = p[DNS].qdcount # both for TEST_NAME assert(p[DNS].qd[0].qname == TEST_NAME.encode("utf-8") + b".") assert(p[DNS].qd[1].qname == TEST_NAME.encode("utf-8") + b".") assert(any(p[DNS].qd[i].qtype == DNS_RR_TYPE_A for i in range(qdcount))) # one is A assert(any(p[DNS].qd[i].qtype == DNS_RR_TYPE_AAAA for i in range(qdcount))) # one is AAAA if self.reply is not None: self.socket.sendto(raw(self.reply), remote) self.reply = None def listen(self, reply=None): self.reply = reply self.enter_loop.set() def stop(self): self.stopped = True self.enter_loop.set() self.socket.close() self.join() server = None def check_and_search_output(cmd, pattern, res_group, *args, **kwargs): output = subprocess.check_output(cmd, *args, **kwargs).decode("utf-8") for line in output.splitlines(): m = re.search(pattern, line) if m is not None: return m.group(res_group) return None def get_bridge(tap): res = check_and_search_output( ["bridge", "link"], r"{}.+master\s+(?P<master>[^\s]+)".format(tap), "master" ) return tap if res is None else res def get_host_lladdr(tap): res = check_and_search_output( ["ip", "addr", "show", "dev", tap, "scope", "link"], r"inet6\s+(?P<lladdr>[0-9A-Fa-f:]+)/\d+", "lladdr" ) if res is None: raise AssertionError( "Can't find host link-local address on interface {}".format(tap) ) else: return res def dns_server(child, server, port=53): child.sendline("dns server {} {:d}".format(server, port)) child.sendline("dns server") child.expect(r"DNS server: \[{}\]:{:d}".format(server, port)) def successful_dns_request(child, name, exp_addr=None): child.sendline("dns request {}".format(name)) res = child.expect(["error resolving {}".format(name), "{} resolves to {}".format(name, exp_addr)], timeout=3) return ((res > 0) and (exp_addr is not None)) def 
test_success(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA, rdlen=DNS_RR_TYPE_AAAA_DLEN, rdata=TEST_AAAA_DATA) / DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A, rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA)))) assert(successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA)) def test_timeout(child): # listen but send no reply server.listen() assert(not successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA)) def test_too_short_response(child): server.listen(Raw(b"\x00\x00\x81\x00")) assert(not successful_dns_request(child, TEST_NAME)) def test_qdcount_too_large1(child): # as reported in https://github.com/RIOT-OS/RIOT/issues/10739 server.listen(base64.b64decode("AACEAwkmAAAAAAAAKioqKioqKioqKioqKioqKioqKio=")) assert(not successful_dns_request(child, TEST_NAME)) def test_qdcount_too_large2(child): server.listen(DNS(qr=1, qdcount=40961, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA, rdlen=DNS_RR_TYPE_AAAA_DLEN, rdata=TEST_AAAA_DATA) / DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A, rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA)))) assert(not successful_dns_request(child, TEST_NAME)) def test_ancount_too_large1(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=2714, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qn
ame=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA,
rdlen=DNS_RR_TYPE_AAAA_DLEN, rdata=TEST_AAAA_DATA) / DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A, rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA)))) assert(not successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA)) def test_ancount_too_large2(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=19888, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an="\0")) assert(not successful_dns_request(child, TEST_NAME)) def test_bad_compressed_message_query(child): server.listen(DNS(qr=1, qdcount=1, ancount=1, qd=DNS_MSG_COMP_MASK)) assert(not successful_dns_request(child, TEST_NAME)) def test_bad_compressed_message_answer(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=DNS_MSG_COMP_MASK)) assert(not successful_dns_request(child, TEST_NAME)) def test_malformed_hostname_query(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=0, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / # need to use byte string here to induce wrong label # lengths b"\xafexample\x03org\x00\x00\x1c\x00\x01"))) assert(not successful_dns_request(child, TEST_NAME)) def test_malformed_hostname_answer(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), # need to use byte string here to induce wrong label # lengths an=(b"\xaftest\x00\x00\x1c\x00\x01\x00\x00\x00\x00\x00\x10" b"\x20\x01\x0d\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00" b"\x00\x00\x01" / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)))) assert(not successful_dns_request(child, TEST_NAME)) def test_addrlen_too_large(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=(DNSRR(rrname=TEST_NAME, 
type=DNS_RR_TYPE_AAAA, rdlen=18549, rdata=TEST_AAAA_DATA) / DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A, rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA)))) assert(not successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA)) def test_addrlen_wrong_ip6(child): server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT, qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) / DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)), an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA, rdlen=DNS_RR_TYPE_AAAA_DLEN + 1, rdat
import time
from datetime import datetime


class pyTemperature(object):
    """Container for a single temperature/pressure/humidity reading.

    Attributes mirror the constructor arguments; ``date`` defaults to the
    moment the instance is created.
    """

    def __init__(self, date=None, temp=None, pressure=None, humidity=None):
        # BUG FIX: the original signature used ``date=datetime.now()``.
        # Default values are evaluated once at function definition time, so
        # every instance created without an explicit date shared the same
        # stale timestamp from module import.  A ``None`` sentinel computes
        # the timestamp per call instead.
        self.date = date if date is not None else datetime.now()
        self.temperature = temp
        self.pressure = pressure
        self.humidity = humidity

    def printTemperature(self):
        """Print the reading to stdout, one labelled field per line."""
        print(self.date)
        print("Temp: ")
        print(self.temperature)
        print("Press: ")
        print(self.pressure)
        print("Humidity: ")
        print(self.humidity)
from core.himesis import Himesis, HimesisPostConditionPattern import cPickle as pickle from uuid import UUID class HReconnectMatchElementsRHS(HimesisPostConditionPattern): def __init__(self): """ Creates the himesis graph representing the AToM3 model HReconnectMatchElementsRHS. """ # Flag this instance as compiled now self.is_compiled = True super(HReconnectMatchElementsRHS, self).__init__(name='HReconnectMatchElementsRHS', num_nodes=3, edges=[]) # Add the edges self.add_edges([(2, 0), (0, 1)]) # Set the graph attributes self["mm__"] = pickle.loads("""(lp1 S'MT_post__GM2AUTOSAR_MM' p2 aS'MoTifRule' p3 a.""") self["MT_action__"] = """#=============================================================================== # This code is executed after the rule has been applied. # You can access a node labelled n matched by this rule by: PostNode('n'). # To access attribute x of node n, use: PostNode('n')['x']. #=============================================================================== pass """ self["name"] = """""" self["GUID__"] = UUID('ce9c5429-6e4c-4782-a83a-17e240381cb6') # Set the node attributes self.vs[0]["mm__"] = """MT_post__match_contains""" self.vs[0]["MT_label__"] = """3""" self.vs[0]["GUID__"] = UUID('789662d8-ab7d-4640-a710-abbc847de320') self.vs[1]["mm__"] = """MT_post__MetaModelElement_S""" self.vs[1]["MT_label__"] = """2""" self.vs[1]["MT_post__classtype"] = """ #=============================================================================== # You can access the value of the current node's attribute value by: attr_value. # If the current node shall be created you MUST initialize it here! # You can access a node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # Note that the attribute values are those before the match is rewritten. # The order in which this code is executed depends on the label value # of the encapsulating node. # The given action must return the new value of the attribute. 
#=============================================================================== return attr_value """ self.vs[1]["MT_post__name"] = """ #=============================================================================== # You can access the value of the current node's attribute value by: attr_value. # If the current node shall be created you MUST initialize it here! # You can access a node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # Note that the attribute values are those before the match is rewritten. # The order in which this code is executed depends on the label value # of the encapsulating node. # The given action must return the new value of the attribute.
#=============================================================================== return attr_value """ self.vs[1]["GUID__"] = UUID('7e5e306f-cb65-40df-9e60-63b9fe83b79b') self.vs[2]["mm__"] = """MT_post__MatchModel""" self.vs[2]["MT_label__"] = """1""" self.vs[2]["GUID__"] = UUID('3c85bf70-be4a-40d8-9bcb-c138195ad20e') from HReconnectMatchElementsLHS import HReconnectMatchElementsLHS self.pre = HReconnectMa
tchElementsLHS() def action(self, PostNode, graph): """ Executable constraint code. @param PostNode: Function taking an integer as parameter and returns the node corresponding to that label. """ #=============================================================================== # This code is executed after the rule has been applied. # You can access a node labelled n matched by this rule by: PostNode('n'). # To access attribute x of node n, use: PostNode('n')['x']. #=============================================================================== pass def execute(self, packet, match): """ Transforms the current match of the packet according to the rule %s. Pivots are also assigned, if any. @param packet: The input packet. @param match: The match to rewrite. """ graph = packet.graph # Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite. # Because of the uniqueness property of labels in a rule, we can store all LHS labels # and subsequently add the labels corresponding to the nodes to be created. 
labels = match.copy() #=============================================================================== # Update attribute values #=============================================================================== #=============================================================================== # Create new nodes #=============================================================================== # match_contains3 new_node = graph.add_node() labels['3'] = new_node graph.vs[new_node][Himesis.Constants.META_MODEL] = 'match_contains' #=============================================================================== # Create new edges #=============================================================================== # MatchModel1 -> match_contains3 graph.add_edges([(labels['1'], labels['3'])]) # match_contains3 -> MetaModelElement_S2 graph.add_edges([(labels['3'], labels['2'])]) #=============================================================================== # Set the output pivots #=============================================================================== #=============================================================================== # Perform the post-action #=============================================================================== try: self.action(lambda i: graph.vs[labels[i]], graph) except Exception, e: raise Exception('An error has occurred while applying the post-action', e) #=============================================================================== # Finally, delete nodes (this will automatically delete the adjacent edges) #===============================================================================
""" ===================================== Sensor spac
e least squares regression ===================================== Predict single trial activity from a continuous variable. A single-trial regression is performed in each sensor and timepoint individually, resulting in an Evoked object which contains the regression coefficient (beta value) for each combination of sensor and timepoint. Example also shows the T statistics and the associated p-values. Note that this example is for educational purposes and that the data used here do not contain any signifi
cant effect. (See Hauk et al. (2006). The time course of visual word recognition as revealed by linear regression analysis of ERP data. Neuroimage.) """ # Authors: Tal Linzen <linzen@nyu.edu> # Denis A. Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import mne from mne.datasets import sample from mne.stats.regression import linear_regression print(__doc__) data_path = sample.data_path() ############################################################################### # Set parameters and read data raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, aud_r=2) # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=False, exclude='bads') # Reject some epochs based on amplitude reject = dict(mag=5e-12) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=reject) ############################################################################### # Run regression names = ['intercept', 'trial-count'] intercept = np.ones((len(epochs),), dtype=np.float) design_matrix = np.column_stack([intercept, # intercept np.linspace(0, 1, len(intercept))]) # also accepts source estimates lm = linear_regression(epochs, design_matrix, names) def plot_topomap(x, units): x.plot_topomap(ch_type='mag', scalings=1., size=1.5, vmax=np.max, units=units, times=np.linspace(0.1, 0.2, 5)) trial_count = lm['trial-count'] plot_topomap(trial_count.beta, units='z (beta)') plot_topomap(trial_count.t_val, units='t') plot_topomap(trial_count.mlog10_p_val, units='-log10 p') plot_topomap(trial_count.stderr, units='z (error)')
from gitmostwanted.app import celery, db
from gitmostwanted.lib.github.api import user_starred, user_starred_star
from gitmostwanted.models.repo import Repo
from gitmostwanted.models.user import UserAttitude


@celery.task()
def repo_starred_star(user_id: int, access_token: str):
    """Synchronise a user's GitHub stars with the local "liked" attitudes.

    Repos starred on GitHub but not yet liked locally are liked via
    ``repo_like``; repos liked locally but not starred on GitHub are starred
    remotely via ``user_starred_star``.

    :param user_id: local user id
    :param access_token: GitHub OAuth token of that user
    :return: ``False`` if the user has no stars, otherwise a tuple
             (repos starred remotely, repos newly liked locally)
    """
    starred, code = user_starred(access_token)
    if not starred:
        return False

    attitudes = UserAttitude.list_liked_by_user(user_id)

    # PERF: build the two name sets once so each membership test below is
    # O(1).  The original nested a linear scan of the other collection inside
    # each comprehension, making the whole task O(n*m).
    liked_names = {a.repo.full_name for a in attitudes}
    starred_names = {s['full_name'] for s in starred}

    lst_in = [repo_like(s['full_name'], user_id)
              for s in starred if s['full_name'] not in liked_names]
    lst_out = [user_starred_star(r.repo.full_name, access_token)
               for r in attitudes if r.repo.full_name not in starred_names]
    return len(lst_out), len(list(filter(None, lst_in)))


def repo_like(repo_name: str, uid: int):
    """Mark a repository as liked by the user.

    :param repo_name: full repository name, e.g. ``"owner/repo"``
    :param uid: local user id
    :return: the repo id, or ``None`` when the repo is unknown locally
    """
    repo = Repo.get_one_by_full_name(repo_name)
    if not repo:
        return None
    db.session.merge(UserAttitude.like(uid, repo.id))
    db.session.commit()
    return repo.id
object for example Account or Opportunity.. about_kind = ndb.StringProperty() about_item = ndb.StringProperty() # a key reference to the account's organization # Should be required discussionKind = ndb.StringProperty() discussionId = ndb.StringProperty() organization = ndb.KeyProperty() class Note(EndpointsModel): # _message_fields_schema = ('id','title') author = ndb.StructuredProperty(Userinfo) # Sharing fields owner = ndb.StringProperty() collaborators_list = ndb.StructuredProperty(model.Userinfo, repeated=True) collaborators_ids = ndb.StringProperty(repeated=True) created_at = ndb.DateTimeProperty(auto_now_add=True) updated_at = ndb.DateTimeProperty(auto_now=True) title = ndb.StringProperty(required=True) content = ndb.TextProperty() # number of comments in this topic comments = ndb.IntegerProperty(default=0) # A Topic is attached to an object for example Account or Opportunity.. about_kind = ndb.StringProperty() about_item = ndb.StringProperty() # a key reference to the account's organization # Should be required organization = ndb.KeyProperty() # public or private access = ndb.StringProperty() def put(self, **kwargs): ndb.Model.put(self, **kwargs) self._setup() try: self.put_index() except: print 'error on saving document index' def set_perm(self): about_item = str(self.key.id()) perm = model.Permission(about_kind='Note', about_item=about_item, type='user', role='owner', value=self.owner) perm.put() def put_index(self, data=None): """ index the element at each""" empty_string = lambda x: x if x else "" collaborators = " ".join(self.collaborators_ids) organization = str(self.organization.id()) if data: search_key = ['topics', 'tags'] for key in search_key: if key not in data.keys(): data[key] = "" my_document = search.Document( doc_id=str(data['id']), fields=[ search.TextField(name=u'type', value=u'Note'), search.TextField(name='organization', value=empty_string(organization)), search.TextField(name='access', value=empty_string(self.access)), 
search.TextField(name='owner', value=empty_string(self.owner)), search.TextField(name='collaborators', value=collaborators), search.TextField(name='title', value=empty_string(self.title)), search.TextField(name='content', value=empty_string(self.content)), search.TextField(name='about_kind', value=empty_string(self.about_kind)), search.TextField(name='about_item', value=empty_string(self.about_item)), search.DateField(name='created_at', value=self.created_at), search.DateField(name='updated_at', value=self.updated_at), search.NumberField(name='comments', value=self.comments), search.TextField(name='tags', value=data['tags']), search.TextField(name='topics', value=data['topics']), ]) else: my_document = search.Document( doc_id=str(self.key.id()), fields=[ search.TextField(name=u'type', value=u'Note'), search.TextField(name='organization', value=empty_string(organization)), search.TextField(name='access', value=empty_string(self.access)), search.TextField(name='owner', value=empty_string(self.owner)), search.TextField(name='collaborators', value=collaborators), search.TextField(name='title', value=empty_string(self.title)), search.TextField(name='content', value=empty_string(self.content)), search.TextField(name='about_kind', value=empty_string(self.about_kind)), search.TextField(name='about_item', value=empty_string(self.about_item)), search.DateField(name='created_at', value=self.created_at), search.DateField(name='updated_at', value=self.updated_at), search.NumberField(name='comments', value=self.comments), ]) my_index = search.Index(name="GlobalIndex") my_index.put(my_document) @classmethod def get_schema(cls, user_from_email, request): note = cls.get_by_id(int(request.id)) if note is None: raise endpoints.NotFoundException('Note not found.') author = AuthorSchema( google_user_id=note.author.google_user_id, display_name=note.author.display_name, google_public_profile_url=note.author.google_public_profile_
url, photo=note.author.photo ) about = None edge_list = Edge.list(start_node=note.key, kind='parents') fo
r edge in edge_list['items']: about_kind = edge.end_node.kind() parent = edge.end_node.get() if parent: if about_kind == 'Contact' or about_kind == 'Lead': if parent.lastname and parent.firstname: about_name = parent.firstname + ' ' + parent.lastname else: if parent.lastname: about_name = parent.lastname else: if parent.firstname: about_name = parent.firstname else: about_name = parent.name about = DiscussionAboutSchema( kind=about_kind, id=str(parent.key.id()), name=about_name ) note_schema = NoteSchema( id=str(note.key.id()), entityKey=note.key.urlsafe(), title=note.title, content=note.content, about=about, created_by=author, created_at=note.created_at.strftime("%Y-%m-%dT%H:%M:00.000"), updated_at=note.updated_at.strftime("%Y-%m-%dT%H:%M:00.000") ) return note_schema @classmethod def insert(cls, user_from_email, request): parent_key = ndb.Key(urlsafe=request.about) note_author = Userinfo() note_author.display_name = user_from_email.google_display_name note_author.photo = user_from_email.google_public_profile_photo_url note = Note( owner=user_from_email.google_user_id, organization=user_from_email.organization, author=note_author, title=request.title, content=request.content ) entityKey_async = note.put_async() entityKey = entityKey_async.get_result() note.put_index() Edge.insert( start_node=parent_key, end_node=entityKey, kind='topics', inverse_edge='parents' ) author_shema = AuthorSchema( google_user_id=note.owner, display_name=note_author.display_name, google_public_profile_url=note_author.google_public_profile_url, photo=note_author.display_name, edgeKey="", email=note_author.email ) note_schema = NoteSchema( id=str(note.key.id()), entityKey=note.key.urlsafe(), title=note.title, content=note.content, created_by=author_shema ) return note_schema @classmethod def list_by_parent(cls, parent_key, request): topic_list = [] topic_edge_list = Edge.list( start_node=parent_key, kind='topics', limit=request.topics.limit, pageToken=request.topic
from bson import ObjectId

from . import repeating_schedule
from state_change import StateChange


class StateChangeRepeating(StateChange):
    """A thermostat state change that repeats on a weekly cycle.

    ``seconds_into_week`` is the offset from Monday 00:00 at which the state
    (AC target, heater target, fan) becomes active.
    """

    def __init__(self, seconds_into_week, AC_target, heater_target, fan, id=None):
        # id: MongoDB ObjectId of the stored document, or None if unsaved.
        self.id = id
        self.seconds_into_week = seconds_into_week
        self.AC_target = AC_target
        self.heater_target = heater_target
        self.fan = fan

        # BUG FIX: the original checks read ``type(x) is int or long``, which
        # parses as ``(type(x) is int) or long`` and is therefore always
        # truthy -- the validation never fired.  isinstance() with a tuple of
        # accepted types is the correct form (and also accepts subclasses).
        assert isinstance(seconds_into_week, (int, long))
        assert isinstance(AC_target, (int, float))
        assert isinstance(heater_target, (int, float))
        assert isinstance(fan, (int, float))

    @classmethod
    def from_dictionary(cls, json):
        """Build an instance from a MongoDB document / JSON dictionary."""
        seconds_into_week = json["week_time"]
        AC_target = json["state"]["AC_target"]
        heater_target = json["state"]["heater_target"]
        fan = json["state"]["fan"]
        try:
            id = ObjectId(json["_id"]["$oid"])
        except KeyError:
            id = None
        except TypeError:
            # "_id" may already be a plain id rather than a {"$oid": ...} dict.
            try:
                id = ObjectId(json["_id"])
            except Exception:
                # was a bare ``except:`` -- kept best-effort, but no longer
                # swallows KeyboardInterrupt/SystemExit
                id = None
        return cls(seconds_into_week, AC_target, heater_target, fan, id=id)

    @classmethod
    def get_current(cls, now):
        """Return the schedule entry most recently passed relative to ``now``.

        The aggregation computes, for every entry, how long ago (mod one
        week) it was due, and takes the smallest delta.
        """
        week_time = now.weekday() * 24 * 60 ** 2 + (now.hour * 60 + now.minute) * 60
        result = repeating_schedule.aggregate(
            [
                {"$project": {
                    "time_delta": {"$mod": [{"$add": [{"$subtract": [week_time, "$week_time"]},
                                                      24 * 7 * 60 ** 2]},
                                            24 * 7 * 60 ** 2]},
                    "state": 1,
                    "week_time": 1}
                 },
                {"$sort": {"time_delta": 1}}
            ]).next()
        return cls.from_dictionary(result)

    def save(self):
        """Insert or update this entry in the repeating-schedule collection."""
        delayed_state_change = {
            "week_time": self.seconds_into_week,
            "state": {"AC_target": self.AC_target,
                      "heater_target": self.heater_target,
                      "fan": self.fan}
        }
        if self.id is not None:
            delayed_state_change["_id"] = self.id
        return repeating_schedule.save(delayed_state_change)

    def to_dictionary(self):
        """Return a JSON-serialisable representation (id stringified)."""
        return {"week_time": self.seconds_into_week,
                "_id": str(self.id),
                "state": {"AC_target": self.AC_target,
                          "heater_target": self.heater_target,
                          "fan": self.fan}}

    @classmethod
    def get_all_dic(cls):
        """Return every schedule entry as a plain dictionary."""
        all_items = cls.get_all()
        result = []
        for item in all_items:
            result.append(item.to_dictionary())
        return result
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages


class TestUser(helpers.AdminTestCase):
    """Integration test: create and delete a keystone user as admin."""

    USER_NAME = helpers.gen_random_resource_name("user")

    @decorators.skip_because(bugs=['1774697'])
    def test_create_delete_user(self):
        page = self.home_pg.go_to_identity_userspage()

        # Create the user and expect a success (and no error) message.
        page.create_user(self.USER_NAME, password=self.TEST_PASSWORD,
                         project='admin', role='admin')
        self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(page.is_user_present(self.USER_NAME))

        # Delete the user again and verify it disappears from the table.
        page.delete_user(self.USER_NAME)
        self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(page.is_user_present(self.USER_NAME))
from __future__ import unicode_literals

from django.db import models


class TimeStampedModel(models.Model):
    """Abstract base model tracking creation and last-modification times.

    ``created`` is stamped once when the row is first inserted;
    ``modified`` is refreshed on every save.
    """

    # auto_now_add: set only on INSERT
    created = models.DateTimeField(auto_now_add=True)
    # auto_now: updated on every save()
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
import errno
import glob
import platform
import re
import sys
import tempfile
import zipfile
from contextlib import contextmanager
from distutils.version import StrictVersion

import os
import requests
from xml.etree import ElementTree

# Platform / architecture detection, computed once at import time.
IS_64_BIT = sys.maxsize > 2**32
IS_LINUX = platform.system().lower() == 'linux'
IS_WINDOWS = platform.system().lower() == 'windows'
IS_MAC = platform.system().lower() == 'darwin'
# BUG FIX: the original ignored IS_MAC, so macOS was flagged "unknown"
# even though it is fully supported below.
UNKNOWN_PLATFORM = not IS_LINUX and not IS_WINDOWS and not IS_MAC

REPO_DIR = os.path.join(os.path.expanduser('~'), '.rockyroad')

# Bucket keys look like "2.46/chromedriver_linux64.zip".
# BUG FIX: raw string -- '\d' in a plain string is an invalid escape.
_CHROMEDRIVER_KEY_RE = re.compile(r'^([\d.]+?)/chromedriver_(linux|mac|win)(32|64)')


@contextmanager
def download_file(url):
    """
    Download a remote file to a temporary location.

    The temporary file is closed before its path is yielded (so it can be
    re-opened on Windows, e.g. by zipfile) and deleted when the context
    exits; the original implementation leaked it.

    :param url: the file url
    """
    resp = requests.get(url, stream=True)
    fp = tempfile.NamedTemporaryFile(delete=False)
    try:
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks
                fp.write(chunk)
        fp.close()
        yield fp.name
    finally:
        fp.close()
        try:
            os.remove(fp.name)
        except OSError:
            pass  # best effort -- temp dir cleanup will get it eventually


def _mkdirp(dirpath):
    """Create *dirpath* (and parents); an already-existing dir is not an error."""
    try:
        os.makedirs(dirpath)
    except OSError as exc:
        # BUG FIX: the original silently swallowed *every* OSError; only
        # "directory already exists" is benign, anything else must propagate.
        if not (exc.errno == errno.EEXIST and os.path.isdir(dirpath)):
            raise


def _get_xml_ns(uri):
    """Return the leading "{namespace}" of an ElementTree tag, or ''."""
    m = re.match(r'\{.*?\}', uri)
    return m.group(0) if m else ''


class Driver:
    """Base class for a downloadable, locally cached WebDriver binary."""

    # Driver version string; None means "resolve to the latest release".
    version = None
    # CPU bitness as a string: '32' or '64'.
    bit = None
    repo_dir = os.path.join(os.path.expanduser('~'), '.rockyroad')

    def __init__(self, version=None, bit=None):
        if version:
            self.version = str(version)
        if not bit:
            # Default to the bitness of the running interpreter.
            self.bit = '64' if IS_64_BIT else '32'
        else:
            self.bit = str(bit)
        # Subclasses may define setup() to populate version metadata.
        if hasattr(self, 'setup'):
            self.setup()

    def download(self):
        """Download the driver binary"""
        raise NotImplementedError('You must implement download()')

    def binary_path(self):
        """The absolute path to the driver binary"""
        raise NotImplementedError('You must implement binary_path()')

    def path(self):
        """
        The absolute path to the driver, downloading the binary first if it
        is not already cached.

        :return: absolute path to the driver binary
        """
        if not os.path.exists(self.binary_path()):
            self.download()
        return self.binary_path()


class ChromeDriver(Driver):
    """Chromedriver binary resolved from the Google storage bucket."""

    # Class-level default; setup() installs a per-instance dict.
    versions = {}
    _bin_path = None

    def setup(self):
        """Fetch the bucket index and build {version: {platform: {bit: url}}}."""
        # BUG FIX: ``versions`` used to be a class attribute mutated in place,
        # so every instance shared and re-extended the same dict.
        self.versions = {}
        url = 'https://chromedriver.storage.googleapis.com/'
        resp = requests.get(url)
        tree = ElementTree.fromstring(resp.content)
        ns = _get_xml_ns(tree.tag)
        for elem in tree.findall('%sContents' % ns):
            key = elem.find('%sKey' % ns)
            m = _CHROMEDRIVER_KEY_RE.match(key.text)
            if m:
                v = m.group(1)  # version
                p = m.group(2)  # platform
                b = m.group(3)  # bit
                self.versions.setdefault(v, {}).setdefault(p, {})[b] = url + key.text

    @property
    def _platform(self):
        """Platform token used in the bucket keys for the current OS."""
        if IS_WINDOWS:
            return 'win'
        elif IS_LINUX:
            return 'linux'
        elif IS_MAC:
            return 'mac'
        else:
            raise RuntimeError('Unable to detect current platform')

    def binary_path(self):
        """Absolute cache path of the chromedriver binary (resolves version)."""
        if self._bin_path:
            return self._bin_path
        if self.version and self.version not in self.versions:
            raise RuntimeError('Chromedriver %s does not exist' % self.version)
        if not self.version:
            # No explicit version requested: pick the newest release.
            numbers = list(self.versions.keys())
            numbers.sort(key=StrictVersion, reverse=True)
            self.version = numbers[0]
        bin_name = 'chromedriver.exe' if IS_WINDOWS else 'chromedriver'
        self._bin_path = os.path.join(
            REPO_DIR, 'chromedriver',
            '%s-%s%s' % (self.version, self._platform, self.bit),
            bin_name)
        return self._bin_path

    def download(self):
        """Download and unzip the chromedriver binary into the cache dir."""
        # BUG FIX: the original split the binary path on os.pathsep (a no-op
        # for normal paths) and extracted the zip into a directory *named
        # like the binary*; it also crashed with a None path if
        # binary_path() had not been called yet.  Calling binary_path()
        # first also resolves self.version when it was left as "latest".
        destination_dir = os.path.dirname(self.binary_path())
        url = self.versions[self.version][self._platform][self.bit]
        with download_file(url) as name:
            _mkdirp(destination_dir)
            with zipfile.ZipFile(name, 'r') as z:
                z.extractall(destination_dir)
        for filename in glob.iglob(os.path.join(destination_dir, '*')):
            # BUG FIX: the original passed decimal 777 (mode 0o1411);
            # the intent was rwxrwxrwx, i.e. octal 0o777.
            os.chmod(filename, 0o777)


def download_chromedriver(version=None, bit=None):
    """
    Download the chromedriver binary. If version is not set, then it will
    get the latest one. If the bit value is not set then it will use the
    same value as the current system.

    :param version: optional chromedriver version; latest when omitted
    :param bit: optional '32'/'64'; defaults to the current interpreter's
    :return: the directory the binary was extracted to
    """
    url = 'https://chromedriver.storage.googleapis.com/'
    resp = requests.get(url)
    tree = ElementTree.fromstring(resp.content)
    ns = _get_xml_ns(tree.tag)
    if version:
        version = str(version)
    if bit:
        bit = str(bit)
    else:
        bit = '64' if IS_64_BIT else '32'
    versions = {}
    for elem in tree.findall('%sContents' % ns):
        key = elem.find('%sKey' % ns)
        m = _CHROMEDRIVER_KEY_RE.match(key.text)
        if m:
            v = m.group(1)  # version
            p = m.group(2)  # platform
            b = m.group(3)  # bit
            versions.setdefault(v, {}).setdefault(p, {})[b] = url + key.text
    if version and version not in versions:
        raise RuntimeError('Chromedriver %s is not a valid version' % version)
    if IS_WINDOWS:
        p = 'win'
    elif IS_LINUX:
        p = 'linux'
    elif IS_MAC:
        p = 'mac'
    else:
        raise RuntimeError('Unable to detect current platform')
    if not version:
        # No explicit version: pick the latest release.
        numbers = list(versions.keys())
        numbers.sort(key=StrictVersion, reverse=True)
        version = numbers[0]
    # BUG FIX: the original had a dead "if bit is None" branch (bit always
    # has a value by this point) that would have raised a bare KeyError
    # instead of the intended "Invalid bit value" error.
    if bit not in versions[version][p]:
        raise RuntimeError('Invalid bit value %s' % bit)
    download_url = versions[version][p][bit]
    destination_dir = os.path.join(REPO_DIR, 'chromedriver',
                                   '%s-%s%s' % (version, p, bit,))
    if os.path.isdir(destination_dir):
        return destination_dir  # already cached
    # download and unzip to repo directory
    with download_file(download_url) as name:
        _mkdirp(destination_dir)
        with zipfile.ZipFile(name, 'r') as z:
            z.extractall(destination_dir)
    for filename in glob.iglob(os.path.join(destination_dir, '*')):
        os.chmod(filename, 0o777)  # BUG FIX: was decimal 777 (== 0o1411)
    return destination_dir


def get_binary(name, arch=None, version=None):
    """
    Get the driver binary. This will check the cache location to see if it
    has already been downloaded and return its path. If it is not in the
    cache then it will be downloaded.

    :param name: the binary name chromedriver,
    :param arch:
    :param version:
    :return:
    """
status=UserGroupStatus.PENDING.value) resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies) body = utils.check_response_basic_info(resp, 200, expected_method="GET") utils.check_val_is_in("group_names", body) utils.check_val_type(body["group_names"], list) utils.check_val_is_in(group_with_terms_name, body["group_names"]) pending_members = body["group_names"] # Check if getting all group's members finds both pending and active members path = "/users/{user_name}/groups?status={status}".format(user_name=self.test_user_name, status=UserGroupStatus.ALL.value) resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies) body = utils.check_response_basic_info(resp, 200, expected_method="GET") utils.check_val_is_in("group_names", body) self.assertCou
ntEqual(body["group_names"], pending_members + active_members) # validate that pending user can be viewed in the edit group page path = "/ui/groups/{}/default".format(group_with_terms_name) resp = utils.test_request(self, "GET", path) body = utils.check_ui_response_basic_info(resp) utils.check_val_i
s_in("{} [pending]".format(self.test_user_name), body) # validate that pending group membership can be viewed in the edit user page path = "/ui/users/{}/default".format(self.test_user_name) resp = utils.test_request(self, "GET", path) body = utils.check_ui_response_basic_info(resp) utils.check_val_is_in("{} [pending]".format(group_with_terms_name), body) # validate that pending group membership can be viewed in the user's account page utils.check_or_try_logout_user(self) utils.check_or_try_login_user(self, username=self.test_user_name, password=self.test_user_name, use_ui_form_submit=True) resp = utils.test_request(self, "GET", "/ui/users/current") body = utils.check_ui_response_basic_info(resp, expected_title="Magpie") utils.check_val_is_in("{} [pending]".format(group_with_terms_name), body) # Validate the content of the email that would have been sent if not mocked message = real_contents(*wrapped_contents.call_args.args, **wrapped_contents.call_args.kwargs) msg_str = message.decode() confirm_url = wrapped_contents.call_args.args[-1].get("confirm_url") test_user_email = "{}@mail.com".format(self.test_user_name) utils.check_val_is_in("To: {}".format(test_user_email), msg_str) utils.check_val_is_in("From: Magpie", msg_str) utils.check_val_is_in(confirm_url, msg_str) utils.check_val_true(confirm_url.startswith("http://localhost") and "/tmp/" in confirm_url, msg="Expected confirmation URL in email to be a temporary token URL.") # Simulate user clicking the confirmation link in 'sent' email (external operation from Magpie) resp = utils.test_request(self, "GET", urlparse(confirm_url).path) body = utils.check_ui_response_basic_info(resp, 200) utils.check_val_is_in("accepted the terms and conditions", body) utils.check_val_equal(mocked_send.call_count, 3, msg="Expected sent notification to user for an email confirmation of user added " "to requested group, following terms and conditions acceptation.") # Log back to admin user to apply admin-only checks 
utils.check_or_try_logout_user(self) self.login_admin() # Check if user has been added to group successfully utils.TestSetup.check_UserGroupMembership(self, override_group_name=group_with_terms_name) path = "/groups/{grp}".format(grp=group_with_terms_name) resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies) body = utils.check_response_basic_info(resp, 200, expected_method="GET") utils.check_val_equal(body["group"]["member_count"], 1) utils.check_val_is_in(self.test_user_name, body["group"]["user_names"]) # UI checks: validates that both test tmp_tokens were deleted if '[pending]' is not displayed anymore # validate that user is no longer pending in the edit group page path = "/ui/groups/{}/default".format(group_with_terms_name) resp = utils.test_request(self, "GET", path) body = utils.check_ui_response_basic_info(resp) utils.check_val_not_in("{} [pending]".format(self.test_user_name), body) utils.check_val_is_in(self.test_user_name, body) # validate that group membership is no longer pending in the edit user page path = "/ui/users/{}/default".format(self.test_user_name) resp = utils.test_request(self, "GET", path) body = utils.check_ui_response_basic_info(resp) utils.check_val_not_in("{} [pending]".format(group_with_terms_name), body) utils.check_val_is_in(group_with_terms_name, body) # validate that group membership is no longer pending in the user's account page utils.check_or_try_logout_user(self) utils.check_or_try_login_user(self, username=self.test_user_name, password=self.test_user_name, use_ui_form_submit=True) resp = utils.test_request(self, "GET", "/ui/users/current") body = utils.check_ui_response_basic_info(resp, expected_title="Magpie") utils.check_val_not_in("{} [pending]".format(group_with_terms_name), body) utils.check_val_is_in(group_with_terms_name, body) @runner.MAGPIE_TEST_UI @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_REGISTRATION class TestCase_MagpieUI_UserRegistration_Local(ti.UserTestCase, 
unittest.TestCase): # pylint: disable=C0103,invalid-name """ Test any operation that require at least ``MAGPIE_ADMIN_GROUP`` AuthN/AuthZ. Use a local Magpie test application. Enables the User self-registration feature. """ __test__ = True @classmethod def setUpClass(cls): # minimally, must setup the test app to provide the required routes # other settings related to user registration are set specifically for each test variation settings = { "magpie.user_registration_enabled": True, # always needed, other settings added as needed per test case "magpie.smtp_host": "example.com", # must exist when getting configs, but not used because email mocked } cls.grp = get_constant("MAGPIE_ADMIN_GROUP") cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME", raise_missing=False, raise_not_set=False) cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD", raise_missing=False, raise_not_set=False) cls.app = utils.get_test_magpie_app(settings=settings) cls.url = cls.app # to simplify calls of TestSetup (all use .url) cls.cookies = None cls.version = utils.TestSetup.get_Version(cls) cls.headers, cls.cookies = utils.check_or_try_login_user(cls.url, cls.usr, cls.pwd, use_ui_form_submit=True) cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp) cls.setup_admin() cls.login_admin() cls.test_user_name = get_constant("MAGPIE_TEST_USER",
vision, generators, nested_scopes, print_function, unicode_literals, with_statement) import codecs import os import re from pkg_resources import resource_string from pygments.formatters.html import HtmlFormatter from pygments.styles import get_al
l_styles from pants.backend.docgen.targets.doc import Page from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.base.generator import Generator from pants.base.workunit import WorkUnitLabel from pants.binaries import binary_util from pants.build_graph.address import Address from pants.task.task import Task from pants.uti
l.dirutil import safe_mkdir def util(): """Indirection function so we can lazy-import our utils. It's an expensive import that invokes re.compile a lot (via markdown and pygments), so we don't want to incur that cost unless we must. """ from pants.backend.docgen.tasks import markdown_to_html_utils return markdown_to_html_utils class MarkdownToHtml(Task): """Generate HTML from Markdown docs.""" @classmethod def register_options(cls, register): register('--code-style', choices=list(get_all_styles()), default='friendly', fingerprint=True, help='Use this stylesheet for code highlights.') register('--open', type=bool, help='Open the generated documents in a browser.') register('--fragment', type=bool, fingerprint=True, help='Generate a fragment of html to embed in a page.') register('--ignore-failure', type=bool, fingerprint=True, help='Do not consider rendering errors to be build errors.') @classmethod def product_types(cls): return ['markdown_html', 'wiki_html'] def __init__(self, *args, **kwargs): super(MarkdownToHtml, self).__init__(*args, **kwargs) self._templates_dir = os.path.join('templates', 'markdown') self.open = self.get_options().open self.fragment = self.get_options().fragment self.code_style = self.get_options().code_style def execute(self): # TODO(John Sirois): consider adding change detection outdir = os.path.join(self.get_options().pants_distdir, 'markdown') css_path = os.path.join(outdir, 'css', 'codehighlight.css') css = util().emit_codehighlight_css(css_path, self.code_style) if css: self.context.log.info('Emitted {}'.format(css)) def is_page(target): return isinstance(target, Page) roots = set() interior_nodes = set() if self.open: dependencies_by_page = self.context.dependents(on_predicate=is_page, from_predicate=is_page) roots.update(dependencies_by_page.keys()) for dependencies in dependencies_by_page.values(): interior_nodes.update(dependencies) roots.difference_update(dependencies) for page in self.context.targets(is_page): # There are no in 
or out edges so we need to show show this isolated page. if not page.dependencies and page not in interior_nodes: roots.add(page) with self.context.new_workunit(name='render', labels=[WorkUnitLabel.MULTITOOL]): plaingenmap = self.context.products.get('markdown_html') wikigenmap = self.context.products.get('wiki_html') show = [] for page in self.context.targets(is_page): def process_page(key, outdir, url_builder, genmap, fragment=False): if page.format == 'rst': with self.context.new_workunit(name='rst') as workunit: html_path = self.process_rst( workunit, page, os.path.join(outdir, util().page_to_html_path(page)), os.path.join(page.payload.sources.rel_path, page.source), self.fragment or fragment, ) else: with self.context.new_workunit(name='md'): html_path = self.process_md( os.path.join(outdir, util().page_to_html_path(page)), os.path.join(page.payload.sources.rel_path, page.source), self.fragment or fragment, url_builder, css=css, ) self.context.log.info('Processed {} to {}'.format(page.source, html_path)) relpath = os.path.relpath(html_path, outdir) genmap.add(key, outdir, [relpath]) return html_path def url_builder(linked_page): dest = util().page_to_html_path(linked_page) src_dir = os.path.dirname(util().page_to_html_path(page)) return linked_page.name, os.path.relpath(dest, src_dir) page_path = os.path.join(outdir, 'html') html = process_page(page, page_path, url_builder, plaingenmap) if css and not self.fragment: plaingenmap.add(page, self.workdir, list(css_path)) if self.open and page in roots: show.append(html) if page.provides: for wiki in page.provides: basedir = os.path.join(self.workdir, str(hash(wiki))) process_page((wiki, page), basedir, wiki.wiki.url_builder, wikigenmap, fragment=True) if show: binary_util.ui_open(*show) PANTS_LINK = re.compile(r'''pants\(['"]([^)]+)['"]\)(#.*)?''') def process_md(self, output_path, source, fragmented, url_builder, css=None): def parse_url(spec): match = self.PANTS_LINK.match(spec) if match: address = 
Address.parse(match.group(1), relative_to=get_buildroot()) page = self.context.build_graph.get_target(address) anchor = match.group(2) or '' if not page: raise TaskError('Invalid markdown link to pants target: "{}". '.format(match.group(1)) + 'Is your page missing a dependency on this target?') alias, url = url_builder(page) return alias, url + anchor else: return spec, spec def build_url(label): components = label.split('|', 1) if len(components) == 1: return parse_url(label.strip()) else: alias, link = components _, url = parse_url(link.strip()) return alias, url wikilinks = util().WikilinksExtension(build_url) safe_mkdir(os.path.dirname(output_path)) with codecs.open(output_path, 'w', 'utf-8') as output: source_path = os.path.join(get_buildroot(), source) with codecs.open(source_path, 'r', 'utf-8') as source_stream: md_html = util().markdown.markdown( source_stream.read(), extensions=['codehilite(guess_lang=False)', 'extra', 'tables', 'toc', wikilinks, util().IncludeExcerptExtension(source_path)], ) if fragmented: style_css = (HtmlFormatter(style=self.code_style)).get_style_defs('.codehilite') template = resource_string(__name__, os.path.join(self._templates_dir, 'fragment.mustache')) generator = Generator(template, style_css=style_css, md_html=md_html) generator.write(output) else: style_link = os.path.relpath(css, os.path.dirname(output_path)) template = resource_string(__name__, os.path.join(self._templates_dir, 'page.mustache')) generator = Generator(template, style_link=style_link, md_html=md_html) generator.write(output) return output.name def process_rst(self, workunit, page, output_path, source, fragmented): source_path = os.path.join(get_buildroot(), source) with codecs.open(source_path, 'r', 'utf-8') as source_stream: rst_html, returncode = util().rst_to_html(source_stream.read(), stderr=workunit.output('stderr')) if returncode != 0: message = '{} rendered with errors.'.format(source_path) if self.get_options().ignore_failure: 
self.context.log.warn(message) else: raise TaskError(message, exit_code=returncode, failed_targets=[page]) template_path = os.path.join(self._templates_dir, 'f
r = http('--print=B', '--pretty=colors', 'GET', httpbin.url + '/get', 'a=b', env=env) # Tests that the JSON data isn't formatted. assert not r.strip().count('\n') assert COLOR in r def test_format_option(self, httpbin): env = MockEnvironment(colors=256) r = http('--print=B', '--pretty=format', 'GET', httpbin.url + '/get', 'a=b', env=env) # Tests that the JSON data is formatted. assert r.strip().count('\n') == 2 assert COLOR not in r class TestLineEndings: """ Test that CRLF is properly used in headers and as the headers/body separator. """ def _validate_crlf(self, msg): lines = iter(msg.splitlines(True)) for header in lines: if header == CRLF: break assert header.endswith(CRLF), repr(header) else: assert 0, f'CRLF between headers and body not found in {msg!r}' body = ''.join(lines) assert CRLF not in body return body def test_CRLF_headers_only(self, httpbin): r = http('--headers', 'GET', httpbin.url + '/get') body = self._validate_crlf(r) assert not body, f'Garbage after headers: {r!r}' def test_CRLF_ugly_response(self, httpbin): r = http('--pretty=none', 'GET', httpbin.url + '/get') self._validate_crlf(r) def test_CRLF_formatted_response(self, httpbin): r = http('--pretty=format', 'GET', httpbin.url + '/get') assert r.exit_status == ExitStatus.SUCCESS self._validate_crlf(r) def test_CRLF_ugly_request(self, httpbin): r = http('--pretty=none', '--print=HB', 'GET', httpbin.url + '/get') self._validate_crlf(r) def test_CRLF_formatted_request(self, httpbin): r = http('--pretty=format', '--print=HB', 'GET', httpbin.url + '/get') self._validate_crlf(r) class TestFormatOptions: def test_header_formatting_options(self): def get_headers(sort): return http( '--offline', '--print=H', '--format-options', 'headers.sort:' + sort, 'example.org', 'ZZZ:foo', 'XXX:foo', ) r_sorted = get_headers('true') r_unsorted = get_headers('false') assert r_sorted != r_unsorted assert f'XXX: foo{CRLF}ZZZ: foo' in r_sorted assert f'ZZZ: foo{CRLF}XXX: foo' in r_unsorted @pytest.mark.parametrize( 
'options, expected_json', [ # @formatter:off ( 'json.sort_keys:true,json.indent:4', json.dumps({'a': 0, 'b': 0}, indent=4), ), ( 'json.sort_keys:false,json.indent:2', json.dumps({'b': 0, 'a': 0}, indent=2), ), ( 'json.format:false', json.dumps({'b': 0, 'a': 0}), ), # @formatter:on ] ) def test_json_formatting_options(self, options: str, expected_json: str): r = http( '--offline', '--print=B', '--format-options', options, 'example.org', 'b:=0', 'a:=0', ) assert expected_json in r @pytest.mark.parametrize( 'defaults, options_string, expected', [ # @formatter:off ({'foo': {'bar': 1}}, 'foo.bar:
2', {'foo': {'bar': 2}}), ({'foo': {'bar': True}}, 'foo.bar:false', {'foo': {'bar': False}}), ({'foo': {'bar': 'a'}}, 'foo.bar:b', {'foo': {'bar': 'b'}}), # @formatter:on ] ) def test_parse_format_options(self, defaults, options_string, expected): actual = parse_format_
options(s=options_string, defaults=defaults) assert expected == actual @pytest.mark.parametrize( 'options_string, expected_error', [ ('foo:2', 'invalid option'), ('foo.baz:2', 'invalid key'), ('foo.bar:false', 'expected int got bool'), ] ) def test_parse_format_options_errors(self, options_string, expected_error): defaults = { 'foo': { 'bar': 1 } } with pytest.raises(argparse.ArgumentTypeError, match=expected_error): parse_format_options(s=options_string, defaults=defaults) @pytest.mark.parametrize( 'args, expected_format_options', [ ( [ '--format-options', 'headers.sort:false,json.sort_keys:false', '--format-options=json.indent:10' ], { 'headers': { 'sort': False }, 'json': { 'sort_keys': False, 'indent': 10, 'format': True }, 'xml': { 'format': True, 'indent': 2, }, } ), ( [ '--unsorted' ], { 'headers': { 'sort': False }, 'json': { 'sort_keys': False, 'indent': 4, 'format': True }, 'xml': { 'format': True, 'indent': 2, }, } ), ( [ '--format-options=headers.sort:true', '--unsorted', '--format-options=headers.sort:true', ], { 'headers': { 'sort': True }, 'json': { 'sort_keys': False, 'indent': 4, 'format': True }, 'xml': { 'format': True, 'indent': 2, }, } ), ( [ '--no-format-options', # --no-<option> anywhere resets '--format-options=headers.sort:true', '--unsorted', '--format-options=headers.sort:true', ], PARSED_DEFAULT_FORMAT_OPTIONS, ), ( [ '--format-options=json.indent:2', '--format-options=xml.format:false', '--format-options=xml.indent:4', '--unsorted', '--no-unsorted', ], { 'headers': { 'sort': True }, 'json': { 'sort_keys': True, 'indent': 2, 'format': True }, 'xml': { 'format': False, 'indent': 4, }, } ), ( [ '--format-options=json.indent:2', '--unsorted', '--sorted', ], { 'headers': { 'sort': True }, 'json': { 'sort_keys': True, 'indent': 2, 'format': True }, 'xml': { 'format': True, 'indent': 2, }, } ), ( [ '--format-options=json.indent:2', '--sorted', '--no-sorted',
""" Tests of student.roles """ import ddt from django.test import TestCase from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory from student.tests.factories import AnonymousUserFactory from student.roles import ( GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole, OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole ) from opaque_keys.edx.locations import SlashSeparatedCourseKey class RolesTestCase(TestCase): """ Tests of student.roles """ def setUp(self): self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') self.course_loc = self.course_key.make_usage_key('course', '2012_Fall') self.anonymous_user = AnonymousUserFactory() self.student = UserFactory() self.global_staff = UserFactory(is_staff=True) self.course_staff = StaffFactory(course_key=self.course_key) self.course_instructor = InstructorFactory(course_key=self.course_key) def test_global_staff(self): self.assertFalse(GlobalStaff().has_user(self.student)) self.assertFalse(GlobalStaff().has_user(self.course_staff)) self.assertFalse(GlobalStaff().has_user(self.course_instructor)) self.assertTrue(GlobalStaff().has_user(self.global_staff)) def test_group_name_case_sensitive(self): uppercase_course_id = "ORG/COURSE/NAME" lowercase_course_id = uppercase_course_id.lower() uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id) lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id) role = "role" lowercase_user = UserFactory() CourseRole(role, lowercase_course_key).add_users(lowercase_user) uppercase_user = UserFactory() CourseRole(role, uppercase_course_key).add_users(uppercase_user) self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user)) self.assertTrue(CourseRole(role, 
uppercase_course_key).has_user(uppercase_user)) def test_course_role(self): """ Test that giving a user a course role enables access appropriately """ self.assertFalse( CourseStaffRole(self.course_key).has_user(self.student), "Student has premature access to {}".format(self.course_key) ) CourseStaffRole(self.course_key).add_users(self.student) self.assertTrue( CourseStaffRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # remove access and confirm CourseStaffRole(self.course_key).remove_users(self.student) self.assertFalse( CourseStaffRole(self.course_key).has_user(self.student), "Student still has access to {}".format(self.course_key) ) def test
_org_role(self): """ Test that giving a user an org role enables access appropriately
""" self.assertFalse( OrgStaffRole(self.course_key.org).has_user(self.student), "Student has premature access to {}".format(self.course_key.org) ) OrgStaffRole(self.course_key.org).add_users(self.student) self.assertTrue( OrgStaffRole(self.course_key.org).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key.org)) ) # remove access and confirm OrgStaffRole(self.course_key.org).remove_users(self.student) if hasattr(self.student, '_roles'): del self.student._roles self.assertFalse( OrgStaffRole(self.course_key.org).has_user(self.student), "Student still has access to {}".format(self.course_key.org) ) def test_org_and_course_roles(self): """ Test that Org and course roles don't interfere with course roles or vice versa """ OrgInstructorRole(self.course_key.org).add_users(self.student) CourseInstructorRole(self.course_key).add_users(self.student) self.assertTrue( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key.org)) ) self.assertTrue( CourseInstructorRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # remove access and confirm OrgInstructorRole(self.course_key.org).remove_users(self.student) self.assertFalse( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student still has access to {}".format(self.course_key.org) ) self.assertTrue( CourseInstructorRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # ok now keep org role and get rid of course one OrgInstructorRole(self.course_key.org).add_users(self.student) CourseInstructorRole(self.course_key).remove_users(self.student) self.assertTrue( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student lost has access to {}".format(self.course_key.org) ) self.assertFalse( CourseInstructorRole(self.course_key).has_user(self.student), "Student 
doesn't have access to {}".format(unicode(self.course_key)) ) def test_get_user_for_role(self): """ test users_for_role """ role = CourseStaffRole(self.course_key) role.add_users(self.student) self.assertGreater(len(role.users_with_role()), 0) def test_add_users_doesnt_add_duplicate_entry(self): """ Tests that calling add_users multiple times before a single call to remove_users does not result in the user remaining in the group. """ role = CourseStaffRole(self.course_key) role.add_users(self.student) self.assertTrue(role.has_user(self.student)) # Call add_users a second time, then remove just once. role.add_users(self.student) role.remove_users(self.student) self.assertFalse(role.has_user(self.student)) @ddt.ddt class RoleCacheTestCase(TestCase): IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall') ROLES = ( (CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')), (CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')), (OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')), (OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')), (CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')), ) def setUp(self): self.user = UserFactory() @ddt.data(*ROLES) @ddt.unpack def test_only_in_role(self, role, target): role.add_users(self.user) cache = RoleCache(self.user) self.assertTrue(cache.has_role(*target)) for other_role, other_target in self.ROLES: if other_role == role: continue self.assertFalse(cache.has_role(*other_target)) @ddt.data(*ROLES) @ddt.unpack def test_empty_cache(self, role, target): cache = RoleCache(self.user) self.assertFalse(cache.has_role(*target))
import pigpio
import time


class LeftEncoder:
    """Counts edge transitions on a GPIO pin wired to the left wheel encoder.

    Each rising or falling edge increments ``tick`` and prints the count.
    """

    def __init__(self, pin=24):
        """Configure *pin* as a pulled-up input and register the edge callback.

        :param pin: BCM GPIO number the encoder signal is wired to
        """
        self.pi = pigpio.pi()
        self.pin = pin
        self.pi.set_mode(pin, pigpio.INPUT)
        self.pi.set_pull_up_down(pin, pigpio.PUD_UP)
        # BUG FIX: ``tick`` must exist *before* the callback is registered --
        # an edge arriving immediately after registration would otherwise hit
        # cbf() before the attribute was assigned (AttributeError).
        self.tick = 0
        # BUG FIX: keep the callback handle on the instance (the original
        # dropped it into an unused local), so it can later be cancelled.
        self._callback = self.pi.callback(pin, pigpio.EITHER_EDGE, self.cbf)

    def cbf(self, gpio, level, tick):
        """pigpio edge callback: print and advance the running tick count."""
        print(self.tick)
        self.tick += 1


e = LeftEncoder()
# Idle forever; all counting happens in the pigpio callback thread.
while True:
    time.sleep(.01)
"""SCons.Tool.latex Tool-specific initialization for LaTeX. Generates .dvi files from .latex or .ltx files There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2017 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Tool/latex.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog" import SCons.Action import SCons.Defaults import SCons.Scanner.LaTeX import SCons.Util import SCons.Tool import SCons.Tool.tex def LaTeXAuxFunction(target = None, source= None, env=None): result = SCons.Tool.tex.InternalLaTeXAuxAction( SCons.Tool.tex.LaTeXAction, target, source, env ) if result != 0: SCons.Tool.tex.check_file_error_message(env['LATEX']) return result LaTeXAuxAction = SCons.Action.Action(LaTeXAuxFunction, strfunction=SCons.Tool.tex.TeXLaTeXStrFunction) def generate(env): """Add Builders and construction variables for LaTeX to an Environment.""" env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes) from . import dvi dvi.generate(env) from . import pdf pdf.generate(env) bld = env['BUILDERS']['DVI'] bld.add_action('.ltx', LaTeXAuxAction) bld.add_action('.latex', LaTeXAuxAction) bld.add_emi
tter('.ltx', SCons.Tool.tex.tex_eps_emitter)
bld.add_emitter('.latex', SCons.Tool.tex.tex_eps_emitter) SCons.Tool.tex.generate_common(env) def exists(env): SCons.Tool.tex.generate_darwin(env) return env.Detect('latex') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_target_tcp_proxy description: - Represents a TargetTcpProxy resource, which is used by one or more global forwarding rule to route incoming TCP requests to a Backend service. short_description: Creates a GCP TargetTcpProxy version_added: 2.6 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: - present - absent default: present description: description: - An optional description of this resource. required: false name: description: - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
required: true proxy_header: description: - Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. required: false choices: - NONE - PROXY_V1 service: description: - A reference to the BackendService resource. - 'This field represents a link to a BackendService resource in GCP. It can be specified in two ways. First, you can place in the selfLink of the resource here as a string Alternatively, you can add `register: name-of-resource` to a gcp_compute_backend_service task and then set this service field to "{{ name-of-resource }}"' required: true extends_documentation_fragment: gcp notes: - 'API Reference: U(https://cloud.google.com/compute/docs/reference/latest/targetTcpProxies)' - 'Setting Up TCP proxy for Google Cloud Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy)' ''' EXAMPLES = ''' - name: create a instance group gcp_compute_instance_group: name: "instancegroup-targettcpproxy" zone: us-central1-a project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: instancegroup - name: create a health check gcp_compute_health_check: name: "healthcheck-targettcpproxy" type: TCP tcp_health_check: port_name: service-health request: ping response: pong healthy_threshold: 10 timeout_sec: 2 unhealthy_threshold: 5 project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: healthcheck - name: create a backend service gcp_compute_backend_service: name: "backendservice-targettcpproxy" backends: - group: "{{ instancegroup }}" health_checks: - "{{ healthcheck.selfLink }}" protocol: TCP project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: backendservice - name: create a target tcp proxy gcp_compute_target_tcp_proxy: name: "test_object" proxy_header: 
PROXY_V1 service: "{{ backendservice }}" project: "test_project" auth_kind: "serviceaccount" service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' creationTimestamp: description: - Creation timestamp in RFC3339 text format. returned: success type: str description: description: - An optional description of this resource. returned: success type: str id: description: - The unique identifier for the resource. returned: success type: int name: description: - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. returned: success type: str proxyHeader: description: - Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. returned: success type: str service: description: - A reference to the BackendService resource. 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict import json import time ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), description=dict(type='str'), name=dict(required=True, type='str'), proxy_header=dict(type='str', choices=['NONE', 'PROXY_V1']), service=dict(required=True), ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] state = module.params['state'] kind = 'compute#targetTcpProxy' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): update(module, self_link(module), kind, fetch) fetch = fetch_resource(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.post(link, resource_to_request(module))) def update(module, link, kind, fetch): update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) return fetch_resource(module, self_link(module), kind) def update_fields(module, request, response): if re
sponse.get('proxyHeader') != request.get('proxyHeader'): proxy_header_update(module, request, response) if response.get('service') != request.get('service'): service_update(module, request, res
ponse) def proxy_header_update(module, request, response): auth = GcpSession(module, 'compute') auth.post( ''.join(["https://www.googleapis.c
cted window caption.""" RV_WIN_NAME = "Remote Viewer" """Expected window caption.""" RV_WM_CLASS = "remote-viewer" class RVSessionError(Exception): """Exception for remote-viewer session. Root exception for the RV Sessiov. """ def __init__(self, test, *args, **kwargs): super(RVSessionError, self).__init__(args, kwargs) if test.cfg.pause_on_fail or test.cfg.pause_on_end: # 1 hour seconds = 60 * 60 * 10 logger.error("Test %s has failed. Do nothing for %s seconds.", test.cfg.id, seconds) time.sleep(seconds) class RVSessionNotImplemented(RVSessionError): """Used to show that this part of code is not implemented. """ class RVSessionConnect(RVSessionError): """Exception for remote-viewer session. """ #TODO: pass env variables @reg.add_action(req=[ios.IOSystem]) def rv_connect(vmi, ssn, env=None): """Establish connection between client and guest based on test parameters supplied at cartesian config. Notes ----- There are three possible methods to connect from client to guest: * Cmdline + parameters * Cmdline + rv file * remote-viewer menu URL Parameters ---------- test : VmInfo VM that runs RV. ssn : xxx Session object, as a exec-layer to VM. env : dict Dictionary of env variables to be passed before remote-viewer starts. 
Returns ------- None """ env = env or {} method = vmi.cfg.rv_parameters_from if method == 'cmd': act.info(vmi, "Connect to VM using command line.") rv_connect_cmd(vmi, ssn, env) elif method == 'menu': act.info(vmi, "Connect to VM using menu.") rv_connect_menu(vmi, ssn, env) elif method == 'file': act.info(vmi, "Connect to VM using .vv file.") rv_connect_file(vmi, ssn, env) else: raise RVSessionConnect(vmi.test, "Wrong connect method.") #TODO: pass env variables @reg.add_action(req=[ios.ILinux]) def rv_connect_cmd(vmi, ssn, env): cmd = act.rv_basic_opts(vmi) url = act.rv_url(vmi) cmd.append(url) cmd = utils.combine(cmd, "2>&1") act.info(vmi, "Final RV command: %s", cmd) utils.set_ticket(vmi.test) act.rv_run(vmi, cmd, ssn) act.rv_auth(vmi) #TODO: pass env variables @reg.add_action(req=[ios.ILinux]) def rv_connect_menu(vmi, ssn, env): cmd = act.rv_basic_opts(vmi) utils.set_ticket(vmi.test) cmd = utils.combine(cmd, "2>&1") act.info(vmi, "Final RV command: %s", cmd) act.rv_run(vmi, cmd, ssn) url = act.rv_u
rl(vmi) act.str_input(vmi, url) act.rv_auth(vmi) @reg.add_action(req=[ios.ILinux]) def rv_connect_file(vmi, ssn, env): cmd = utils.Cmd(vmi.cfg.rv_binary) vv_file_host = act.gen_
vv_file(vmi) with open(vv_file_host, 'r') as rvfile: file_contents = rvfile.read() act.info(vmi, "RV file contents:\n%s", file_contents) vv_file_client = act.cp_file(vmi, vv_file_host) cmd.append(vv_file_client) utils.set_ticket(vmi.test) cmd = utils.combine(cmd, "2>&1") act.info(vmi, "Final RV command: %s", cmd) act.rv_run(vmi, cmd, ssn) @reg.add_action(req=[ios.ILinux]) def rv_basic_opts(vmi): """Command line parameters for RV. """ cfg = vmi.cfg rv_cmd = utils.Cmd() rv_cmd.append(cfg.rv_binary) if cfg.rv_debug: rv_cmd.append("--spice-debug") if cfg.full_screen: rv_cmd.append("--full-screen") if cfg.disable_audio: rv_cmd.append("--spice-disable-audio") if cfg.smartcard: rv_cmd.append("--spice-smartcard") if cfg.certdb: rv_cmd.append("--spice-smartcard-db") rv_cmd.append(cfg.certdb) if cfg.gencerts: rv_cmd.append("--spice-smartcard-certificates") rv_cmd.append(cfg.gencerts) if cfg.usb_redirection_add_device: logger.info("Auto USB redirect for devices class == 0x08.") opt = r'--spice-usbredir-redirect-on-connect="0x08,-1,-1,-1,1"' rv_cmd.append(opt) if utils.is_yes(vmi.test.kvm_g.spice_ssl): cacert_host = utils.cacert_path_host(vmi.test) cacert_client = act.cp_file(vmi, cacert_host) opt = "--spice-ca-file=%s" % cacert_client rv_cmd.append(opt) if cfg.spice_client_host_subject: host_subj = utils.get_host_subj(vmi.test) opt = '--spice-host-subject=%s' % host_subj rv_cmd.append(opt) return rv_cmd @reg.add_action(req=[ios.ILinux]) def rv_url(vmi): """Cacert subj is in format for create certificate(with '/' delimiter) remote-viewer needs ',' delimiter. And also is needed to remove first character (it's '/'). If it's invalid implicit, a remote-viewer connection will be attempted with the hostname, since ssl certs were generated with the ip address. 
""" test = vmi.test port = test.kvm_g.spice_port tls_port = test.kvm_g.spice_tls_port #escape_char = test.cfg_c.shell_escape_char or '\\' host_ip = utils.get_host_ip(test) # SSL if utils.is_yes(vmi.test.kvm_g.spice_ssl): if vmi.cfg.ssltype == "invalid_implicit_hs" or \ "explicit" in vmi.cfg.ssltype: hostname = socket.gethostname() url = "spice://%s?tls-port=%s&port=%s" % (hostname, tls_port, port) else: url = "spice://%s?tls-port=%s&port=%s" % (host_ip, tls_port, port) return url # No SSL url = "spice://%s?port=%s" % (host_ip, port) return url @reg.add_action(req=[ios.ILinux]) def rv_auth(vmi): """Client waits for user authentication if spice_password is set use qemu monitor password if set, else, if set, try normal password. Only for cmdline. File console.rv should have a password. """ if vmi.cfg.ticket_send: # Wait for remote-viewer to launch. act.wait_for_win(vmi, RV_WIN_NAME_AUTH) act.str_input(vmi, vmi.cfg.ticket_send) @reg.add_action(req=[ios.IOSystem]) def gen_vv_file(vmi): """Generates vv file for remote-viewer. Parameters ---------- test : SpiceTest Spice test object. 
""" test = vmi.test cfg = vmi.cfg host_dir = os.path.expanduser('~') fpath = os.path.join(host_dir, cfg.rv_file) rv_file = open(fpath, 'w') rv_file.write("[virt-viewer]\n") rv_file.write("type=%s\n" % cfg.display) rv_file.write("host=%s\n" % utils.get_host_ip(test)) rv_file.write("port=%s\n" % test.kvm_g.spice_port) if cfg.ticket_send: rv_file.write("password=%s\n" % cfg.ticket_send) if utils.is_yes(test.kvm_g.spice_ssl): rv_file.write("tls-port=%s\n" % test.kvm_g.spice_tls_port) rv_file.write("tls-ciphers=DEFAULT\n") host_subj = utils.get_host_subj(test) if host_subj: rv_file.write("host-subject=%s\n" % host_subj) cacert_host = utils.cacert_path_host(test) if cacert_host: cert = open(cacert_host) cert_auth = cert.read() cert_auth = cert_auth.replace('\n', r'\n') rv_file.write("ca=%s\n" % cert_auth) if cfg.full_screen: rv_file.write("fullscreen=1\n") if cfg.spice_proxy: rv_file.write("proxy=%s\n" % cfg.spice_proxy) if cfg.rv_debug: """TODO""" # rv_cmd.append("--spice-debug") ..todo:: XXX TODO rv_file.close() return fpath @reg.add_action(req=[ios.ILinux]) def rv_run(vmi, rcmd, ssn, env=None): env = env or {} cfg = vmi.cfg if cfg.rv_ld_library_path: cmd = utils.Cmd("export") cmd.append("LD_LIBRARY_PATH=%s" % cfg.rv_ld_library_path) act.run(vmi, cmd, ssn=ssn) if cfg.spice_proxy and cfg.rv_parameters_from != "file": cmd = utils.Cmd("export") cmd.append("SPICE_PROXY=%s" % cfg.spice_proxy) act.run(vmi, cmd, ssn=ssn) for key in env: cmd = utils.Cmd("export", "%s=%s" % (key, env[key])) act.run(vmi, cmd, ssn=ssn) if cfg.usb_redirection_add_device: # USB was created by qemu (ro
of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields from openerp.osv import osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ class mrp_subproduct(osv.osv): _name = 'mrp.subproduct' _description = 'Byproduct' _columns={ 'product_id': fields.many2one('product.product', 'Product', required=True), 'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True), 'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\ 'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\ By opposition, 'Variable' means that the quantity will be computed as\ '(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"), 'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'), } _defaults={ 'subproduct_type': 'variable', 'product_qty': lambda *a: 1.0, } def onchange_product_id(self, cr, uid, ids, product_id, context=None): """ Changes UoM if product_id changes. 
@param product_id: Changed product_id @return: Dictionary of changed values """ if product_id: prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context) v = {'product_uom': prod.uom_id.id} return {'value': v} return {} def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None): res = {'value':{}} if not product_uom or not product_id: return res product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context) if uom.category_id.id != product.uom_id.category_id.id: res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')} res['value'].update({'product_uom': product.uom_id.id}) return res class mrp_bom(osv.osv): _name = 'mrp.bom' _description = 'Bill of Material' _inherit='mrp.bom' _columns={ 'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts'), } class mrp_production(osv.osv): _description = 'Production' _inherit= 'mrp.production' def action_confirm(self, cr, uid, ids, context=None): """ Confirms production order and calculates quantity based on subproduct_type. @return: Newly generated picking Id. """ picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context) product_uom_obj = self.pool.get('product.uom') for production in self.browse(cr, uid, ids): source = production.product_id.property_stock_production.id if not production.bom_id: continue for sub_product in production.bom_id.sub_products: product_uom_factor = product_uom_obj._compu
te_qty(cr, uid, production.product_uom.id, production.produc
t_qty, production.bom_id.product_uom.id) qty1 = sub_product.product_qty qty2 = production.product_uos and production.product_uos_qty or False product_uos_factor = 0.0 if qty2 and production.bom_id.product_uos.id: product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id) if sub_product.subproduct_type == 'variable': if production.product_qty: qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0) if production.product_uos_qty: qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0) data = { 'name': 'PROD:'+production.name, 'date': production.date_planned, 'product_id': sub_product.product_id.id, 'product_uom_qty': qty1, 'product_uom': sub_product.product_uom.id, 'product_uos_qty': qty2, 'product_uos': production.product_uos and production.product_uos.id or False, 'location_id': source, 'location_dest_id': production.location_dest_id.id, 'move_dest_id': production.move_prod_id.id, 'state': 'waiting', 'production_id': production.id } self.pool.get('stock.move').create(cr, uid, data) return picking_id def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None): """Compute the factor to compute the qty of procucts to produce for the given production_id. By default, it's always equal to the quantity encoded in the production order or the production wizard, but with the module mrp_byproduct installed it can differ for byproducts having type 'variable'. :param production_id: ID of the mrp.order :param move_id: ID of the stock move that needs to be produced. Identify the product to produce. :return: The factor to apply to the quantity that we should produce for the given production order and stock move. 
""" sub_obj = self.pool.get('mrp.subproduct') move_obj = self.pool.get('stock.move') production_obj = self.pool.get('mrp.production') production_browse = production_obj.browse(cr, uid, production_id, context=context) move_browse = move_obj.browse(cr, uid, move_id, context=context) subproduct_factor = 1 sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context) if sub_id: subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context) if subproduct_record.bom_id.product_qty: subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty return subproduct_factor return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context) class change_production_qty(osv.osv_memory): _inherit = 'change.production.qty' def _update_product_to_produce(self, cr, uid, prod, qty, context=None): bom_obj = self.pool.get('mrp.bom') move_lines_obj = self.pool.get('stock.move') prod_obj = self.pool.get('mrp.production') for m in prod.move_created_ids: if m.product_id.id == prod.product_id.id: move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty}) else: for sub_product_line in prod.bom_id.sub_products: if sub_product_line.product_id.id == m.product_id.id: factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context) subproduct_qty = sub_product_line.subproduct_type
""" WSGI config for yaco project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.envi
ron["DJANGO_SETTINGS_MODULE"] = "yaco.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yaco.settings")
# This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Hand the TopicFollowed table over to the notification app.

    The physical table is renamed in the database while the model is only
    dropped from the forum app's migration state, so the data survives.
    """

    dependencies = [
        ('forum', '0004_topic_update_index_date'),
    ]

    # Rename the physical table only.
    database_operations = [
        migrations.AlterModelTable('TopicFollowed', 'notification_topicfollowed'),
    ]

    # Drop the model from this app's migration state only.
    state_operations = [
        migrations.DeleteModel('TopicFollowed'),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=database_operations,
            state_operations=state_operations,
        ),
    ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from runner.koan import *


class AboutSets(Koan):
    """Koan exercises covering Python's built-in set type."""

    def test_sets_make_keep_lists_unique(self):
        # Duplicates collapse when a list is turned into a set.
        highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
                       'MacLeod', 'Malcolm', 'MacLeod']
        there_can_only_be_only_one = set(highlanders)
        self.assertEqual({'MacLeod', 'Ramirez', 'Matunas', 'Malcolm'},
                         there_can_only_be_only_one)

    def test_sets_are_unordered(self):
        # Element order is irrelevant to set equality.
        self.assertEqual({'1', '2', '3', '5', '4'}, set('12345'))

    def test_convert_the_set_into_a_list_to_sort_it(self):
        # sorted() materializes the set as an ordered list.
        self.assertEqual(['1', '2', '3', '4', '5'], sorted(set('13245')))

    # ------------------------------------------------------------------

    def test_set_have_arithmetic_operators(self):
        scotsmen = {'MacLeod', 'Wallace', 'Willie'}
        warriors = {'MacLeod', 'Wallace', 'Leonidas'}

        # difference, union, intersection, symmetric difference
        self.assertEqual({'Willie'}, scotsmen - warriors)
        self.assertEqual({'MacLeod', 'Wallace', 'Willie', 'Leonidas'},
                         scotsmen | warriors)
        self.assertEqual({'MacLeod', 'Wallace'}, scotsmen & warriors)
        self.assertEqual({'Willie', 'Leonidas'}, scotsmen ^ warriors)

    # ------------------------------------------------------------------

    def test_we_can_query_set_membership(self):
        self.assertEqual(True, 127 in {127, 0, 0, 1})
        # set('apocalypse now') holds single characters, not words.
        self.assertEqual(True, 'cow' not in set('apocalypse now'))

    def test_we_can_compare_subsets(self):
        self.assertEqual(True, set('cake') <= set('cherry cake'))
        self.assertEqual(True, set('cake').issubset(set('cherry cake')))
        self.assertEqual(False, set('cake') > set('pie'))
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at the project settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "logger.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
""" WSGI config for my
_doku_application project. It exposes the WSGI callable as a module-level variable named ``application``. For mor
e information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_doku_application.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
# vim: set et sw=4 sts=4 fileencoding=utf-8: # # Python header conversion # Copyright (c) 2013,2014 Dave Hughes <dave@waveform.org.uk> # # Original headers # Copyright (c) 2012, Broadcom Europe Ltd # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import ( unicode_literals, print_function, division, absolute_import, ) # Make Py2's str equivalent to Py3's str = type('') import ctypes as ct import warnings _lib
= ct.CDLL('libbcm_host.so') # bcm_host.h ################################################################# bcm_host_init = _lib.bcm_host_init bcm_host_init.argtypes = [] bcm_host_init.restype = None bcm_host_deinit = _lib.bcm_host_deinit bcm_host_deinit.argtypes = [] bcm_ho
st_deinit.restype = None graphics_get_display_size = _lib.graphics_get_display_size graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)] graphics_get_display_size.restype = ct.c_int32
""" simple alignment baselines """ # TODO: # - better implementions from daeso.pair import Pair def greedy_align_equal_words(corpus): for graph_pair in corpus: graph_pair.clear() graphs = graph_pair.get_graphs() target_nodes = graphs.target.terminals(with_punct=False, with_empty=False) target_words = [ graphs.target.node[tn]["word"].lower() for tn in target_nodes ] for sn in graphs.source.terminals_iter(with_punct=False, with_empty=False): sw = graphs.source.node[sn]["word"].lower() try: j = target_words.index(sw) except: continue tn = target_nodes[j] graph_pair.add_align(Pair(sn, tn), "equals") del target_nodes[j] del target_words[j] def greedy_align_equal_words_roots(corpus): # if words are equal, align as equals # elif roots are equals, align as restates for graph_pair in corpus: graph_pair.clear() graphs = graph_pair.get_graphs() target_nodes = graphs.target.terminals(with_punct=False, with_empty=False) target_words = [ graphs.target.node[tn]["word"].lower() for tn in target_nodes ] target_roots = [ graphs.target.node[tn]["root"] for tn in target_nodes ] for sn in graphs.source.terminals_iter(with_punct=False, with_empty=False): sw = graphs.source.node[sn]["word"].lower() sr = graphs.source.node[sn]["root"] try: j = target_words.index(sw) except: try: j = target_roots.index(sr) except: continue else: relation = "restates" else: relation = "equals" tn = target_nodes[j] graph_pair.add_align(Pair(sn, tn), relation) del target_nodes[j] del target_words[j] del target_roots[j] def greedy_align_words(corpus): # if words are equal -> equals # if roots are equals -> restates # if source in target root and len(source)>3 -> generalizes # if target in source root and len(target)>3-> specifies # if target and source root share a morph segment ->intersects for graph_pair in corpus: graph_pair.clear() graphs = graph_pair.get_graphs() target_nodes = graphs.target.terminals(with_punct=False, with_empty=False) target_words = [ graphs.target.node[tn]["word"].lower() for tn in 
target_nodes ] target_roots = [ graphs.target.node[tn]["root"] for tn in target_nodes ] for sn in graphs.source.terminals_iter(with_punct=False, with_empty=False): sw = graphs.source.node[sn]["word"].lower() relation = None # align identical words for i, tw in enumerate(target_words): if sw == tw: relation = "equals" break if not relation: sr = graphs.source.node[sn]["root"] # align identical roots for i, tr in enumerate(target_roots): if sr == tr: relation = "restates" break if not relation: sparts = set(sr.split("_")) # check for spec, gen, or intersect for i, tr in enumerate(target_roots): tw = target_words[i] if sr in tr and len(sw) > 3: relation = "generalizes" break elif tr in sr and len(tw) > 3: relation = "specifies" break # check if roots share a morphological segment elif sparts.intersection(tr.split("_")): relation = "intersects" break if relation: tn = target_nodes[i] graph_pair.add_align(Pair(sn, tn), relation) del target_nodes[i] del target_words[i] del target_roots[i] #===================================================================== # Full tree alignment #===================================================================== def lc_roots(graph, n): """ Return the list of the lower-cased roots of the terminals in the yield of node n. Store list in attribute "_lc_roots" of node n. Also recursively calls lc_roots for all nodes dominated by node n. 
""" try: # node already seen (should not happen in trees) return graph.node[n]["_lc_roots"] except KeyError: graph.node[n]["_lc_roots"] = [] if graph.node_is_terminal(n, with_empty=False, with_punct=False): root = graph.node[n].get("root", "").lower() if root: graph.node[n]["_lc_roots"].append(root) else: # punct and empty nodes end here for m in graph.successors(n): graph.node[n]["_lc_roots"] += lc_roots(graph, m) return graph.node[n]["_lc_roots"] def greedy_align_phrases(corpus): # greedy align phrases with the same lower-cased words as strings and with # the same lower-cased roots as restates for graph_pair in corpus: graph_pair.clear() graphs = graph_pair.get_graphs() lc_roots(graphs.source, graphs.source.root) lc_roots(graphs.target, graphs.target.root) target_nodes = [ tn for tn in graphs.target if ( not graphs.target.node_is_punct(tn) and not graphs.target.node_is_empty(tn) ) ] target_words = [ graphs.target.get_node_token_string(tn).lower() for tn in target_nodes ] target_roots = [ graphs.target.node[tn].get("_lc_roots", []) for tn in target_nodes
] for sn in graphs.source: if ( graphs.source.node_
is_punct(sn) or graphs.source.node_is_empty(sn) ): continue sw = graphs.source.get_node_token_string(sn).lower() sr = graphs.source.node[sn].get("_lc_roots") try: j = target_words.index(sw) except: try: j = target_roots.index(sr) except: continue else: tn = target_nodes[j] graph_pair.add_align(Pair(sn, tn), "restates") #print "RESTATES" #print " ".join(sr) #print " ".join(target_roots[j]) del target_nodes[j] del target_words[j] del target_roots[j] else: tn = target_nodes[j] graph_pair.add_align(Pair(sn, tn), "equals") #print "EQUALS" #print sw #print target_words[j] del target_nodes[j] del target_words[j] del target_roots[j]
__version__ = '0.4'
__author__ = 'Martin Natano <natano@natano.net>'


# Module-level state shared by the setter/getter pairs below.
_repository = None
_branch = 'git-orm'
_remote = 'origin'


class GitError(Exception):
    """Raised when a git operation cannot be performed."""


def set_repository(value):
    """Discover the repository containing *value* and make it current.

    Passing ``None`` clears the active repository.  Raises ``GitError``
    when no repository can be discovered at the given path.
    """
    from pygit2 import discover_repository, Repository

    global _repository
    if value is None:
        _repository = None
    else:
        try:
            found = discover_repository(value)
        except KeyError:
            raise GitError('no repository found in "{}"'.format(value))
        _repository = Repository(found)


def get_repository():
    """Return the active repository, or ``None`` if unset."""
    return _repository


def set_branch(value):
    """Select *value* as the current branch name."""
    global _branch
    _branch = value


def get_branch():
    """Return the current branch name."""
    return _branch


def set_remote(value):
    """Select *value* as the current remote name."""
    global _remote
    _remote = value


def get_remote():
    """Return the current remote name."""
    return _remote
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates sequence of binary patterns or kv pairs to test associative memory. Pattern task: Given N patterns, retrieve the right pattern via its degraded version, where some of the bits are set to 0. Symbolic key-value task: Given a string of concatenated key-value pairs, retrieve the right value given the key. See [Miconi et al. 2018] Differentiable Plasticity (https://arxiv.org/abs/1804.02464) and [Ba et al. 2016] Using Fast Weights to to Attend to the Recent Past (https://arxiv.org/abs/1610.06258v1) for details of task design. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import string import numpy as np import tensorflow.compat.v1 as tf def generate_pattern_data(num_patterns, pattern_size): """Generates a sequence of patterns followed by a degraded query pattern. Args: num_patterns (int): Number of unique patterns in the sequence pattern_size (int): Dimensionality of each pattern Returns: seq: Numpy array, the sequence of patterns to be presented shape [num_patterns + 1, pattern_size] target: Numpy array, the pattern we exp
ect to retrieve for the degraded query pattern shape [pattern_size,] target_idx (int): The index into the list of unique patterns for the target
patterns: List of np arrays, all unique patterns """ patterns = [] for _ in range(num_patterns): pattern = np.random.choice([-1, 1], size=(pattern_size,), p=[.5, .5]) patterns.append(pattern) # Choose one pattern to degrade target_idx = random.choice(range(num_patterns)) target = patterns[target_idx] degraded = target.copy() degraded_idxs = np.random.choice( pattern_size, pattern_size // 2, replace=False) degraded[degraded_idxs] = 0 patterns.append(degraded) seq = np.array(patterns) return seq, target, target_idx, patterns def generate_pattern_data_selective(num_patterns, num_patterns_store, pattern_size): """Generates a sequence of patterns followed by a degraded query pattern. Args: num_patterns (int): Number of unique patterns in the sequence num_patterns_store (int): Number of patterns we actually have to store pattern_size (int): Dimensionality of each pattern Returns: seq: Numpy array, the sequence of patterns to be presented. shape [num_patterns + 1, pattern_size] target: Numpy array, the pattern we expect to retrieve for the degraded query pattern. shape [pattern_size,] target_idx (int): The index into the list of unique patterns for the target. patterns: List of np arrays, all unique patterns. Patterns we need to remember (that may be queried) have their last bit set to 1, otherwise 0. 
""" patterns = [] for _ in range(num_patterns): pattern = np.random.choice([-1, 1], size=(pattern_size,), p=[.5, .5]) patterns.append(pattern) # Choose patterns that are important to remember remember_idxs = np.random.choice( range(num_patterns), size=num_patterns_store, replace=False) patterns = [ np.append(p, [1]) if i in remember_idxs else np.append(p, [0]) for i, p in enumerate(patterns) ] # Choose one pattern to degrade target_idx = random.choice(range(num_patterns)) target = patterns[target_idx] degraded = target.copy() degraded_idxs = np.random.choice( pattern_size, pattern_size // 2, replace=False) degraded[degraded_idxs] = 0 patterns.append(degraded) seq = np.array(patterns) return seq, target, target_idx, patterns def generate_symbolic_data(num_pairs): """Generates a sequence of key-value pairs followed by a query key. Args: num_pairs (int): Number of pairs Returns: seq_text (str): Sequence of kv pairs, followed by a ?, followed by the query key. seq_encoded (numpy arr): Sequence of kv pairs, encoded into vocab indices. target_val (str): Digit, the value we expect to retrieve for the key. target_val_encoded (int): Encoded target_val target_idx (int): The index into the list of pairs for the target """ pairs = zip( np.random.choice(list(string.ascii_lowercase), num_pairs, replace=False), np.random.choice(list("0123456789"), num_pairs) ) vocab = get_symbolic_vocab() # Choose a query key target_idx = random.choice(range(num_pairs)) target_key, target_val_text = pairs[target_idx] target_val_encoded = vocab.index(target_val_text) seq_text = "".join([k + v for k, v in pairs]) + "?" + target_key seq_encoded = [vocab.index(char) for char in seq_text] return seq_text, seq_encoded, target_val_text, target_val_encoded, target_idx def get_pattern_dataset(n=100000, num_patterns=3, pattern_size=50, selective=False, num_patterns_store=None): """Generates a dataset of sequences of patterns and retrieval targets. 
Args: n: Number of examples num_patterns: Number of unique patterns in the sequence pattern_size: Dimensionality of each pattern selective (bool): True if only a subset of patterns needs to be stored. num_patterns_store: Number of patterns to store if selective=True. Returns: A tf.data.Dataset created from a dict with property "seqs," containing the sequences of randomly generated binary patterns, and "targets," containing the ground-truth pattern to retrieve for the last degraded query pattern in the sequence. """ seqs = [] targets = [] for _ in range(n): if selective: if num_patterns_store is None: num_patterns_store = num_patterns // 10 seq, target, _, _ = generate_pattern_data_selective( num_patterns, num_patterns_store, pattern_size) else: seq, target, _, _ = generate_pattern_data(num_patterns, pattern_size) seqs.append(seq) targets.append(target) return tf.data.Dataset.from_tensor_slices({ "seqs": np.array(seqs, dtype=np.float32), "targets": np.array(targets, dtype=np.int32) }) def get_symbolic_dataset(_, n=100000, num_pairs=5): """Generates a dataset of sequences of key-value pairs and retrieval targets. Args: n: Number of examples num_pairs: Number of pairs in each sequence Returns: A tf.data.Dataset created from a dict with property "seqs," containing the sequences of randomly generated key-value pairs, and "targets," containing the ground-truth value to retrieve for the query key. """ seqs = [] targets = [] for _ in range(n): _, seq_encoded, _, target_encoded, _ = generate_symbolic_data(num_pairs) seqs.append(seq_encoded) targets.append(target_encoded) return tf.data.Dataset.from_tensor_slices({ "seqs": np.array(seqs, dtype=np.int32), "targets": np.array(targets, dtype=np.int32) }) def get_symbolic_vocab(): """Gets the vocabulary for the symbolic task. Returns: A list with a-z, 0-9, and ?. """ return list(string.ascii_lowercase) + list(string.digits + "?")
t, Fifth Floor, Boston, MA
# 02110-1301, USA.
#

import CTK
import Handler
import Cherokee
import Balancer
import validations

from util import *
from consts import *

URL_APPLY = '/plugin/proxy/apply'
HELPS = [('modules_handlers_proxy', N_("Reverse Proxy"))]

NOTE_REUSE_MAX = N_("Maximum number of connections per server that the proxy can try to keep opened.")
NOTE_ALLOW_KEEPALIVE = N_("Allow the server to use Keep-alive connections with the back-end servers.")
NOTE_PRESERVE_HOST = N_("Preserve the original \"Host:\" header sent by the client. (Default: No)")
NOTE_PRESERVE_SERVER = N_("Preserve the \"Server:\" header sent by the back-end server. (Default: No)")

VALS = [
    ('.+?!reuse_max', validations.is_number_gt_0),
]


def commit():
    # AJAX submit handler: each branch handles one "add new entry" form;
    # whichever form posted its fields wins and replies immediately.

    # New Rewrite
    for e in ('in_rewrite_request', 'out_rewrite_request'):
        key = CTK.post.pop ('tmp!new!%s!key'%(e))
        regex = CTK.post.pop ('tmp!new!%s!regex'%(e))
        subst = CTK.post.pop ('tmp!new!%s!substring'%(e))

        if regex and subst:
            next = CTK.cfg.get_next_entry_prefix ('%s!%s'%(key, e))
            CTK.cfg['%s!regex'%(next)] = regex
            CTK.cfg['%s!substring'%(next)] = subst
            return CTK.cfg_reply_ajax_ok()

    # New Header
    for e in ('in_header_add', 'out_header_add'):
        key = CTK.post.pop ('tmp!new!%s!key'%(e))
        header = CTK.post.pop ('tmp!new!%s!header'%(e))
        value = CTK.post.pop ('tmp!new!%s!value'%(e))

        if header and value:
            CTK.cfg['%s!%s!%s'%(key, e, header)] = value
            return CTK.cfg_reply_ajax_ok()

    # New Hide
    for e in ('in_header_hide', 'out_header_hide'):
        key = CTK.post.pop ('tmp!new!%s!key'%(e))
        hide = CTK.post.pop ('tmp!new!%s!hide'%(e))

        if hide:
            next = CTK.cfg.get_next_entry_prefix ('%s!%s'%(key, e))
            CTK.cfg[next] = hide
            return CTK.cfg_reply_ajax_ok()

    # Modification: no "new" form was posted, apply field edits.
    return CTK.cfg_apply_post()


class URL_Rewrite (CTK.Container):
    """Widget: list of regex/substitution rewrite rules plus an add-new form."""

    class Content (CTK.Container):
        # Refreshable table listing the existing rewrite entries under `key`.
        def __init__ (self, refresh, key):
            CTK.Container.__init__ (self)

            keys = CTK.cfg.keys(key)
            if keys:
                table = CTK.Table()
                table.set_header(1)
                table += [CTK.RawHTML(x) for x in (_('Regular Expression'), _('Substitution'))]

                for k in CTK.cfg.keys(key):
                    regex = CTK.TextCfg ('%s!%s!regex'%(key,k), False)
                    subst = CTK.TextCfg ('%s!%s!substring'%(key,k), False)
                    remove = CTK.ImageStock('del')
                    # Deleting = writing an empty value for the entry, then refresh.
                    remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
                                                      complete = refresh.JS_to_refresh()))
                    table += [regex, subst, remove]

                submit = CTK.Submitter (URL_APPLY)
                submit += table
                self += CTK.Indenter (submit)

    def __init__ (self, key, key_entry):
        CTK.Container.__init__ (self)

        # List
        refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
        refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
        self += refresh

        # New
        new_regex = CTK.TextCfg('tmp!new!%s!regex'%(key_entry), False, {'class': 'noauto'})
        new_subst = CTK.TextCfg('tmp!new!%s!substring'%(key_entry), True, {'class': 'noauto'})
        add_button = CTK.SubmitterButton(_('Add'))

        table = CTK.Table()
        table.set_header(1)
        table += [CTK.RawHTML(x) for x in (_('Add RegEx'), _('Substitution'))]
        table += [new_regex, new_subst, add_button]

        submit = CTK.Submitter (URL_APPLY)
        submit += CTK.Indenter (table)
        submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
        submit.bind ('submit_success', refresh.JS_to_refresh())
        self += submit


class Header_List (CTK.Container):
    """Widget: list of header/value pairs to add, plus an add-new form."""

    class Content (CTK.Container):
        # Refreshable table listing the existing header entries under `key`.
        def __init__ (self, refresh, key):
            CTK.Container.__init__ (self)

            keys = CTK.cfg.keys(key)
            if keys:
                table = CTK.Table()
                table.set_header(1)
                # NOTE(review): these column labels look copied from the
                # rewrite table; the rows below are header/value pairs.
                table += [CTK.RawHTML(x) for x in (_('Regular Expression'), _('Substitution'))]

                for k in CTK.cfg.keys(key):
                    value = CTK.TextCfg ('%s!%s'%(key,k), False)
                    remove = CTK.ImageStock('del')
                    remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
                                                      complete = refresh.JS_to_refresh()))
                    table += [CTK.RawHTML(k), value, remove]

                submit = CTK.Submitter (URL_APPLY)
                submit += table
                self += CTK.Indenter (submit)

    def __init__ (self, key, key_entry):
        CTK.Container.__init__ (self)

        # List
        refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
        refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
        self += refresh

        # New
        new_regex = CTK.TextCfg('tmp!new!%s!header'%(key_entry), False, {'class': 'noauto'})
        new_subst = CTK.TextCfg('tmp!new!%s!value'%(key_entry), False, {'class': 'noauto'})
        add_button = CTK.SubmitterButton(_('Add'))

        table = CTK.Table()
        table.set_header(1)
        table += [CTK.RawHTML(x) for x in (_('Add Header Entry'), _('Value'))]
        table += [new_regex, new_subst, add_button]

        submit = CTK.Submitter (URL_APPLY)
        submit += CTK.Indenter (table)
        submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
        submit.bind ('submit_success', refresh.JS_to_refresh())
        self += submit


class Header_Hide (CTK.Container):
    """Widget: list of header names to hide, plus an add-new form."""

    class Content (CTK.Container):
        # Refreshable table listing the headers currently hidden under `key`.
        def __init__ (self, refresh, key):
            CTK.Container.__init__ (self)

            keys = CTK.cfg.keys(key)
            if keys:
                table = CTK.Table()
                table.set_header(1)
                table += [CTK.RawHTML(_('Header'))]

                for k in CTK.cfg.keys(key):
                    remove = CTK.ImageStock('del')
                    remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
                                                      complete = refresh.JS_to_refresh()))
                    table += [CTK.RawHTML(CTK.cfg.get_val('%s!%s'%(key,k))), remove]

                submit = CTK.Submitter (URL_APPLY)
                submit += table
                self += CTK.Indenter (submit)

    def __init__ (self, key, key_entry):
        CTK.Container.__init__ (self)

        # List
        refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
        refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
        self += refresh

        # New
        new_hide = CTK.TextCfg('tmp!new!%s!hide'%(key_entry), False, {'class': 'noauto'})
        add_button = CTK.SubmitterButton(_('Add'))

        table = CTK.Table()
        table.set_header(1)
        table += [CTK.RawHTML (_('Hide Header'))]
        table += [new_hide, add_button]

        submit = CTK.Submitter (URL_APPLY)
        submit += CTK.Indenter (table)
        submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
        submit.bind ('submit_success', refresh.JS_to_refresh())
        self += submit


class Plugin_proxy (Handler.PluginHandler):
    def __init__ (self, key, **kwargs):
        kwargs['show_document_root'] = False
        Handler.PluginHandler.__init__ (self, key, **kwargs)
        Handler.PluginHandler.AddCommon (self)

        # Properties
        table = CTK.PropsTable()
        table.Add (_('Reuse connections'), CTK.TextCfg ('%s!reuse_max'%(key), True), _(NOTE_REUSE_MAX))
        table.Add (_('Allow Keepalive'), CTK.CheckCfgText('%s!in_allow_keepalive'%(key), True, _('Allow')), _(NOTE_ALLOW_KEEPALIVE))
        table.Add (_('Preserve Host Header'), CTK.CheckCfgText('%s!in_preserve_host'%(key), False, _('Preserve')), _(NOTE_PRESERVE_HOST))
# -*- coding: utf-8 -*-

"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import os
from datetime import date
import collections as coll  # NOTE(review): currently unused in this module

import attr
import pytest

from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.intact import parser
from rnacentral_pipeline.databases.helpers import publications as pubs


@pytest.fixture(scope="module")
def sample():
    """Parse the checked-in IntAct sample file once per test module.

    Requires the PGDATABASE environment variable to point at the test
    database used by the parser.
    """
    with open("data/intact/sample.txt", "r") as raw:
        return list(parser.parse(raw, os.environ["PGDATABASE"]))


def test_can_parse_all_data(sample):
    """The sample file yields exactly four entries."""
    assert len(sample) == 4


def test_creates_entries_with_expected_ids(sample):
    """Each parsed entry carries the expected INTACT/URS primary id."""
    ids = sorted(e.primary_id for e in sample)
    assert ids == [
        "INTACT:URS0000077671_559292",
        "INTACT:URS0000182FAB_559292",
        "INTACT:URS000020D517_559292",
        "INTACT:URS00002AFD52_559292",
    ]


def test_correctly_groups_data(sample):
    """Interactions are grouped per primary id with the expected counts."""
    val = {e.primary_id: len(e.interactions) for e in sample}
    assert val == {
        "INTACT:URS0000077671_559292": 1,
        "INTACT:URS0000182FAB_559292": 1,
        "INTACT:URS000020D517_559292": 1,
        "INTACT:URS00002AFD52_559292": 4,
    }


def test_produces_correct_data(sample):
    """The first raw interaction is parsed field-for-field as expected.

    Builds the two expected interactors (an snoRNA and the NOP1 protein)
    and compares the full attrs-dict of the parsed Interaction against the
    expected one.
    """
    with open("data/intact/sample.txt", "r") as raw:
        val = next(parser.parse_interactions(raw))

    # Expected first interactor: the snoRNA side of the interaction.
    i1 = data.Interactor(
        id=data.InteractionIdentifier("intact", "EBI-10921362", None),
        alt_ids=[
            data.InteractionIdentifier("rnacentral", "URS00002AFD52_559292", None)
        ],
        aliases=[
            data.InteractionIdentifier("psi-mi", "snr18 yeast", "display_short"),
            data.InteractionIdentifier("psi-mi", "EBI-10921362", "display_long"),
        ],
        taxid=559292,
        biological_role=[
            data.InteractionIdentifier("psi-mi", "MI:0499", "unspecified role")
        ],
        experimental_role=[data.InteractionIdentifier("psi-mi", "MI:0498", "prey")],
        interactor_type=[
            data.InteractionIdentifier("psi-mi", "MI:0609", "small nucleolar rna")
        ],
        xrefs=[],
        annotations="-",
        features=[data.InteractionIdentifier("32p radiolabel", "?-?", None)],
        stoichiometry=None,
        participant_identification=[
            data.InteractionIdentifier("psi-mi", "MI:0396", "predetermined participant")
        ],
    )
    # Expected second interactor: the NOP1/fibrillarin protein side.
    i2 = data.Interactor(
        id=data.InteractionIdentifier("uniprotkb", "P15646", None),
        alt_ids=[
            data.InteractionIdentifier("intact", "EBI-6838", None),
            data.InteractionIdentifier("uniprotkb", "P89890", None),
            data.InteractionIdentifier("uniprotkb", "D6VRX5", None),
        ],
        aliases=[
            data.InteractionIdentifier("psi-mi", "fbrl_yeast", "display_long"),
            data.InteractionIdentifier("uniprotkb", "NOP1", "gene name"),
            data.InteractionIdentifier("psi-mi", "NOP1", "display_short"),
            data.InteractionIdentifier("uniprotkb", "LOT3", "gene name synonym"),
            data.InteractionIdentifier("uniprotkb", "YDL014W", "locus name"),
            data.InteractionIdentifier("uniprotkb", "D2870", "orf name"),
            data.InteractionIdentifier(
                "uniprotkb",
                "U3 small nucleolar RNA-associated protein NOP1",
                "gene name synonym",
            ),
            data.InteractionIdentifier(
                "uniprotkb", "Histone-glutamine methyltransferase", "gene name synonym"
            ),
        ],
        taxid=559292,
        biological_role=[
            data.InteractionIdentifier("psi-mi", "MI:0499", "unspecified role")
        ],
        experimental_role=[data.InteractionIdentifier("psi-mi", "MI:0498", "prey")],
        interactor_type=[data.InteractionIdentifier("psi-mi", "MI:0326", "protein")],
        xrefs=[
            data.InteractionIdentifier(
                "go", "GO:0008649", "rRNA methyltransferase activity"
            ),
            data.InteractionIdentifier("go", "GO:0031428", "box C/D snoRNP complex"),
            data.InteractionIdentifier("go", "GO:0032040", "small-subunit processome"),
            data.InteractionIdentifier("go", "GO:0003723", "RNA binding"),
            data.InteractionIdentifier(
                "go", "GO:0000494", "box C/D snoRNA 3'-end processing"
            ),
            data.InteractionIdentifier("refseq", "NP_010270.1", None),
            data.InteractionIdentifier("sgd", "S000002172", None),
            data.InteractionIdentifier("interpro", "IPR000692", "Fibrillarin"),
            data.InteractionIdentifier("interpro", "IPR020813", None),
            data.InteractionIdentifier("rcsb pdb", "5WYJ", None),
            data.InteractionIdentifier("rcsb pdb", "5WYK", None),
            data.InteractionIdentifier(
                "go", "GO:0006356", "regulation of transcription by RNA polymerase I"
            ),
            data.InteractionIdentifier(
                "go", "GO:1990259", "histone-glutamine methyltransferase activity"
            ),
            data.InteractionIdentifier("go", "GO:0005730", "nucleolus"),
            data.InteractionIdentifier("go", "GO:0006364", "rRNA processing"),
            data.InteractionIdentifier("go", "GO:0031167", "rRNA methylation"),
            data.InteractionIdentifier("go", "GO:0043144", "snoRNA processing"),
            data.InteractionIdentifier(
                "go", "GO:1990258", "histone glutamine methylation"
            ),
            data.InteractionIdentifier("interpro", "IPR029063", None),
            data.InteractionIdentifier("mint", "P15646", None),
            data.InteractionIdentifier("go", "GO:0000451", "rRNA 2'-O-methylation"),
            data.InteractionIdentifier("go", "GO:0005654", "nucleoplasm"),
            data.InteractionIdentifier(
                "go", "GO:0008171", "O-methyltransferase activity"
            ),
            data.InteractionIdentifier("go", "GO:0015030", "Cajal body"),
            data.InteractionIdentifier("reactome", "R-SCE-6791226", None),
            data.InteractionIdentifier("dip", "DIP-698N", None),
            data.InteractionIdentifier("rcsb pdb", "5WLC", None),
            data.InteractionIdentifier("go", "GO:0030686", "90S preribosome"),
            data.InteractionIdentifier("rcsb pdb", "6ND4", None),
        ],
        annotations="crc64:56A8B958A7B6066E",
        features=[data.InteractionIdentifier("protein a tag", "n-n", None)],
        stoichiometry=None,
        participant_identification=[
            data.InteractionIdentifier("psi-mi", "MI:0396", "predetermined participant")
        ],
    )

    # Compare as plain dicts so pytest shows a readable field-level diff.
    assert attr.asdict(val) == attr.asdict(
        data.Interaction(
            ids=[data.InteractionIdentifier("intact", "EBI-11665247", None)],
            interactor1=i1,
            interactor2=i2,
            methods=[],
            types=[
                data.InteractionIdentifier("psi-mi", "MI:0915", "physical association")
            ],
            xrefs=[],
            annotations=[
                data.InteractionIdentifier("figure legend", "Fig 2, Fig 3B", None)
            ],
            confidence=[data.InteractionIdentifier("intact-miscore", "0.74", None)],
            source_database=[data.InteractionIdentifier("psi-mi", "MI:0471", "MINT")],
            is_negative=False,
            publications=[pubs.reference(11726521)],
            create_date=date(2003, 7, 8),
            update_date=date(2016, 3, 23),
            host_organisms=-1,
        )
    )
# coding=utf-8

# Run a test server.

from app import app

import sys

# sys.setdefaultencoding() exists only on Python 2 (and only after a
# reload(), because site.py deletes it).  Guard the hack so the script
# also starts under Python 3, where UTF-8 is already the default and
# reload() is not a builtin.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7000, debug=True)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic loop for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import errors
from tensorflow.python.util.tf_export import tf_export


@tf_export("train.basic_train_loop")
def basic_train_loop(supervisor, train_step_fn, args=None, kwargs=None,
                     master=""):
  """Basic loop to train a model.

  Repeatedly invokes `train_step_fn` inside a supervisor-managed session:

  ```python
  train_step_fn(session, *args, **kwargs)
  ```

  The callable receives a `tf.Session` followed by `args` and `kwargs`, and
  typically runs a single training step.

  Args:
    supervisor: `tf.train.Supervisor` to run the training services.
    train_step_fn: Callable to execute one training step.  Called repeatedly
      as `train_step_fn(session, *args **kwargs)`.
    args: Optional positional arguments passed to `train_step_fn`.
    kwargs: Optional keyword arguments passed to `train_step_fn`.
    master: Master to use to create the training session.  Defaults to `""`
      which causes the session to be created in the local process.
  """
  if args is None:
    args = []
  if kwargs is None:
    kwargs = {}
  while True:
    try:
      with supervisor.managed_session(master) as session:
        while not supervisor.should_stop():
          train_step_fn(session, *args, **kwargs)
      return
    except errors.AbortedError:
      # An AbortedError indicates that one of the distributed tensorflow
      # servers restarted; start the session over rather than failing.
      continue
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module that re-exports a value imported from another module."""
from task_01.peanut import BUTTER

# Re-export BUTTER under the name JELLY so importers of this module
# can use it without depending on task_01.peanut directly.
JELLY = BUTTER
from __future__ import absolute_import, division

import time
import os

# Python 2/3 compatibility: on Python 3 there is no `unicode` builtin.
try:
    unicode
except NameError:
    unicode = str

from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked


class SQLiteLockFile(LockBase):
    "Demonstrate SQL-based locking."

    # Path of the shared, process-wide SQLite database used to record
    # locks.  Created lazily by the first instance.
    testdb = None

    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = SQLiteLockFile('somefile')
        >>> lock = SQLiteLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        self.lock_file = unicode(self.lock_file)
        self.unique_name = unicode(self.unique_name)

        if SQLiteLockFile.testdb is None:
            import tempfile
            _fd, testdb = tempfile.mkstemp()
            os.close(_fd)
            # Unlink immediately: sqlite3.connect below re-creates the file,
            # and the atexit hook removes it when the process ends.
            os.unlink(testdb)
            del _fd, tempfile
            SQLiteLockFile.testdb = testdb

        import sqlite3
        self.connection = sqlite3.connect(SQLiteLockFile.testdb)

        c = self.connection.cursor()
        try:
            c.execute("create table locks"
                      "("
                      " lock_file varchar(32),"
                      " unique_name varchar(32)"
                      ")")
        except sqlite3.OperationalError:
            # Table already exists (another instance created it first).
            pass
        else:
            self.connection.commit()
            import atexit
            atexit.register(os.unlink, SQLiteLockFile.testdb)

    def acquire(self, timeout=None):
        # Poll the locks table until we own the lock, the timeout expires,
        # or (timeout <= 0) the lock turns out to be held by someone else.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        if timeout is None:
            wait = 0.1
        elif timeout <= 0:
            wait = 0
        else:
            wait = timeout / 10

        cursor = self.connection.cursor()

        while True:
            if not self.is_locked():
                # Not locked. Try to lock it.
                cursor.execute("insert into locks"
                               " (lock_file, unique_name)"
                               " values"
                               " (?, ?)",
                               (self.lock_file, self.unique_name))
                self.connection.commit()

                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               " where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) > 1:
                    # Nope. Someone else got there. Remove our lock.
                    cursor.execute("delete from locks"
                                   " where unique_name = ?",
                                   (self.unique_name,))
                    self.connection.commit()
                else:
                    # Yup. We're done, so go home.
                    return
            else:
                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               " where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) == 1:
                    # We're the locker, so go home.
                    return

            # Maybe we should wait a bit longer.
            if timeout is not None and time.time() > end_time:
                if timeout > 0:
                    # No more waiting.
                    raise LockTimeout("Timeout waiting to acquire"
                                      " lock for %s" %
                                      self.path)
                else:
                    # Someone else has the lock and we are impatient..
                    raise AlreadyLocked("%s is already locked" % self.path)

            # Well, okay. We'll give it a bit longer.
            time.sleep(wait)

    def release(self):
        # Remove our row from the locks table; error out when we do not
        # actually hold the lock.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me (by %s)" %
                            (self.unique_name, self._who_is_locking()))
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       " where unique_name = ?",
                       (self.unique_name,))
        self.connection.commit()

    def _who_is_locking(self):
        # Return the unique_name of whichever instance holds this lock file.
        cursor = self.connection.cursor()
        cursor.execute("select unique_name from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        return cursor.fetchone()[0]

    def is_locked(self):
        # True when any row exists for this lock file.
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        rows = cursor.fetchall()
        return not not rows

    def i_am_locking(self):
        # True when a row exists for this lock file with our unique name.
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       " where lock_file = ?"
                       " and unique_name = ?",
                       (self.lock_file, self.unique_name))
        return not not cursor.fetchall()

    def break_lock(self):
        # Forcibly delete every row for this lock file, regardless of owner.
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        self.connection.commit()
from django import db
from django.conf import settings
from django.core.management.base import NoArgsCommand
from data.models import FCNASpending
import csv

# National Priorities Project Data Repository
# import_fcna_spending.py
# Updated 7/23/2010, Joshua Ruihley, Sunlight Foundation

# Imports Federal Child Nutrition Act Spending Data
# source info: http://nces.ed.gov/ccd/bat/index.asp (accurate as of 7/23/2010)
# npp csv: http://assets.nationalpriorities.org/raw_data/education/fcna_spending.csv (updated 7/23/2010)
# destination model: FCNASpending

# HOWTO:
# 1) Download source files from url listed above
# 2) Convert source file to .csv with same formatting as npp csv
# 3) change SOURCE_FILE variable to the the path of the source file you just created
# 4) change 'amount' column in data_FCNASpending table to type 'bigint'
# 5) Run as Django management command from your project path "python manage.py import_fcna_spending"

SOURCE_FILE = '%s/education/fcna_spending.csv' % (settings.LOCAL_DATA_ROOT)


class Command(NoArgsCommand):
    """Import FCNA spending rows from SOURCE_FILE into FCNASpending.

    The CSV layout is: row 0 is a header whose columns 3+ are year labels;
    every following row is (state, agency_name, agency_id, amount-per-year...).
    One FCNASpending record is saved per (agency, year) cell.
    """

    def handle_noargs(self, **options):
        def clean_int(value):
            # Empty CSV cells mean "no data": store NULL rather than ''.
            return None if value == '' else value

        # `with` guarantees the source file is closed (the original
        # leaked the handle from a bare open()).
        with open(SOURCE_FILE) as source:
            year_row = None
            for i, row in enumerate(csv.reader(source)):
                if i == 0:
                    # Header row: remember the year for each column.
                    year_row = row
                    continue
                state = row[0]
                agency_name = row[1]
                agency_id = row[2]
                for j, col in enumerate(row):
                    if j <= 2:
                        continue  # identity columns, not spending amounts
                    record = FCNASpending()
                    record.year = year_row[j]
                    record.state = state
                    record.agency_name = agency_name
                    record.agency_id = agency_id
                    record.amount = clean_int(col)
                    record.save()
                # With DEBUG on, Django retains every query; clear the log
                # per data row so a large import stays memory-bounded.
                db.reset_queries()
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.

"""Configuration for Zenodo Support."""

from __future__ import absolute_import, print_function

#: Maximum size of attachment in contact form.
SUPPORT_ATTACHMENT_MAX_SIZE = 1000 * 1000 * 10  # 10 MB

#: Description maximum length.
SUPPORT_DESCRIPTION_MAX_LENGTH = 5000

#: Description minimum length.
SUPPORT_DESCRIPTION_MIN_LENGTH = 20

#: Email body template.
SUPPORT_EMAIL_BODY_TEMPLATE = 'zenodo_support/email_body.html'

#: Email title template.
SUPPORT_EMAIL_TITLE_TEMPLATE = 'zenodo_support/email_title.html'

#: Support confirmation email body.
SUPPORT_EMAIL_CONFIRM_BODY = """Thank you for contacting Zenodo support.

We have received your message, and we will do our best to get back to you as \
soon as possible.
This is an automated confirmation of your request, please do not reply to this\
 email.

Zenodo Support
https://zenodo.org
"""

#: Support confirmation email title.
#  BUGFIX: the original line accidentally juxtaposed two string literals
#  ('Zenodo Support' 'zenodo_support/email_confirm_title.html'), so the
#  confirmation email title rendered as
#  "Zenodo Supportzenodo_support/email_confirm_title.html".
SUPPORT_EMAIL_CONFIRM_TITLE = 'Zenodo Support'

#: Issue category for contact form.
SUPPORT_ISSUE_CATEGORIES = [
    {
        'key': 'file-modification',
        'title': 'File modification',
        'description': (
            'All requests related to updating files in already published '
            'record(s). This includes new file addition, file removal or '
            'file replacement. '
            'Before sending a request, please consider creating a '
            '<a href="http://help.zenodo.org/#versioning">new version</a> '
            'of your upload. Please first consult our '
            '<a href="http://help.zenodo.org/#general">FAQ</a> to get familiar'
            ' with the file update conditions, to see if your case is '
            'eligible.<br /><br />'
            # Typo fix: "You request" -> "Your request".
            'Your request has to contain <u>all</u> of the points below:'
            '<ol>'
            '<li>Provide a justification for the file change in the '
            'description.</li>'
            '<li>Mention any use of the record(s) DOI in publications or '
            'online, e.g.: list papers that cite your record and '
            'provide links to posts on blogs and social media. '
            'Otherwise, state that to the best of your knowledge the DOI has '
            'not been used anywhere.</li>'
            '<li>Specify the record(s) you want to update <u>by the Zenodo'
            ' URL</u>, e.g.: "https://zenodo.org/record/8428".<br />'
            "<u>Providing only the record's title, publication date or a "
            "screenshot with search result is not explicit enough</u>.</li>"
            '<li>If you want to delete or update a file, specify it '
            '<u>by its filename</u>, and mention if you want the name to '
            'remain as is or changed (by default the filename of the new '
            'file will be used).</li>'
            '<li>Upload the new files below or provide a publicly-accessible '
            'URL(s) with the files in the description.</li>'
            '</ol>'
            '<b><u>Not providing full information on any of the points above '
            'will significantly slow down your request resolution</u></b>, '
            'since our support staff will have to reply back with a request '
            'for missing information.'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'upload-quota',
        'title': 'File upload quota increase',
        'description': (
            'All requests for a quota increase beyond the 50GB limit. '
            'Please include the following information with your request:'
            '<ol>'
            '<li>The total size of your dataset, number of files and the '
            'largest file in the dataset. When referring to file sizes'
            ' use <a href="https://en.wikipedia.org/wiki/IEEE_1541-2002">'
            'SI units</a></li>'
            '<li>Information related to the organization, project or grant '
            'which was involved in the research, which produced the '
            'dataset.</li>'
            '<li>Information on the currently in-review or future papers that '
            'will cite this dataset (if applicable). If possible specify the '
            'journal or conference.</li>'
            '</ol>'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'record-inactivation',
        'title': 'Record inactivation',
        'description': (
            'Requests related to record inactivation, either by the record '
            'owner or a third party. Please specify the record(s) in question '
            'by the URL(s), and reason for the inactivation.'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'openaire',
        'title': 'OpenAIRE',
        'description': (
            'All questions related to OpenAIRE reporting and grants. '
            'Before sending a request, make sure your problem was not '
            'already resolved, see OpenAIRE '
            '<a href="https://www.openaire.eu/support/faq">FAQ</a>. '
            'For questions unrelated to Zenodo, you should contact OpenAIRE '
            '<a href="https://www.openaire.eu/support/helpdesk">'
            'helpdesk</a> directly.'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'partnership',
        'title': 'Partnership, outreach and media',
        'description': (
            'All questions related to possible partnerships, outreach, '
            # Typo fix: missing space rendered "media.If you are...".
            'invited talks and other official inquiries by media. '
            'If you are a journal, organization or conference organizer '
            'interested in using Zenodo as archive for your papers, software '
            'or data, please provide details for your usecase.'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'tech-support',
        'title': 'Security issue, bug or spam report',
        'description': (
            # Typo fix: missing space rendered "Zenodo.Please provide...".
            'Report a technical issue or a spam content on Zenodo. '
            'Please provide details on how to reproduce the bug. '
            'Upload any screenshots or files which are relevant to the issue '
            'or to means of reproducing it. Include error messages and '
            'error codes you might be getting in the description.<br /> '
            'For REST API errors, provide a minimal code which produces the '
            'issues. Use external services for scripts and long text'
            ', e.g.: <a href="https://gist.github.com/">GitHub Gist</a>. '
            '<strong>Do not disclose your password or REST API access tokens.'
            '</strong>'
        ),
        'recipients': ['info@zenodo.org'],
    },
    {
        'key': 'other',
        'title': 'Other',
        'description': (
            'Questions which do not fit into any other category.'),
        'recipients': ['info@zenodo.org'],
    },
]

#: Email address of sender.
SUPPORT_SENDER_EMAIL = 'info@zenodo.org'

#: Name of the sender
SUPPORT_SENDER_NAME = 'Zenodo'

#: Email address for support.
SUPPORT_SUPPORT_EMAIL = ['info@zenodo.org']
# -*- coding: utf-8 -*-
""" Tablib - JSON Support """

import decimal

import tablib

# Prefer the faster ujson when available, falling back to stdlib json.
try:
    import ujson as json
except ImportError:
    import json


title = 'json'
extensions = ('json', 'jsn')


def date_handler(obj):
    """``default`` hook for json.dumps: stringify Decimals and render
    date-like objects via ``isoformat()``; anything else passes through
    unchanged for the encoder to deal with."""
    if isinstance(obj, decimal.Decimal):
        return str(obj)
    return obj.isoformat() if hasattr(obj, 'isoformat') else obj


def export_set(dataset):
    """Returns JSON representation of Dataset."""
    return json.dumps(dataset.dict, default=date_handler)


def export_book(databook):
    """Returns JSON representation of Databook."""
    return json.dumps(databook._package(), default=date_handler)


def import_set(dset, in_stream):
    """Returns dataset from JSON stream."""
    dset.wipe()
    dset.dict = json.loads(in_stream)


def import_book(dbook, in_stream):
    """Returns databook from JSON stream."""
    dbook.wipe()
    for sheet in json.loads(in_stream):
        new_sheet = tablib.Dataset()
        new_sheet.title = sheet['title']
        new_sheet.dict = sheet['data']
        dbook.add_sheet(new_sheet)


def detect(stream):
    """Returns True if given stream is valid JSON."""
    try:
        json.loads(stream)
    except ValueError:
        return False
    else:
        return True
# -*- coding: utf-8 -*-

""" Implements a Class for Representing a Simulated Senate Election. """

from collections import Counter
from random import random
from random import seed as set_seed
from time import asctime
from time import localtime

from aus_senate_audit.senate_election.base_senate_election import BaseSenateElection


class SimulatedSenateElection(BaseSenateElection):
    """ Implements a class for representing a simulated senate election.

    :ivar int _sample_increment_size: The number of ballots to add to the growing sample during each
        audit stage.

    NOTE: The :attr:`_candidates` and :attr:`_candidate_ids` instance attributes are set as a
        [1, ..., :attr:`_m`].
    """
    TYPE = 'Simulated'
    DEFAULT_ID = 'SimulatedElection{}'

    def __init__(self, seed, n, m, sample_increment_size):
        """ Initializes a :class:`SimulatedSenateElection` object.

        The number of seats in a simulated senate election is equal to the floor of the number of
        candidates in the election divided by two.

        :param int seed: The starting value for the random number generator.
        :param int n: The total number of ballots cast in the election.
        :param int m: The total number of candidates in the election.
        :param int sample_increment_size: The number of ballots to add to the growing sample during
            each audit stage.
        """
        super(SimulatedSenateElection, self).__init__()
        self._n = n
        self._m = m
        # Half the candidates (rounded down) are elected.
        self._seats = int(self._m / 2)
        self._candidates = list(range(1, self._m + 1))
        self._candidate_ids = list(range(1, self._m + 1))
        # The election id embeds the wall-clock creation time.
        self._election_id = SimulatedSenateElection.DEFAULT_ID.format(asctime(localtime()))
        self._sample_increment_size = sample_increment_size
        set_seed(seed)  # Set the initial value of the RNG.

    def draw_ballots(self):
        """ Adds simulated ballots to the sample of ballots drawn thus far.

        These ballots are biased so (1, 2, ..., m) is likely to be the winner. More precisely, each
        ballot candidate `i` is given a value `i + v * U` where `U = uniform(0, 1)` and `v` is the
        level of noise. Then the candidates are sorted into increasing order by these values. Note
        that the total number of ballots drawn may not exceed the total number of cast votes,
        :attr:`_n`.
        """
        v = self._m / 2.0  # Noise level to control position variance.
        # Never draw more ballots than remain uncast.
        batch_size = min(self._sample_increment_size, self._n - self._num_ballots_drawn)
        for _ in range(batch_size):
            candidate_values = [(i + v * random(), cid) for i, cid in enumerate(self._candidate_ids)]
            # Sorting by noisy value gives a preference ordering biased
            # towards the natural candidate order (1, 2, ..., m).
            ballot = tuple(cid for val, cid in sorted(candidate_values))
            self.add_ballot(ballot, 1)

    def get_outcome(self, ballot_weights):
        """ Returns the outcome of a senate election with the given ballot weights.

        The social choice function used in the simulated senate election is Borda count.

        :param :class:`Counter` ballot_weights: A mapping from a ballot type to the number of
            ballots drawn of that type.

        :returns: The IDs of the candidates elected to the available seats, sorted in
            lexicographical order.
        :rtype: tuple
        """
        counter = Counter()
        for ballot, weight in ballot_weights.items():
            for i, cid in enumerate(ballot):
                # Position i contributes weight * i "penalty" points, so a
                # LOWER total means the candidate was preferred more often.
                counter[cid] += weight * i
        # Get the :attr:`_seat` candidates with the lowest Borda counts in increasing order.
        winners = counter.most_common()[-self._seats:][::-1]
        return tuple(sorted([cid for cid, count in winners]))
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name

# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""
local_qiskit_simulator command to snapshot the quantum state.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.qasm import _node as node


class SnapshotGate(Gate):
    """Simulator snapshot operation."""

    def __init__(self, m, qubit, circ=None):
        """Create new snapshot gate.

        m is the snapshot slot label; qubit is a (register, index) pair.
        """
        super().__init__("snapshot", [m], [qubit], circ)

    def qasm(self):
        """Return OPENQASM string."""
        qubit = self.arg[0]   # (register, index) pair
        m = self.param[0]     # snapshot slot label
        return self._qasmif("snapshot(%d) %s[%d];" % (m,
                                                      qubit[0].name,
                                                      qubit[1]))

    def inverse(self):
        """Invert this gate."""
        return self  # self-inverse

    def reapply(self, circ):
        """Reapply this gate to corresponding qubits in circ."""
        self._modifiers(circ.snapshot(self.param[0], self.arg[0]))


def snapshot(self, m, q):
    """Cache the quantum state of local_qiskit_simulator.

    When q is a whole QuantumRegister, one SnapshotGate per qubit is
    attached and the resulting InstructionSet is returned; otherwise a
    single gate is attached for the given (register, index) qubit.
    """
    if isinstance(q, QuantumRegister):
        gs = InstructionSet()
        for j in range(q.size):
            gs.add(self.snapshot(m, (q, j)))
        return gs
    self._check_qubit(q)
    return self._attach(SnapshotGate(m, q, self))


# Add to QuantumCircuit and CompositeGate classes
# (monkey-patch so circuits gain a .snapshot(...) method).
QuantumCircuit.snapshot = snapshot
CompositeGate.snapshot = snapshot

# cache quantum state (identity)
# Register "snapshot" as an opaque-bodied 1-arg, 1-qubit definition so the
# qasm round-trip knows about it.
QuantumCircuit.definitions["snapshot"] = {
    "print": True,
    "opaque": False,
    "n_args": 1,
    "n_bits": 1,
    "args": ["m"],
    "bits": ["a"],
    # gate snapshot(m) a { }
    "body": node.GateBody([])
}
from django.conf.urls import url

from . import views

# Shopping-cart routes ("carrinho" = cart).
urlpatterns = [
    url(
        r'^carrinho/adicionar/(?P<slug>[\w_-]+)/$',
        views.CreateCartItemView.as_view(),
        name='create_cartitem',
    ),
    url(
        r'^carrinho/$',
        views.CartItemView.as_view(),
        name='cart_item',
    ),
]
in out_str # keywords tooltip m = self.world.explore( tooltip=True, popup=False, tooltip_kwds=dict(aliases=[0, 1, 2, 3, 4, 5], sticky=False), ) out_str = self._fetch_map_string(m) assert ( 'fields=["pop_est","continent","name","iso_a3","gdp_md_est","range"]' in out_str ) assert "aliases=[0,1,2,3,4,5]" in out_str assert '"sticky":false' in out_str # keywords popup m = self.world.explore( tooltip=False, popup=True, popup_kwds=dict(aliases=[0, 1, 2, 3, 4, 5]), ) out_str = self._fetch_map_string(m) assert ( 'fields=["pop_est","continent","name","iso_a3","gdp_md_est","range"]' in out_str ) assert "aliases=[0,1,2,3,4,5]" in out_str assert "<th>${aliases[i]" in out_str # no labels m = self.world.explore( tooltip=True, popup=True, tooltip_kwds=dict(labels=False), popup_kwds=dict(labels=False), ) out_str = self._fetch_map_string(m) assert "<th>${aliases[i]" not in out_str # named index gdf = self.nybb.set_index("BoroName") m = gdf.explore() out_str = self._fetch_map_string(m) assert "BoroName" in out_str def test_default_markers(self): # check overridden default for points m = self.cities.explore() strings = ['"radius":2', '"fill":true', "CircleMarker(latlng,opts)"] out_str = self._fetch_map_string(m) for s in strings: assert s in out_str m = self.cities.explore(marker_kwds=dict(radius=5, fill=False)) strings = ['"radius":5', '"fill":false', "CircleMarker(latlng,opts)"] out_str = self._fetch_map_string(m) for s in strings: assert s in out_str def test_custom_markers(self): # Markers m = self.cities.explore( marker_type="marker", marker_kwds={"icon": folium.Icon(icon="star")}, ) assert ""","icon":"star",""" in self._fetch_map_string(m) # Circle Markers m = self.cities.explore(marker_type="circle", marker_kwds={"fill_color": "red"}) assert ""","fillColor":"red",""" in self._fetch_map_string(m) # Folium Markers m = self.cities.explore( marker_type=folium.Circle( radius=4, fill_color="orange", fill_opacity=0.4, color="black", weight=1 ), ) assert ""","color":"black",""" in 
self._fetch_map_string(m) # Circle m = self.cities.explore(marker_type="circle_marker", marker_kwds={"radius": 10}) assert ""","radius":10,""" in self._fetch_map_string(m) # Unsupported Markers with pytest.raises( ValueError, match="Only 'marker', 'circle', and 'circle_marker' are supported", ): self.cities.explore(marker_type="dummy") def test_vmin_vmax(self): df = self.world.copy() df["range"] = range(len(df)) m = df.explore("range", vmin=-100, vmax=1000) out_str = self._fetch_map_string(m) assert 'case"176":return{"color":"#3b528b","fillColor":"#3b528b"' in out_str assert 'case"119":return{"color":"#414287","fillColor":"#414287"' in out_str assert 'case"3":return{"color":"#482173","fillColor":"#482173"' in out_str # test 0 df2 = self.nybb.copy() df2["values"] = df2["BoroCode"] * 10.0 m = df2[df2["values"] >= 30].explore("values", vmin=0) out_str = self._fetch_map_string(m) assert 'case"1":return{"color":"#7ad151","fillColor":"#7ad151"' in out_str assert 'case"2":return{"color":"#22a884","fillColor":"#22a884"' in out_str df2["values_negative"] = df2["BoroCode"] * -10.0 m = df2[df2["values_negative"] <= 30].explore("values_negative", vmax=0) out_str = self._fetch_map_string(m) assert 'case"1":return{"color":"#414487","fillColor":"#414487"' in out_str assert 'case"2":return{"color":"#2a788e","fillColor":"#2a788e"' in out_str def test_missing_vals(self): m = self.missing.explore("continent") assert '"fillColor":null' in self._fetch_map_string(m) m = self.missing.explore("pop_est") assert '"fillColor":null' in self._fetch_map_string(m) m = self.missing.explore("pop_est", missing_kwds=dict(color="red")) assert '"fillColor":"red"' in self._fetch_map_string(m) m = self.missing.explore("continent", missing_kwds=dict(color="red")) assert '"fillColor":"red"' in self._fetch_map_string(m) def test_categorical_legend(self): m = self.world.explore("continent", legend=True) out_str = self._fetch_map_string(m) assert "#1f77b4'></span>Africa" in out_str assert 
"#ff7f0e'></span>Antarctica" in out_str assert "#98df8a'></span>Asia" in out_str assert "#9467bd'></span>Europe" in out_str assert "#c49c94'></span>NorthAmerica" in out_str
assert "#7f7f7f'></span>Oceania" in out_str assert "#dbdb8d'></span>Sevenseas(openocean)" in out_str assert "#9edae5'></span>SouthAmerica" in out_str m = self.missing.explore( "continent", legend=True, missing_kwds={"color": "red"} ) out_str = self._fetch_map_string(m) assert "red'></span>NaN" in out_str def test_color
bar(self): m = self.world.explore("range", legend=True) out_str = self._fetch_map_string(m) assert "attr(\"id\",'legend')" in out_str assert "text('range')" in out_str m = self.world.explore( "range", legend=True, legend_kwds=dict(caption="my_caption") ) out_str = self._fetch_map_string(m) assert "attr(\"id\",'legend')" in out_str assert "text('my_caption')" in out_str m = self.missing.explore("pop_est", legend=True, missing_kwds=dict(color="red")) out_str = self._fetch_map_string(m) assert "red'></span>NaN" in out_str # do not scale legend m = self.world.explore( "pop_est", legend=True, legend_kwds=dict(scale=False), scheme="Headtailbreaks", ) out_str = self._fetch_map_string(m) assert out_str.count("#440154ff") == 100 assert out_str.count("#3b528bff") == 100 assert out_str.count("#21918cff") == 100 assert out_str.count("#5ec962ff") == 100 assert out_str.count("#fde725ff") == 100 # scale legend accordingly m = self.world.explore( "pop_est", legend=True, scheme="Headtailbreaks", ) out_str = self._fetch_map_string(m) assert out_str.count("#440154ff") == 16 assert out_str.count("#3b528bff") == 51 assert out_str.count("#21918cff") == 133 assert out_str.count("#5ec962ff") == 282 assert out_str.count("#fde725ff") == 18 # discrete cmap m = self.world.explore("pop_est", legend=True, cmap="Pastel2") out_str = self._fetch_map_string(m) assert out_str.count("b3e2cdff") == 63 assert out_str.count("fdcdacff") == 62 assert out_str.count("cbd5e8ff") == 63 assert out_str.count("f4cae4ff") == 62 assert out_str.count("e6f5c9ff") == 62 assert out_str.count("fff2aeff") == 63 assert out_str.count("f1e2ccff") == 62 assert out_str.count("ccccccff") == 63 @pytest.mark.skipif(not BRANCA_05, reason="requires branca >= 0.5.0") def test_colorbar_max_labels(self): # linear m = self.world.explore("pop_est", legend_kwds=dict(max_labels=3)) out_str = self._fetch_map_string(m) tick_values = [140.0, 465176713.5921569, 930353287.1843138] for tick in tick_values: assert str(tick) in out_str # scheme 
m = self.world.explore( "pop_est", scheme="headtailbreaks", legend_kwds=dict(max_labels=3)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.views.generic import DeleteView
from guardian.decorators import permission_required_or_403 as permission_required

from djing.global_base_views import OrderedFilteredList
from djing.lib.mixins import LoginAdminMixin
from abonapp.models import Abon
from .models import Tariff, PeriodicPay
from djing import lib
from djing.lib.decorators import only_admins
from . import forms

# Convenience pair of decorators for admin-only views.
# NOTE(review): not referenced in this module -- presumably imported from
# elsewhere or legacy; confirm before removing.
login_decs = login_required, only_admins


class TariffsListView(LoginAdminMixin, PermissionRequiredMixin, OrderedFilteredList):
    """
    Show Services(Tariffs) list
    """
    permission_required = 'tariff_app.view_tariff'
    template_name = 'tariff_app/tarifs.html'
    context_object_name = 'tariflist'
    model = Tariff
    # Annotate each tariff with the number of subscribers using it.
    queryset = Tariff.objects.annotate(usercount=Count('linkto_tariff__abon'))


@login_required
@only_admins
@permission_required('tariff_app.change_tariff')
def edit_tarif(request, tarif_id=0):
    # Create (tarif_id == 0) or edit an existing service (Tariff).
    tarif_id = lib.safe_int(tarif_id)
    if tarif_id == 0:
        # Creating a new service additionally requires the "add" permission.
        if not request.user.has_perm('tariff_app.add_tariff'):
            raise PermissionDenied
        tarif = None
    else:
        # NOTE(review): redundant with the @permission_required decorator
        # above, which already enforces change_tariff -- kept as-is.
        if not request.user.has_perm('tariff_app.change_tariff'):
            raise PermissionDenied
        tarif = get_object_or_404(Tariff, pk=tarif_id)
    if request.method == 'POST':
        frm = forms.TariffForm(request.POST, instance=tarif)
        if frm.is_valid():
            service = frm.save()
            if tarif is None:
                # Audit-log service creation ('csrv' = created service).
                request.user.log(request.META, 'csrv', '"%(title)s", "%(descr)s", %(amount).2f' % {
                    'title': service.title or '-',
                    'descr': service.descr or '-',
                    'amount': service.amount or 0.0
                })
            messages.success(request, _('Service has been saved'))
            return redirect('tarifs:edit', tarif_id=service.pk)
        else:
            messages.warning(request, _('Some fields were filled incorrect, please try again'))
    else:
        frm = forms.TariffForm(instance=tarif)
    return render(request, 'tariff_app/editTarif.html', {
        'form': frm,
        'tarif_id': tarif_id
    })


class TariffDeleteView(LoginAdminMixin, PermissionRequiredMixin, DeleteView):
    # Confirm-and-delete view for a single service (Tariff).
    permission_required = 'tariff_app.delete_tariff'
    model = Tariff
    pk_url_kwarg = 'tid'
    success_url = reverse_lazy('tarifs:home')

    def delete(self, request, *args, **kwargs):
        # Delete first; DeleteView sets self.object before deleting, so the
        # attributes are still available for the audit log afterwards
        # ('dsrv' = deleted service).
        res = super().delete(request, *args, **kwargs)
        request.user.log(request.META, 'dsrv', '"%(title)s", "%(descr)s", %(amount).2f' % {
            'title': self.object.title or '-',
            'descr': self.object.descr or '-',
            'amount': self.object.amount or 0.0
        })
        messages.success(request, _('Service has been deleted'))
        return res

    def get_context_data(self, **kwargs):
        # Expose the URL's tariff id to the confirmation template.
        kwargs['tid'] = self.kwargs.get('tid')
        return super().get_context_data(**kwargs)


class PeriodicPaysListView(LoginAdminMixin, PermissionRequiredMixin, OrderedFilteredList):
    # Paginated list of all periodic payments.
    permission_required = 'tariff_app.view_periodicpay'
    context_object_name = 'pays'
    model = PeriodicPay
    template_name = 'tariff_app/periodic_pays/list.html'


@login_required
@only_admins
def periodic_pay(request, pay_id=0):
    # Create (pay_id == 0) or edit an existing periodic payment; the
    # required permission depends on which of the two we are doing.
    if pay_id != 0:
        pay_inst = get_object_or_404(PeriodicPay, pk=pay_id)
        if not request.user.has_perm('tariff_app.change_periodicpay'):
            raise PermissionDenied
    else:
        pay_inst = None
        if not request.user.has_perm('tariff_app.add_periodicpay'):
            raise PermissionDenied
    if request.method == 'POST':
        frm = forms.PeriodicPayForm(request.POST, instance=pay_inst)
        if frm.is_valid():
            new_periodic_pay = frm.save()
            if pay_inst is None:
                comment = _('New periodic pay successfully created')
            else:
                comment = _('Periodic pay has been changed')
            messages.success(request, comment)
            return redirect('tarifs:periodic_pay_edit', new_periodic_pay.pk)
        else:
            messages.error(request, _('Some fields were filled incorrect, please try again'))
    else:
        frm = forms.PeriodicPayForm(instance=pay_inst)
    return render(request, 'tariff_app/periodic_pays/add_edit.html', {
        'pay_instance': pay_inst,
        'form': frm
    })


class ServiceUsers(LoginAdminMixin, OrderedFilteredList):
    # List the subscribers (Abon) currently on a given tariff.
    template_name = 'tariff_app/service_users.html'
    model = Abon

    def get_queryset(self):
        tarif_id = self.kwargs.get('tarif_id')
        return Abon.objects.filter(current_tariff__tariff__id=tarif_id).select_related('group')

    def get_context_data(self, **kwargs):
        # Cache the Tariff on the view instance so repeated calls reuse it.
        if hasattr(self, 'tariff'):
            tariff = getattr(self, 'tariff')
        else:
            tarif_id = self.kwargs.get('tarif_id')
            tariff = get_object_or_404(Tariff, pk=tarif_id)
            setattr(self, 'tariff', tariff)
        # NOTE(review): redundant -- both branches above already leave
        # self.tariff set; kept byte-for-byte.
        self.tariff = tariff
        context = {
            'tariff': tariff,
            'total': self.object_list.count()
        }
        context.update(kwargs)
        return super().get_context_data(**context)
# Public API of this module: the block classes re-exported for consumers.
__all__ = [
    "Block",
    "Unknown",
    "Multitextured",
    "DataValues",
    "Stairs",
    "MultitexturedStairs",
    "Slab",
    "MultitexturedSlab",
    "Log",
]
#!/usr/bin/python

from gi.repository import Gtk, GObject
import time
import unittest

from testutils import setup_test_env
setup_test_env()
from softwarecenter.enums import XapianValues, ActionButtons

# Milliseconds to wait before the test window is destroyed.
TIMEOUT = 300


class TestCustomLists(unittest.TestCase):
    """Exercise the comma-separated "custom list" search mode."""

    def _debug(self, index, model, needle):
        # Build and RETURN a readable failure message.  The original did
        #   print(fmt) % (args)
        # which applies "%" to print's return value (None) -- a TypeError
        # on Python 3 -- and printed eagerly even when the assertion passed.
        # Returning the string lets assertEqual show it only on failure.
        return ("Expected '%s' at index '%s', "
                "and custom list contained: '%s'") % (
            needle, index,
            model[index][0].get_value(XapianValues.PKGNAME))

    def assertPkgInListAtIndex(self, index, model, needle):
        doc = model[index][0]
        self.assertEqual(doc.get_value(XapianValues.PKGNAME), needle,
                         self._debug(index, model, needle))

    def test_custom_lists(self):
        from softwarecenter.ui.gtk3.panes.availablepane import get_test_window
        win = get_test_window()
        pane = win.get_data("pane")
        self._p()
        pane.on_search_terms_changed(None, "ark,artha,software-center")
        self._p()
        model = pane.app_view.tree_view.get_model()

        # custom list should return three items
        self.assertTrue(len(model) == 3)

        # check package names, ordering is default "by relevance"
        self.assertPkgInListAtIndex(0, model, "ark")
        self.assertPkgInListAtIndex(1, model, "software-center")
        self.assertPkgInListAtIndex(2, model, "artha")

        # check that the status bar offers to install the packages
        install_button = pane.action_bar.get_button(ActionButtons.INSTALL)
        self.assertNotEqual(install_button, None)

        # Close the window shortly after, then spin the main loop.
        GObject.timeout_add(TIMEOUT, lambda: win.destroy())
        Gtk.main()

    def _p(self):
        # Let the Gtk main loop settle for ~1 second.
        for i in range(10):
            time.sleep(0.1)
            while Gtk.events_pending():
                Gtk.main_iteration()


if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    unittest.main()
#!/usr/bin/python

"""
This is a wrapper to run the 'lacheck(1)' tool from the 'lacheck' package.

Why do we need this wrapper?
- lacheck does NOT report in its exit status whether it had warnings or not.
- it is too verbose when there are no warnings.
"""

import sys  # for argv, exit, stderr
import subprocess  # for check_output


def filter_output(out):
    """Print only the interesting part of lacheck's output.

    A '**' line names the file being checked; it is printed lazily, i.e.
    only once a warning/error line actually follows it, so clean files
    produce no output at all.

    :param out: lacheck's stdout as text.
    :returns: True if any warning or error line was seen.
    """
    errors = False
    remember = None  # the last-seen '**' header, not yet printed
    printed_remember = False
    for line in out.split('\n'):
        if line.startswith('**'):
            remember = line
            printed_remember = False
            continue
        if line == '':
            continue
        # This is a warning or error.
        errors = True
        if not printed_remember and remember is not None:
            # Guard against a warning appearing before any '**' header
            # (the original would have printed "None" here).
            print(remember)
            printed_remember = True
        print(line)
    return errors


def main():
    # universal_newlines=True makes check_output return str rather than
    # bytes on Python 3, so splitting on '\n' works on both 2 and 3
    # (the original crashed on Python 3 with bytes.split('\n')).
    out = subprocess.check_output(
        ['lacheck', sys.argv[1]],
        universal_newlines=True,
    )
    if filter_output(out):
        sys.exit(1)


if __name__ == '__main__':
    main()
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

import re

from flask import abort, redirect, request
from werkzeug.exceptions import NotFound

from indico.modules.categories.models.legacy_mapping import LegacyCategoryMapping
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple


@RHSimple.wrap_function
def compat_category(legacy_category_id, path=None):
    """Permanently redirect a legacy category URL (e.g. ``123l45``) to the
    same endpoint addressed by the category's modern integer id."""
    if re.match(r'^\d+l\d+$', legacy_category_id) is None:
        abort(404)
    mapping = (LegacyCategoryMapping.query
               .filter_by(legacy_category_id=legacy_category_id)
               .first())
    if mapping is None:
        raise NotFound(f'Legacy category {legacy_category_id} does not exist')
    # To create the same URL with the proper ID we take advantage of the
    # fact that the legacy endpoint works perfectly fine with proper IDs
    # too (you can pass an int for a string argument), but due to the
    # weight of the `int` converter used for new endpoints, the URL will
    # then be handled by the proper endpoint instead of this one.
    target_args = dict(request.args.to_dict(), **request.view_args.copy())
    target_args['legacy_category_id'] = mapping.category_id
    return redirect(url_for(request.endpoint, **target_args), 301)
import unittest

import flavio
from math import sqrt, pi
from flavio.physics.zdecays.gammazsm import Zobs, pb
from flavio.physics.zdecays.gammaz import GammaZ_NP
from flavio.physics.zdecays import smeftew

# Central values of all input parameters, shared by the tests below.
par = flavio.default_parameters.get_central_all()


class TestGammaZ(unittest.TestCase):
    """Regression tests for Z partial widths and ratios against reference values."""

    def test_obs_sm(self):
        # check the SM predictions
        self.assertAlmostEqual(flavio.sm_prediction('GammaZ'), 2.4950, delta=0.0015)
        # Total width must be consistent with the lifetime input 1/tau_Z.
        self.assertAlmostEqual(flavio.sm_prediction('GammaZ'), 1 / par['tau_Z'], delta=0.0015)
        self.assertAlmostEqual(flavio.sm_prediction('sigma_had') / pb / 1e3, 41.488, delta=0.05)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->ee)'), 83.966e-3, delta=0.001e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->mumu)'), 83.966e-3, delta=0.001e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->tautau)'), 83.776e-3, delta=0.001e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->uu)'), 299.936e-3, delta=0.04e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->cc)'), 299.860e-3, delta=0.04e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->dd)'), 382.770e-3, delta=0.04e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->ss)'), 382.770e-3, delta=0.04e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->bb)'), 375.724e-3, delta=0.02e-3)
        self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->nunu)'), 167.157e-3, delta=0.01e-3)
        self.assertAlmostEqual(flavio.sm_prediction('R_l'), 20750.9e-3, delta=2e-3)
        self.assertAlmostEqual(flavio.sm_prediction('R_c'), 172.23e-3, delta=0.01e-3)
        self.assertAlmostEqual(flavio.sm_prediction('R_b'), 215.80e-3, delta=0.01e-3)
        self.assertAlmostEqual(flavio.sm_prediction('R_e'), 20.743, delta=0.01)
        self.assertAlmostEqual(flavio.sm_prediction('R_mu'), 20.743, delta=0.01)
        self.assertAlmostEqual(flavio.sm_prediction('R_tau'), 20.743, delta=0.05)
        self.assertAlmostEqual(flavio.sm_prediction('R_uc'), 0.1724, delta=0.0002)
        # R_uc is by definition the average of R_u and R_c, so exact equality.
        self.assertEqual(flavio.sm_prediction('R_uc'),
                         (flavio.sm_prediction('R_u') + flavio.sm_prediction('R_c')) / 2)

    def test_r_sm(self):
        # check that the SM predictions for the Ri agree with the Gammas
        par = flavio.default_parameters.get_central_all()
        mh = par['m_h']
        mt = par['m_t']
        als = par['alpha_s']
        # Da: hadronic contribution to the running of alpha (fixed input).
        Da = 0.059
        mZ = par['m_Z']
        arg = (mh, mt, als, Da, mZ)
        Rl = Zobs('Rl', *arg)
        Rc = Zobs('Rc', *arg)
        Rb = Zobs('Rb', *arg)
        Ge = Zobs('Gammae,mu', *arg)
        Gmu = Zobs('Gammae,mu', *arg)
        Gtau = Zobs('Gammatau', *arg)
        Gu = Zobs('Gammau', *arg)
        Gd = Zobs('Gammad,s', *arg)
        Gs = Zobs('Gammad,s', *arg)
        Gc = Zobs('Gammac', *arg)
        Gb = Zobs('Gammab', *arg)
        Ghad = Gu + Gd + Gc + Gs + Gb
        Gl = (Ge + Gmu + Gtau) / 3.
        # The ratio observables must be consistent with the partial widths.
        self.assertAlmostEqual(Rl, Ghad / Gl, delta=1e-4)
        self.assertAlmostEqual(Rc, Gc / Ghad, delta=1e-4)
        self.assertAlmostEqual(Rb, Gb / Ghad, delta=1e-4)

    def test_obs_sm_fv(self):
        # check the SM predictions for LFV decays (identically zero in the SM)
        self.assertEqual(flavio.sm_prediction('BR(Z->emu)'), 0)
        self.assertEqual(flavio.sm_prediction('BR(Z->etau)'), 0)
        self.assertEqual(flavio.sm_prediction('BR(Z->mutau)'), 0)

    def test_Gamma_NP(self):
        # compare NP contributions to A.49-A.52 from 1706.08945
        GF, mZ, s2w_eff = par['GF'], par['m_Z'], par['s2w']*1.0010
        d_gV = 0.055
        d_gA = 0.066
        # A.49-A.52 from 1706.08945
        dGamma_Zll = sqrt(2)*GF*mZ**3/(6*pi) * (-d_gA + (-1+4*s2w_eff)*d_gV)
        dGamma_Znn = sqrt(2)*GF*mZ**3/(6*pi) * (d_gA + d_gV)
        dGamma_Zuu = sqrt(2)*GF*mZ**3/(pi) * (d_gA -1/3*(-3+8*s2w_eff)*d_gV) /2
        dGamma_Zdd = sqrt(2)*GF*mZ**3/(pi) * (-3/2*d_gA +1/2*(-3+4*s2w_eff)*d_gV) /3
        # term squared in d_gV and d_gA not included in 1706.08945
        d_g_squared = sqrt(2)*GF*mZ**3/(3*pi)*(abs(d_gV)**2+abs(d_gA)**2)
        self.assertAlmostEqual(
            dGamma_Zll + d_g_squared,
            GammaZ_NP(par, 1, smeftew.gV_SM('e', par), d_gV, smeftew.gA_SM('e', par), d_gA)
        )
        self.assertAlmostEqual(
            dGamma_Znn + d_g_squared,
            GammaZ_NP(par, 1, smeftew.gV_SM('nue', par), d_gV, smeftew.gA_SM('nue', par), d_gA)
        )
        # Quark modes carry a colour factor of 3 (second argument).
        self.assertAlmostEqual(
            dGamma_Zuu + 3*d_g_squared,
            GammaZ_NP(par, 3, smeftew.gV_SM('u', par), d_gV, smeftew.gA_SM('u', par), d_gA)
        )
        self.assertAlmostEqual(
            dGamma_Zdd + 3*d_g_squared,
            GammaZ_NP(par, 3, smeftew.gV_SM('d', par), d_gV, smeftew.gA_SM('d', par), d_gA)
        )


class TestAFBZ(unittest.TestCase):
    """Regression tests for Z-pole asymmetries against reference values."""

    def test_afbz_sm(self):
        for l in ['e', 'mu', 'tau']:
            # Lepton universality: same value for all charged leptons.
            self.assertAlmostEqual(flavio.sm_prediction('A(Z->{}{})'.format(l, l)),
                                   0.1472, delta=0.0002, msg="Failed for {}".format(l))
            self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->{}{})'.format(l, l)),
                                   0.0163, delta=0.0002, msg="Failed for {}".format(l))
        self.assertAlmostEqual(flavio.sm_prediction('A(Z->bb)'), 0.935, delta=0.001)
        self.assertAlmostEqual(flavio.sm_prediction('A(Z->cc)'), 0.668, delta=0.001)
        self.assertAlmostEqual(flavio.sm_prediction('A(Z->ss)'), 0.935, delta=0.001)
        self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->bb)'), 0.1032, delta=0.0002)
        self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->cc)'), 0.0738, delta=0.0002)
# -*- coding:
utf-8 -*- """ urwintranet.ui.views ~~~~~~~~~~~~~~~~~~ """ from . import (auth, home, pa
rts)
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest

import jmespath


class TestApacheAirflow(BaseTest):
    """Functional tests (replayed flight data) for the MWAA `airflow` resource."""

    def test_airflow_environment_value_filter(self):
        """Value filter on Name matches exactly one environment."""
        session_factory = self.replay_flight_data('test_airflow_environment_value_filter')
        p = self.load_policy(
            {
                "name": "airflow-name-filter",
                "resource": "airflow",
                "filters": [
                    {
                        "type": "value",
                        "key": "Name",
                        "op": "eq",
                        "value": "testEnvironment",
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['Name'], 'testEnvironment')
        self.assertEqual(resources[0]['c7n:MatchedFilters'], ['Name'])

    def test_airflow_environment_kms_filter(self):
        """kms-key filter matches and the key's alias is the expected one."""
        session_factory = self.replay_flight_data('test_airflow_environment_kms_filter')
        kms = session_factory().client('kms')
        expression = 'KmsKey'
        p = self.load_policy(
            {
                "name": "airflow-kms-filter",
                "resource": "airflow",
                "filters": [
                    {
                        "type": "kms-key",
                        "key": "c7n:AliasName",
                        "value": "alias/mwaa",
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        # Fixed: `assertTrue(len(resources), 1)` treated `1` as the assertion
        # *message*, so it passed for any non-empty result; use assertEqual
        # to actually compare the resource count.
        self.assertEqual(len(resources), 1)
        aliases = kms.list_aliases(KeyId=(jmespath.search(expression, resources[0])))
        self.assertEqual(aliases['Aliases'][0]['AliasName'], 'alias/mwaa')

    def test_airflow_environment_tag(self):
        """tag action adds the tag to an environment that lacked it."""
        session_factory = self.replay_flight_data('test_airflow_environment_tag')
        new_tag = {'env': 'dev'}
        p = self.load_policy(
            {
                'name': 'airflow-tag',
                'resource': 'airflow',
                'filters': [{
                    'tag:env': 'absent'
                }],
                'actions': [{
                    'type': 'tag',
                    'tags': new_tag
                }]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(1, len(resources))
        name = resources[0].get('Name')
        # Verify against the live (replayed) API that the tag landed.
        airflow = session_factory().client('mwaa')
        call = airflow.get_environment(Name=name)
        self.assertEqual(new_tag, call['Environment'].get('Tags'))

    def test_airflow_environment_untag(self):
        """remove-tag action strips the tag from a tagged environment."""
        session_factory = self.replay_flight_data('test_airflow_environment_untag')
        p = self.load_policy(
            {
                'name': 'airflow-untag',
                'resource': 'airflow',
                'filters': [{
                    'tag:env': 'dev'
                }],
                'actions': [{
                    'type': 'remove-tag',
                    'tags': ['env']
                }]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(1, len(resources))
        name = resources[0].get('Name')
        airflow = session_factory().client('mwaa')
        call = airflow.get_environment(Name=name)
        self.assertEqual({}, call['Environment'].get('Tags'))
""" Created on Sep 14, 2015 @author: Mikhail """ from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import visibility_of_element_located, visibility_of from selenium.common.exceptions import TimeoutException __author__ = 'Mikhail' class Page(object): def __init__(self, driver, url): self.driver = driver self.url = url self.wait = WebDriverWait(self.driver, 5) def open_page(self, url): self.driver.ge
t(url) def is_element_visible_by_locator(self, locator): try: self.wait.until(visibility_of_element_located(locator)) except TimeoutException: return False return True def is_element_visible(self,
element): try: self.wait.until(visibility_of(element)) except TimeoutException: return False return True
import os
import fnmatch


def find_files(directory, pattern):
    """Yield the full path of every file under *directory* (recursively)
    whose basename matches the glob *pattern*."""
    for root, _dirs, files in os.walk(directory):
        for basename in files:
            if fnmatch.fnmatch(basename, pattern):
                yield os.path.join(root, basename)


for fname in find_files('.', 'CMakeLists.txt'):
    # Context manager guarantees the file is closed even if the write fails
    # (the original used bare open()/close()).
    with open(fname, 'a') as f:
        f.write('\ninclude_directories(${OPENGL_INCLUDE_PATH} ${GLUT_INCLUDE_PATH})')
s_checks = models.ManyToManyField( 'StatusCheck', blank=True, help_text='Checks used to calculate service status.', ) last_alert_sent = models.DateTimeField( null=True, blank=True, ) alerts = models.ManyToManyField( 'AlertPlugin', blank=True, help_text='Alerts channels through which you wish to be notified' ) email_alert = models.BooleanField(default=False) hipchat_alert = models.BooleanField(default=True) sms_alert = models.BooleanField(default=False) telephone_alert = models.BooleanField( default=False, help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.', ) overall_status = models.TextField(default=PASSING_STATUS) old_overall_status = models.TextField(default=PASSING_STATUS) hackpad_id = models.TextField( null=True, blank=True, verbose_name='Recovery instructions', help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js' ) def __unicode__(self): return self.name def most_severe(self, check_list): failures = [c.importance for c in check_list] if self.CRITICAL_STATUS in failures: return self.CRITICAL_STATUS if self.ERROR_STATUS in failures: return self.ERROR_STATUS if self.WARNING_STATUS in failures: return self.WARNING_STATUS return self.PASSING_STATUS @property def is_critical(self): """ Break out separately because it's a bit of a pain to get wrong. 
""" if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS: return True return False def alert(self): if not self.alerts_enabled: return if self.overall_status != self.PASSING_STATUS: # Don't alert every time if self.overall_status == self.WARNING_STATUS: if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent: return elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS): if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent: return self.last_alert_sent = timezone.now() else: # We don't count "back to normal" as an alert self.last_alert_sent = None self.save() self.snapshot.did_send_alert = True self.snapshot.save() send_alert(self, duty_officers=get_duty_officers()) @property def recent_snapshots(self): snapshots = self.snapshots.filter( time__gt=(timezone.now() - timedelta(minutes=60 * 24))) snapshots = list(snapshots.values()) for s in snapshots: s['time'] = time.mktime(s['time'].timetuple()) return snapshots def graphite_status_checks(self): return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck') def http_status_checks(self): return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck') def jenkins_status_checks(self): return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck') def active_graphite_status_checks(self): return self.graphite_status_checks().filter(active=True) def active_http_status_checks(self): return self.http_status_checks().filter(active=True) def active_jenkins_status_checks(self): return self.jenkins_status_checks().filter(active=True) def active_status_checks(self): return self.status_checks.filter(active=True) def inactive_status_checks(self): return self.status_checks.filter(active=False) def all_passing_checks(self): return 
self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS) def all_failing_checks(self): return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS) class Service(CheckGroupMixin): def update_status(self): self.old_overall_status = self.overall_status # Only active checks feed into our calculation status_checks_failed_count = self.all_failing_checks().count() self.overall_status = self.most_severe(self.all_failing_checks()) self.snapshot = ServiceStatusSnapshot( service=self, num_checks_active=self.active_status_checks().count(), num_checks_passing=self.active_status_checks( ).count() - status_checks_failed_count, num_checks_failing=status_checks_failed_count, overall_status=self.overall_status, time=timezone.now(), ) self.snapshot.save() self.save() if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS): self.alert() instances = models.ManyToManyField( 'Instance', blank=True, help_text='Instances this service is running on.', ) url = models.TextField( blank=True, help_text="URL of service." ) class Meta: ordering = ['name'] class Instance(C
heckGroupMixin): def duplicate(self): checks = self.status_checks.all() new_instance = self new_instance.pk = None new_instance.id = None new_instance.name = u"Copy of %s" % self.name new_instance.save() for check in checks: check.duplicate(inst_set=(new_instance,), serv_set=()) return new_instance.pk def update_status(self): self.old_overall_status = self.ov
erall_status # Only active checks feed into our calculation status_checks_failed_count = self.all_failing_checks().count() self.overall_status = self.most_severe(self.all_failing_checks()) self.snapshot = InstanceStatusSnapshot( instance=self, num_checks_active=self.active_status_checks().count(), num_checks_passing=self.active_status_checks( ).count() - status_checks_failed_count, num_checks_failing=status_checks_failed_count, overall_status=self.overall_status, time=timezone.now(), ) self.snapshot.save() self.save() class Meta: ordering = ['name'] address = models.TextField( blank=True, help_text="Address (IP/Hostname) of service." ) def icmp_status_checks(self): return self.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck') def active_icmp_status_checks(self): return self.icmp_status_checks().filter(active=True) def delete(self, *args, **kwargs): self.icmp_status_checks().delete() return super(Instance, self).delete(*args, **kwargs) class Snapshot(models.Model): class Meta: abstract = True time = models.DateTimeField(db_index=True) num_checks_active = models.IntegerField(default=0) num_checks_passing = models.IntegerField(default=0) num_checks_failing = models.IntegerField(default=0) overall_status = models.TextField(default=Service.PASSING_STATUS) did_send_alert = models.IntegerField(default=False) class ServiceStatusSnapshot(Snapshot): service = models.ForeignKey(Service, related_name='snapshots') def __unicode__(self): return u"%s: %s" % (self.service.name, self.overall_status) class InstanceStatusSnapshot(Snapshot): instance = models.ForeignKey(Instance, related_name='snapshots') def __unicode__(self): return u"%s: %s" % (self.instance.name, self.overall_status) class StatusCheck(PolymorphicModel): """ Base class for polymorphic models. We're going to use proxy models for inheriting because it makes life much simpler, but this allows us to stick different methods etc on subclasses.
# Development entry point: import the configured application object and
# start it with its built-in server.
from project import app

if __name__ == '__main__':
    # Only start the server when executed directly, not when imported.
    app.run()
from collections import defaultdict, namedtuple

import regex as re

from gary import ignore_parens_list

# One dictionary entry: headword (dob), English glosses (eng), part of
# speech (pos) and phonetic form (phn).
Record = namedtuple('Record', ['dob', 'eng', 'pos', 'phn'])


@ignore_parens_list
def split_words(text: str) -> list:
    """Split a gloss field on semicolons, ignoring surrounding whitespace.

    NOTE(review): `ignore_parens_list` presumably protects semicolons inside
    parentheses from being split on — confirm against the `gary` module.
    """
    return re.split('\s*;\s*', text)


class ShParser:
    """Parser for backslash-coded (Toolbox-style) lexicon text.

    Recognizes the markers '\\lx' (headword), '\\ps' (part of speech) and
    '\\ge' (gloss); markers beginning with '_' are ignored.
    """

    def __init__(self, text):
        self.entries = []
        # Matches a backslash marker line: group 1 = marker name,
        # group 2 = the field value (rest of the line).
        pattern = re.compile('^\\\\(\w+)\s+(.*)$')
        self.entries = []
        curr = defaultdict(list)
        for line in text.splitlines():
            match = pattern.search(line)
            if match and not match[1].startswith('_'):
                if match[1].strip() == 'lx' and len(curr) > 0:
                    # New headword: flush the entry built so far and start
                    # a fresh one.
                    # NOTE(review): the headword is only stored when a
                    # previous entry already existed, so the first '\\lx'
                    # in a file appears to be dropped — confirm intended.
                    self.entries.append(curr)
                    curr = defaultdict(list)
                    curr['lx'] = match[2]
                    curr['ps'] = ''
                if match[1] == 'ps':
                    if len(curr['ge']) > 0:
                        # NOTE(review): `curr` is appended and then mutated
                        # ('ge' rebound on the same dict), so the stored
                        # entry aliases the one still being built — confirm
                        # this is the intended multi-sense behavior.
                        self.entries.append(curr)
                        curr['ge'] = []
                    curr['ps'] = match[2]
                if match[1] == 'ge':
                    word_list = split_words(match[2])
                    for word in word_list:
                        curr['ge'].append(word)

    def getEntries(self):
        """Yield one Record per parsed entry; glosses are joined with '‣'."""
        for entry in self.entries:
            if 'lx' in entry:
                dob = entry['lx']
            else:
                dob = ''
            eng = '‣'.join(entry['ge'])
            pos = entry['ps']
            # defaultdict: yields [] when no '\\ph' field was ever seen.
            phn = entry['ph']
            yield Record(dob, eng, pos, phn)
import numpy

from cupy._core._scalar import get_typename


# Base class for cuda types.
class TypeBase:
    """Common interface: render as a CUDA C type name and declare variables."""

    def __str__(self):
        raise NotImplementedError

    def declvar(self, x):
        """Return a C declaration of variable *x* with this type."""
        return f'{self} {x}'


class Void(TypeBase):
    """The C `void` type."""

    def __init__(self):
        pass

    def __str__(self):
        return 'void'


class Scalar(TypeBase):
    """A scalar type backed by a NumPy dtype."""

    def __init__(self, dtype):
        self.dtype = numpy.dtype(dtype)

    def __str__(self):
        dtype = self.dtype
        if dtype == numpy.float16:
            # For the performance: emit float32 code for half inputs.
            dtype = numpy.dtype('float32')
        return get_typename(dtype)

    def __eq__(self, other):
        return isinstance(other, Scalar) and self.dtype == other.dtype

    def __hash__(self):
        return hash(self.dtype)


class ArrayBase(TypeBase):
    """Base for array-like types: an element type plus a dimensionality."""

    def __init__(self, child_type: TypeBase, ndim: int):
        assert isinstance(child_type, TypeBase)
        self.child_type = child_type
        self.ndim = ndim


class CArray(ArrayBase):
    """The CArray<T, ndim, c_contiguous, index_32_bits> device-side view."""

    def __init__(self, dtype, ndim, is_c_contiguous, index_32_bits):
        self.dtype = dtype
        self._c_contiguous = is_c_contiguous
        self._index_32_bits = index_32_bits
        super().__init__(Scalar(dtype), ndim)

    @classmethod
    def from_ndarray(cls, x):
        """Build the CArray type matching an existing ndarray's layout."""
        return CArray(x.dtype, x.ndim, x._c_contiguous, x._index_32_bits)

    def __str__(self):
        ctype = get_typename(self.dtype)
        c_contiguous = get_cuda_code_from_constant(self._c_contiguous, bool_)
        index_32_bits = get_cuda_code_from_constant(self._index_32_bits, bool_)
        return f'CArray<{ctype}, {self.ndim}, {c_contiguous}, {index_32_bits}>'

    def __eq__(self, other):
        return (
            isinstance(other, CArray)
            and self.dtype == other.dtype
            and self.ndim == other.ndim
            and self._c_contiguous == other._c_contiguous
            and self._index_32_bits == other._index_32_bits
        )

    def __hash__(self):
        return hash(
            (self.dtype, self.ndim, self._c_contiguous, self._index_32_bits))


class SharedMem(ArrayBase):
    """A `__shared__` array; size None means dynamically-sized (extern)."""

    def __init__(self, child_type, size):
        if not (isinstance(size, int) or size is None):
            # Fixed: the original raised a plain string, which itself raises
            # "TypeError: exceptions must derive from BaseException" on
            # Python 3; raise a proper exception with the same message.
            raise ValueError('size of shared_memory must be integer or `None`')
        self._size = size
        super().__init__(child_type, 1)

    def declvar(self, x):
        if self._size is None:
            return f'extern __shared__ {self.child_type} {x}[]'
        return f'__shared__ {self.child_type} {x}[{self._size}]'


class Ptr(ArrayBase):
    """A raw pointer to `child_type`."""

    def __init__(self, child_type):
        super().__init__(child_type, 1)

    def __str__(self):
        return f'{self.child_type}*'


class Tuple(TypeBase):
    """A thrust::tuple of component types."""

    def __init__(self, types):
        self.types = types

    def __str__(self):
        types = ', '.join([str(t) for t in self.types])
        return f'thrust::tuple<{types}>'

    def __eq__(self, other):
        return isinstance(other, Tuple) and self.types == other.types

    def __hash__(self):
        # Added: defining __eq__ alone sets __hash__ to None, silently making
        # Tuple unhashable — inconsistent with Scalar and CArray above.
        return hash(tuple(self.types))


void = Void()
bool_ = Scalar(numpy.bool_)
int32 = Scalar(numpy.int32)
uint32 = Scalar(numpy.uint32)

# Literal suffixes used when emitting numeric constants in CUDA C.
_suffix_literals_dict = {
    'float64': '',
    'float32': 'f',
    'int64': 'll',
    'int32': '',
    'uint64': 'ull',
    'uint32': 'u',
    'bool': '',
}


def get_cuda_code_from_constant(x, ctype):
    """Render the Python constant *x* as a CUDA C literal of type *ctype*."""
    dtype = ctype.dtype
    suffix_literal = _suffix_literals_dict.get(dtype.name)
    if suffix_literal is not None:
        s = str(x).lower()
        return f'{s}{suffix_literal}'
    ctype = str(ctype)
    if dtype.kind == 'c':
        return f'{ctype}({x.real}, {x.imag})'
    if ' ' in ctype:
        # Multi-token type names need a cast instead of constructor syntax.
        return f'({ctype}){x}'
    return f'{ctype}({x})'
severity failure; res := (others => """ + logic_undef() + """); else for I in res_type'range loop res (I) := la (I) {0} ra (I); end loop; end if; return res; end "{0}";\n""" w (res.format(func, typ)) def disp_non_logical_warning(func): return """ assert NO_WARNING report "NUMERIC_STD.""{0}"": non logical value detected" severity warning;""".format(func) def conv_bit(expr): if logic == 'std': return "sl_to_x01 (" + expr + ")" else: return expr def extract_bit(name): res = "{0}b := " + conv_bit ("{0}a (i)") + ";" return res.format(name) def init_carry(func): if func == '+': return """ carry := '0';""" else: return """ carry := '1';""" def extract_extend_bit(name,typ): res = """ if i > {0}a'left then {0}b := """ if typ == 'UNSIGNED': res += "'0';" else: res += "{0} ({0}'left);" res += """ else """ + extract_bit(name) + """ end if;""" return res.format(name) def disp_vec_vec_binary(func, typ): "Generate vector binary function body" res = """ function "{0}" (l, r : {1}) return {1} is constant lft : integer := MAX (l'length, r'length) - 1; subtype res_type is {1} (lft downto 0); alias la : {1} (l'length - 1 downto 0) is l; alias ra : {1} (r'length - 1 downto 0) is r; variable res : res_type; variable lb, rb, carry : """ + logic_type () + """; begin if la'left < 0 or ra'left < 0 then return null_{1}; end if;""" res += init_carry(func) res += """ for i in 0 to lft loop""" res += extract_extend_bit('l', typ) res += extract_extend_bit('r', typ) if logic == 'std': res += """ if lb = 'X' or rb = 'X' then""" + \ disp_non_logical_warning(func) + """ res := (others => 'X'); exit; end if;""" if func == '-': res += """ rb := not rb;""" res += """ res (i) := compute_sum (carry, rb, lb); carry := compute_carry (carry, rb, lb); end loop; return res; end "{0}"; """ w (res.format (func, typ)) def declare_int_var(name, typ): res = """ variable {0}1, {0}2 : {1}; variable {0}d : nat1;"""; if typ == "INTEGER": res += """ constant {0}msb : nat1 := boolean'pos({0} < 0);""" return 
res.format(name, typ) def init_int_var(name, typ): return """ {0}1 := {0};""".format(name); def extract_int_lsb(name, typ): res = """ {0}2 := {0}1 / 2;""" if typ == "INTEGER": res += """ if {0}1 < 0 then {0}d := 2 * {0}2 - {0}1; {0}1 := {0}2 - {0}d; else {0}d := {0}1 - 2 * {0}2; {0}1 := {0}2; end if;""" else: res += """ {0}d := {0}1 - 2 * {0}2; {0}1 := {0}2;""" res += """ {0}b := nat1_to_01 ({0}d);""" return res.format(name,typ) def check_int_truncated(func, name, typ): if typ == "INTEGER": v = "-{0}msb".format(name) else: v = "0" return """ if {1}1 /= {2} then assert NO_WARNING report "NUMERIC_STD.""{0}"": vector is truncated" severity warning; end if;""".format(func, name, v) def create_vec_int_dict(func, left, right): if left in vec_types: dic = {'vtype': left, 'itype': right, 'vparam': 'l', 'iparam': 'r'} else: dic = {'vtype': right, 'itype': left, 'vparam': 'r', 'iparam': 'l'} dic.update({'ltype': left, 'rtype': right, 'func': func, 'logic': logic_type()}) return dic def disp_vec_int_binary(func, left, right): "Generate vector binary function body" dic = create_vec_int_dict(func, left, right) res = """ function "{func}" (l : {ltype}; r : {rtype}) return {vtype} is subtype res_type is {vtype} ({vparam}'length - 1 downto 0); alias {vparam}a : res_type is {vparam};""" + \ declare_int_var (dic["iparam"], dic["itype"]) + """ variable res : res_type; variable lb, rb, carry : {logic}; begin if res'length < 0 then return null_{vtype}; end if;""" # Initialize carry. For subtraction, use 2-complement. 
res += init_carry(func) res += init_int_var(dic['iparam'], dic['itype']) + """ for i in res'reverse_range loop """ + extract_bit(dic['vparam']) + "\n" + \ extract_int_lsb(dic['iparam'], dic['itype']); if logic == 'std': res += """ if {vparam}b = 'X' then""" + \ disp_non_logical_warning(func) + """ res := (others => 'X'); {iparam}1 := 0; exit; end if;""" # 2-complement for subtraction if func == '-': res += """ rb := not rb;""" res += """ res (i) := compute_sum (carry, rb, lb); carry := compute_carry (carry, rb, lb); end loop;""" + \ check_int_truncated(func, dic['iparam'], dic['itype']) + """ return res; end "{func}";\n""" w(res.format (**dic)) def disp_vec_int_gcompare(func, left, right): "Generate comparison function" dic = create_vec_int_dict(func, left, right) res = """ function {func} (l : {ltype}; r : {rtype}) return compare_type is subtype res_type is {vtype} ({vparam}'length - 1 downto 0); alias la : res_type is l;""" + \ declare_int_var (dic['iparam'], dic['itype']) + """ variable lb, rb : {lo
gic}; variable res : compare_type; begin res := compare_eq;"""; res += init_int_var(dic['iparam'], dic['itype']) + """ for i in {vparam}a'reverse_range loop """ + extract_bit (dic['vparam']) + \ extract_int_lsb("r", right) if logic == 'std': res += """ if {vparam}b = 'X' then return compare_unknown; end if;""" res += """ if lb = '1' and rb = '0' then res := compare_gt;
elsif lb = '0' and rb = '1' then res := compare_lt; end if; end loop;""" if func == "ucompare": res += """ if r1 /= 0 then res := compare_lt; end if;""" else: res += """ if """ + conv_bit ("l (l'left)") + """ = '1' then if r >= 0 then res := compare_lt; end if; else if r < 0 then res := compare_gt; end if; end if;""" res += """ return res; end {func}; """ w(res.format (**dic)) def disp_vec_int_compare(func, left, right): "Generate comparison function" dic = create_vec_int_dict(func, left, right) res = """ function "{func}" (l : {ltype}; r : {rtype}) return boolean is subtype res_type is {vtype} ({vparam}'length - 1 downto 0); alias {vparam}a : res_type is {vparam};""" + \ declare_int_var (dic['iparam'], dic['itype']) + """ variable res : compare_type; begin if {vparam}'length = 0 then assert NO_WARNING report "NUMERIC_STD.""{func}"": null argument, returning FALSE" severity warning; return false; end if; res := """ if left == "SIGNED" or right == "SIGNED": res += "scompare" else: res += "ucompare" if left in vec_types: res += " (l, r);" else: res += " (r, l);" if logic == 'std': res += """ if res = compare_unknown then""" + \ disp_non_logical_warning(func) + """ return false; end if;""" if left in vec_types: res += """ return res {func} compare_eq;""" else: res += """ return compare_eq {func} res;""" res += """ end "{func}"; """ w(res.format (**dic)) def disp_vec_vec_gcompare(func, typ): "Generate comparison function" res = """ function {func} (l, r : {typ}) return compare_type is constant sz : integer := MAX (l'length, r'length) - 1; alias la : {typ} (l'length - 1 downto 0) is l; alias ra : {typ} (r'length - 1 downto 0) is r; variable lb, rb : {logic}; variable res : compare_type; begin""" if typ == 'SIGNED': res += ""
from venv import _venv

from fabric.api import task


# NOTE(review): `_venv` presumably runs the command inside the project's
# virtualenv — confirm against the local `venv` module.
@task
def migrate():
    """ Run Django's migrate command """
    _venv("python manage.py migrate")


@task
def syncdb():
    """ Run Django's syncdb command """
    # syncdb is the pre-1.7 Django table-creation command.
    _venv("python manage.py syncdb")
""" basic set of `jut run` tests """ import json import unittest from tests.util import jut BAD_PROGRAM = 'foo' BAD_PROGRAM_ERROR = 'Error line 1, column 1 of main: Error: no such sub: foo' class JutRunTests(unittest.TestCase): def test_jut_run_syntatically_incorrect_program_reports_error_with_format_json(self): """ verify an invalid program reports the failure correctly when using json output format """ process = jut('run', BAD_PROGRAM, '-f', 'json') process.expect_status(255) process.expect_error(BAD_PROGRAM_ERROR) def test_jut_run_syntatically_incorrect_program_reports_error_with_format_text(self): """ verify an invalid program reports the failure correctly when using text output format """ process = jut('run', BAD_PROGRAM, '-f', 'text') process.expect_status(255) process.expect_error(BAD_PROGRAM_ERROR) def test_jut_run_syntatically_incorrect_program_reports_error_with_format_csv(self): """ verify an invalid program reports the failure correctly when using csv output format """ process = jut('run', BAD_PROGRAM, '-f', 'json') process.expect_status(255) process.expect_error(BAD_PROGRAM_ERROR) def test_jut_run_emit_to_json(self): """ use jut to run the juttle program: emit -from :2014-01-01T00:00:00.000Z: -limit 5 and verify the output is in the expected JSON format """ process = jut('run', 'emit -from :2014-01-01T00:00:00.000Z: -limit 5') process.expect_status(0) points = json.loads(process.read_output()) process.expect_eof() self.assertEqual(points, [ {'time': '2014-01-01T00:00:00.000Z'}, {'time': '2014-01-01T00:00:01.000Z'}, {'time': '2014-01-01T00:00:02.000Z'}, {'time': '2014-01-01T00:00:03.000Z'},
{'time': '2014-01-01T00:00:04.000Z'} ]) def test_jut_run_emit_to_text(self): """ use jut to run the juttle program: emit -from :2014-01-01T00:00:00.000Z: -limit 5
and verify the output is in the expected text format """ process = jut('run', '--format', 'text', 'emit -from :2014-01-01T00:00:00.000Z: -limit 5') process.expect_status(0) stdout = process.read_output() process.expect_eof() self.assertEqual(stdout, '2014-01-01T00:00:00.000Z\n' '2014-01-01T00:00:01.000Z\n' '2014-01-01T00:00:02.000Z\n' '2014-01-01T00:00:03.000Z\n' '2014-01-01T00:00:04.000Z\n') def test_jut_run_emit_to_csv(self): """ use jut to run the juttle program: emit -from :2014-01-01T00:00:00.000Z: -limit 5 and verify the output is in the expected csv format """ process = jut('run', '--format', 'csv', 'emit -from :2014-01-01T00:00:00.000Z: -limit 5') process.expect_status(0) stdout = process.read_output() process.expect_eof() self.assertEqual(stdout, '#time\n' '2014-01-01T00:00:00.000Z\n' '2014-01-01T00:00:01.000Z\n' '2014-01-01T00:00:02.000Z\n' '2014-01-01T00:00:03.000Z\n' '2014-01-01T00:00:04.000Z\n')
# Monk's exam: each question has an integer difficulty level.  Monk can only
# solve questions whose difficulty does not exceed X.  He answers questions
# in order starting from the first, may skip exactly one question he cannot
# solve, and must stop at the second unsolvable question.  His score is the
# number of questions he answered.
#
# Input Format
#   First line: N (number of questions) and X (max difficulty Monk can solve)
#   Next line:  N integers Ai, the difficulty of each question
# Output Format
#   Maximum score Monk can achieve in the exam.
# Constraints: 1<=N<=10^5, 1<=X<=10^9, 1<=Ai<=10^9
#
# SAMPLE INPUT       SAMPLE OUTPUT
#   7 6                3
#   4 3 7 6 7 2 2


def max_score(x, difficulties):
    """Return the maximum score for solving threshold *x* and the given
    question difficulties, allowing a single skip.

    Fixes the original scan, which tested `skip == 2` only when reaching a
    *solvable* question: after a third unsolvable question the equality was
    false, so later solvable questions were wrongly counted again.
    """
    score = 0
    skips = 0
    for difficulty in difficulties:
        if difficulty > x:
            skips += 1
            if skips > 1:
                # Second unsolvable question: Monk must stop here.
                break
        else:
            score += 1
    return score


def main():
    # Read "N X" then the N difficulty levels from stdin.
    n, x = map(int, input().split())
    questions = input().split()
    print(max_score(x, [int(q) for q in questions[:n]]))


if __name__ == '__main__':
    # Guarding the I/O lets the scoring logic be imported and tested without
    # consuming stdin (the original ran everything at import time).
    main()
'''OpenGL extension ARB.robustness_isolation This module customises the behaviour of the OpenGL.raw.GL.ARB.robustness_isolation to provide a more Python-friendly API Overview (from the spec) GL
_ARB_robustness and supporting window system extensions allow creating an OpenGL context supporting graphics reset notification behavior. GL_ARB_ro
bustness_isolation provides stronger guarantees about the possible side-effects of a graphics reset. It is expected that there may be a performance cost associated with isolating an application or share group from other contexts on the GPU. For this reason, GL_ARB_robustness_isolation is phrased as an opt-in mechanism, with a new context creation bit defined in the window system bindings. It is expected that implementations might only advertise the strings in this extension if both the implementation supports the desired isolation properties, and the context was created with the appropriate reset isolation bit. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/robustness_isolation.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.ARB.robustness_isolation import * from OpenGL.raw.GL.ARB.robustness_isolation import _EXTENSION_NAME def glInitRobustnessIsolationARB(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION
from setuptools import find_packages
from setuptools import setup

# Packaging metadata for the InAcademia Simple Validation Service (svs).
setup(
    name='svs',
    version='1.0.0',
    # BUGFIX: the adjacent string literals previously concatenated to
    # "...affiliation (Student,Faculty, Staff)..." — the space after the
    # comma was missing.  Implicit concatenation joins the literals with
    # no separator, so the space must be written explicitly.
    description='The InAcademia Simple validation Service allows for the easy validation of affiliation (Student, '
                'Faculty, Staff) of a user in Academia',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
    ],
    author='Rebecka Gulliksson',
    author_email='tech@inacademia.org',
    zip_safe=False,
    url='http://www.inacademia.org',
    # The package lives under src/ (the "src layout").
    packages=find_packages('src'),
    package_dir={'': 'src'},
    package_data={
        'svs': [
            'data/i18n/locale/*/LC_MESSAGES/*.mo',
            'templates/*.mako',
            'site/static/*',
        ],
    },
    # Babel message-extraction configuration for translatable strings.
    message_extractors={
        'src/svs': [
            ('**.py', 'python', None),
            ('templates/**.mako', 'mako', None),
            ('site/**', 'ignore', None)
        ]
    },
    install_requires=[
        'satosa==3.3.1',
        'Mako',
        'gunicorn',
        'Werkzeug'
    ]
)
            # Custom-attribute values arrive keyed by attribute *name*;
            # re-key them by attribute *id* before persisting.
            custom_attributes_values = \
                _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, custom_attributes_values)
            _store_custom_attributes_values(validator.object, custom_attributes_values, "epic",
                                            validators.EpicCustomAttributesValuesExportValidator)

        return validator

    # Validation failed: record the errors and signal failure with None.
    add_errors("epics", validator.errors)
    return None


def store_epics(project, data):
    """Store every epic found in *data* into *project*.

    Returns a list with one entry per epic: the validator on success, or
    None for epics that failed validation (errors are collected through
    add_errors inside store_epic).
    """
    results = []
    for epic in data.get("epics", []):
        epic = store_epic(project, epic)
        results.append(epic)
    return results


## TASKS

def store_task(project, data):
    """Create one task in *project* from the exported dict *data*.

    Returns the validator wrapping the saved object on success, or None
    after recording validation errors under the "tasks" key.
    """
    # Fall back to the project's default task status when the export
    # does not carry one.
    if "status" not in data and project.default_task_status:
        data["status"] = project.default_task_status.name

    validator = validators.TaskExportValidator(data=data, context={"project": project})
    if validator.is_valid():
        validator.object.project = project
        if validator.object.owner is None:
            validator.object.owner = validator.object.project.owner
        # Flags read elsewhere (presumably signal handlers) to suppress
        # notifications/side effects during import — TODO confirm.
        validator.object._importing = True
        validator.object._not_notify = True

        validator.save()
        validator.save_watchers()

        if validator.object.ref:
            # Imported object brings its own ref: make sure the project's
            # reference sequence is at least that high.
            sequence_name = refs.make_sequence_name(project)
            if not seq.exists(sequence_name):
                seq.create(sequence_name)
            seq.set_max(sequence_name, validator.object.ref)
        else:
            # No ref in the export: allocate the next one from the project.
            validator.object.ref, _ = refs.make_reference(validator.object, project)
            validator.object.save()

        for task_attachment in data.get("attachments", []):
            _store_attachment(project, validator.object, task_attachment)

        history_entries = data.get("history", [])
        for history in history_entries:
            _store_history(project, validator.object, history)

        # Without imported history there is no snapshot yet; take one now.
        if not history_entries:
            take_snapshot(validator.object, user=validator.object.owner)

        custom_attributes_values = data.get("custom_attributes_values", None)
        if custom_attributes_values:
            custom_attributes = validator.object.project.taskcustomattributes.all().values('id', 'name')
            # Re-key the values from attribute names to attribute ids.
            custom_attributes_values = \
                _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, custom_attributes_values)
            _store_custom_attributes_values(validator.object, custom_attributes_values, "task",
                                            validators.TaskCustomAttributesValuesExportValidator)

        return validator

    add_errors("tasks", validator.errors)
    return None


def store_tasks(project, data):
    """Store every task in *data*; see store_task for per-item results."""
    results = []
    for task in data.get("tasks", []):
        task = store_task(project, task)
        results.append(task)
    return results


## ISSUES

def store_issue(project, data):
    """Create one issue in *project* from the exported dict *data*.

    Returns the validator on success, or None after recording errors
    under the "issues" key.
    """
    validator = validators.IssueExportValidator(data=data, context={"project": project})

    # NOTE(review): these defaults mutate *data* after the validator was
    # constructed — presumably validation is lazy (runs inside is_valid())
    # so the defaults are still picked up; confirm against the validator.
    if "type" not in data and project.default_issue_type:
        data["type"] = project.default_issue_type.name

    if "status" not in data and project.default_issue_status:
        data["status"] = project.default_issue_status.name

    if "priority" not in data and project.default_priority:
        data["priority"] = project.default_priority.name

    if "severity" not in data and project.default_severity:
        data["severity"] = project.default_severity.name

    if validator.is_valid():
        validator.object.project = project
        if validator.object.owner is None:
            validator.object.owner = validator.object.project.owner
        # Import-mode flags; same meaning as in store_task.
        validator.object._importing = True
        validator.object._not_notify = True

        validator.save()
        validator.save_watchers()

        if validator.object.ref:
            # Keep the project's ref sequence ahead of the imported ref.
            sequence_name = refs.make_sequence_name(project)
            if not seq.exists(sequence_name):
                seq.create(sequence_name)
            seq.set_max(sequence_name, validator.object.ref)
        else:
            validator.object.ref, _ = refs.make_reference(validator.object, project)
            validator.object.save()

        for attachment in data.get("attachments", []):
            _store_attachment(project, validator.object, attachment)

        history_entries = data.get("history", [])
        for history in history_entries:
            _store_history(project, validator.object, history)

        if not history_entries:
            take_snapshot(validator.object, user=validator.object.owner)

        custom_attributes_values = data.get("custom_attributes_values", None)
        if custom_attributes_values:
            custom_attributes = validator.object.project.issuecustomattributes.all().values('id', 'name')
            custom_attributes_values = \
                _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, custom_attributes_values)
            _store_custom_attributes_values(validator.object, custom_attributes_values, "issue",
                                            validators.IssueCustomAttributesValuesExportValidator)

        return validator

    add_errors("issues", validator.errors)
    return None


def store_issues(project, data):
    """Store every issue in *data*; one result (validator or None) each."""
    issues = []
    for issue in data.get("issues", []):
        issues.append(store_issue(project, issue))
    return issues


## WIKI PAGES

def store_wiki_page(project, wiki_page):
    """Create one wiki page in *project* from the exported dict."""
    # Normalise the slug: transliterate to ASCII first, then slugify.
    wiki_page["slug"] = slugify(unidecode(wiki_page.get("slug", "")))
    validator = validators.WikiPageExportValidator(data=wiki_page)
    if validator.is_valid():
        validator.object.project = project
        if validator.object.owner is None:
            validator.object.owner = validator.object.project.owner
        validator.object._importing = True
        validator.object._not_notify = True

        validator.save()
        validator.save_watchers()

        for attachment in wiki_page.get("attachments", []):
            _store_attachment(project, validator.object, attachment)

        history_entries = wiki_page.get("history", [])
        for history in history_entries:
            _store_history(project, validator.object, history)

        if not history_entries:
            take_snapshot(validator.object, user=validator.object.owner)

        return validator

    add_errors("wiki_pages", validator.errors)
    return None


def store_wiki_pages(project, data):
    """Store every wiki page in *data*."""
    results = []
    for wiki_page in data.get("wiki_pages", []):
        results.append(store_wiki_page(project, wiki_page))
    return results


## WIKI LINKS

def store_wiki_link(project, wiki_link):
    """Create one wiki link in *project*; returns validator or None."""
    validator = validators.WikiLinkExportValidator(data=wiki_link)
    if validator.is_valid():
        validator.object.project = project
        validator.object._importing = True
        validator.save()
        return validator

    add_errors("wiki_links", validator.errors)
    return None


def store_wiki_links(project, data):
    """Store every wiki link in *data*."""
    results = []
    for wiki_link in data.get("wiki_links", []):
        results.append(store_wiki_link(project, wiki_link))
    return results


## TAGS COLORS

def store_tags_colors(project, data):
    """Copy the exported tag→color mapping onto *project* and save it."""
    project.tags_colors = data.get("tags_colors", [])
    project.save()
    return None


## TIMELINE

def _store_timeline_entry(project, timeline):
    """Store one timeline entry attached to *project*.

    NOTE: unlike the store_* helpers above, this returns the validator
    even when validation fails (after recording the errors).
    """
    validator = validators.TimelineExportValidator(data=timeline, context={"project": project})
    if validator.is_valid():
        validator.object.project = project
        validator.object.namespace = build_project_namespace(project)
        # Timeline entries are generically related to the project object.
        validator.object.object_id = project.id
        validator.object.content_type = ContentType.objects.get_for_model(project.__class__)
        validator.object._importing = True
        validator.save()
        return validator

    add_errors("timeline", validator.errors)
    return validator


def store_timeline_ent
#!/usr/bin/env python
"""list all previously made bookings"""
# CGI script (Python 2): prints an HTML page listing the user's shuttle
# ("Connector") bookings, with per-booking cancel forms and a live ETA
# for upcoming rides.  Credentials and route preferences come from the
# project-local shcookie module.
import os
import sys
import cgi
import datetime
import json
import shuttle
import shconstants
import smtplib
import shcookie

# CGI response header must precede any body output.
print "Content-type: text/html\r\n"

shuttle.do_login(shcookie.u, shcookie.p)

form = cgi.FieldStorage()
# Handle a cancel request (posted by the per-booking form below) before
# fetching the booking list, so the cancelled ride no longer shows up.
if 'action' in form:
    act = form.getvalue("action")
    if act == "cancel":
        id = form.getvalue("id")
        shuttle.cancel_booking(id)

# ?ar=1 overrides the user's route filter and shows all routes.
show_all_routes = 'ar' in form

bookings = shuttle.get_bookings()

print '''<html>
<head>
<title>Connector bookings for %s</title>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" />
<link href="style.css" rel="stylesheet" />
</head>
<body>''' % (shcookie.u)

# all.json: presumably a cached route table; keys end in " AM"/" PM"
# (stripped by r[:-3]) and entry [2] is the route id — TODO confirm.
alldata = json.load(open("all.json"))
routes = [r[:-3] for r in alldata["true"].keys()]
routes.sort()
routes = [[r, alldata["true"][r + " AM"][2]] for r in routes
          if len(shcookie.routes) == 0 or show_all_routes or alldata["true"][r + " AM"][2] in shcookie.routes]

# header bar: one "new booking" button per visible route
print '<div id="newbar"><div id="newbarin">'
for r in routes:
    print '''<span class="newbutton">
<a href="new.py?r=%s" class="l">%s</a>
</span>''' % (r[1], r[0])
if len(shcookie.routes) != 0 and not show_all_routes:
    # Filtered view: offer a link to the unfiltered list.
    print '''<span class="newbutton"><a href="bookings.py?ar=1" class="l">all routes</a></span>'''
print '</div></div>'

# list of rides
if 'cal' in form:
    cal = form.getvalue("cal")
    print '''<div id="outlook">
<a href="outlook.py?cal=%s">download booked trip</a>
</div>''' % (cal)

print '<div id="bookings">'
for b in bookings:
    past = False
    dt = datetime.datetime.strptime(b['dd'] + ' ' + b['dt'], "%m/%d/%Y %I:%M %p")
    # Hide bookings older than ~3 hours (2h + 60min) entirely.
    if dt < datetime.datetime.now() - datetime.timedelta(hours=2) - datetime.timedelta(minutes=60):
        continue
    if "PM" in b['dt']:
        csspm = " pm"
    else:
        csspm = ""
    # Bookings older than ~2 hours are still shown but styled as past.
    if dt < datetime.datetime.now() - datetime.timedelta(hours=2) - datetime.timedelta(minutes=1):
        past = True
        csspm += " past"
    print '''<div class="booking%s">
<span class="t">%s</span>
<span class="r">%s</span>
<span class="dt">%s</span><span class="dl">%s</span>
<span class="gt">%s</span><span class="gl">%s</span>''' % (
        csspm, dt.strftime("%A, %b %d"), b['r'], b['dt'], b['dl'], b['gt'], b['gl'])
    if 'cn' in b:
        # 'cn' is the Connector (vehicle) number for the booked ride.
        print ' <span class="cn">Connector %s</span>' % (b['cn'])
        if not past:
            # Upcoming ride: look up the vehicle's live position and show
            # an ETA to the departure stop via the maps service.
            loc = shuttle.get_shuttle_location(b['r'], b['cn'])
            if loc != None:
                stop = shuttle.get_stop_gps(b['r'], b['dl'])
                if stop != None:
                    dst = shuttle.get_maps_eta((loc['lat'], loc['lon']), (stop[0], stop[1]))
                    print ' <span class="et">ETA: %s (<a href="https://www.google.com/maps?q=%f,%f">%s</a>)</span>' % (
                        dst[1], loc['lat'], loc['lon'], dst[0])
    if 'cl' in b:
        # 'cl' is the cancellation id; render a confirm-guarded cancel form
        # that posts back to this same script.
        print '''  <form method="post" action="%s" onsubmit="return confirm('Cancel?');">
<input type="hidden" name="action" value="cancel"/>
<input type="hidden" name="id" value="%s"/>
<input type="submit" value="cancel"/>
</form>''' % (os.environ["SCRIPT_NAME"], b['cl'])
    print '</div>'
print '</div></body><!--'
# print datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print '--></html>'