prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding: utf-8 -*-
# Generate | d by Django 1.11.2 on 2017-08-09 17:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``preprint_word`` choice field to ``PreprintProvider``.

    The field records which word a provider uses for its documents
    (preprint, paper, thesis, or none) and defaults to ``'preprint'``.
    """

    dependencies = [
        ('osf', '0048_merge_20170804_0910'),
    ]

    operations = [
        migrations.AddField(
            model_name='preprintprovider',
            name='preprint_word',
            field=models.CharField(
                choices=[('preprint', 'Preprint'), ('paper', 'Paper'),
                         ('thesis', 'Thesis'), ('none', 'None')],
                default='preprint',
                max_length=10,
            ),
        ),
    ]
|
nceclient
import glanceclient.exc
from nova import exception
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, host, port)
:raises ValueError
"""
o = urlparse.urlparse(image_href)
port = o.port or 80
host = o.netloc.split(':', 1)[0]
image_id = o.path.split('/')[-1]
return (image_id, host, port)
def _create_glance_client(context, host, port):
    """Instantiate a new glanceclient.Client object"""
    endpoint = 'http://%s:%s' % (host, port)
    kwargs = {}
    # Under keystone auth the caller's token must accompany every request.
    if FLAGS.auth_strategy == 'keystone':
        kwargs['token'] = context.auth_token
    return glanceclient.Client('1', endpoint, **kwargs)
def get_api_servers():
    """
    Shuffle a list of FLAGS.glance_api_servers and return an iterator
    that will cycle through the list, looping around to the beginning
    if necessary.
    """
    # Parse 'host:port' strings into (host, int(port)) pairs.
    servers = [(host, int(port))
               for host, port in (entry.split(':')
                                  for entry in FLAGS.glance_api_servers)]
    random.shuffle(servers)
    return itertools.cycle(servers)
class GlanceClientWrapper(object):
    """Glance client wrapper class that implements retries."""
    def __init__(self, context=None, host=None, port=None):
        # With an explicit host, pin every call to that one server;
        # otherwise a fresh client is created per call from the rotating
        # server list (built lazily in _create_onetime_client).
        if host is not None:
            self.client = self._create_static_client(context, host, port)
        else:
            self.client = None
        self.api_servers = None
    def _create_static_client(self, context, host, port):
        """Create a client that we'll use for every call."""
        self.host = host
        self.port = port
        return _create_glance_client(context, self.host, self.port)
    def _create_onetime_client(self, context):
        """Create a client that will be used for one call."""
        if self.api_servers is None:
            # Lazily build the shuffled, endlessly cycling server iterator.
            self.api_servers = get_api_servers()
        self.host, self.port = self.api_servers.next()  # Python 2 iterator API
        return _create_glance_client(context, self.host, self.port)
    def call(self, context, method, *args, **kwargs):
        """
        Call a glance client method. If we get a connection error,
        retry the request according to FLAGS.glance_num_retries.
        """
        # Only connectivity-class errors are retried; API errors propagate.
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                glanceclient.exc.InvalidEndpoint,
                glanceclient.exc.CommunicationError)
        num_attempts = 1 + FLAGS.glance_num_retries
        for attempt in xrange(1, num_attempts + 1):
            # Use the static client if configured, else the next server
            # in the cycle.
            client = self.client or self._create_onetime_client(context)
            try:
                return getattr(client.images, method)(*args, **kwargs)
            except retry_excs as e:
                host = self.host
                port = self.port
                extra = "retrying"
                # %-formatted by the logger with the locals() dict below.
                error_msg = _("Error contacting glance server "
                        "'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
                if attempt == num_attempts:
                    # Out of retries: log and convert to a nova exception.
                    extra = 'done trying'
                    LOG.exception(error_msg, locals())
                    raise exception.GlanceConnectionFailed(
                            host=host, port=port, reason=str(e))
                LOG.exception(error_msg, locals())
                time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
    # Default to the retrying wrapper when no client is injected.
    self._client = client or GlanceClientWrapper()
def detail(self, context, **kwargs):
    """Calls out to Glance for a list of detailed image information."""
    params = self._extract_query_params(kwargs)
    try:
        images = self._client.call(context, 'list', **params)
    except Exception:
        _reraise_translated_exception()
    # Translate every image the caller is allowed to see.
    return [self._translate_from_glance(image)
            for image in images
            if self._is_image_available(context, image)]
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir | ')
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
| # ensure filters is a dict
params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
params['filters'].setdefault('is_public', 'none')
return _params
def show(self, context, image_id):
    """Returns a dict with image data for the given opaque image id."""
    try:
        image = self._client.call(context, 'get', image_id)
    except Exception:
        _reraise_translated_image_exception(image_id)
    # Hide images the caller is not allowed to see.
    if not self._is_image_available(context, image):
        raise exception.ImageNotFound(image_id=image_id)
    return self._translate_from_glance(image)
def download(self, context, image_id, data):
    """Calls out to Glance for metadata and data and writes data."""
    try:
        chunks = self._client.call(context, 'data', image_id)
    except Exception:
        _reraise_translated_image_exception(image_id)
    # Stream the image payload chunk by chunk into the supplied file-like.
    for piece in chunks:
        data.write(piece)
def create(self, context, image_meta, data=None):
    """Store the image data and return the new image object."""
    outgoing = self._translate_to_glance(image_meta)
    if data:
        outgoing['data'] = data
    created = self._client.call(context, 'create', **outgoing)
    return self._translate_from_glance(created)
def update(self, context, image_id, image_meta, data=None,
           purge_props=True):
    """Modify the given image with the new data."""
    outgoing = self._translate_to_glance(image_meta)
    outgoing['purge_props'] = purge_props
    #NOTE(bcwaldon): id is not an editable field, but it is likely to be
    # passed in by calling code. Let's be nice and ignore it.
    outgoing.pop('id', None)
    if data:
        outgoing['data'] = data
    try:
        updated = self._client.call(context, 'update',
                                    image_id, **outgoing)
    except Exception:
        _reraise_translated_image_exception(image_id)
    else:
        return self._translate_from_glance(updated)
def delete(self, context, image_id):
    """Delete the given image.

    :raises: ImageNotFound if the image does not exist.
    :raises: NotAuthorized if the user is not an owner.
    """
    try:
        self._client.call(context, 'delete', image_id)
    except glanceclient.exc.NotFound:
        # Translate the glance-level error into nova's exception type.
        raise exception.ImageNotFound(image_id=image_id)
    return True
@staticmethod
def _translate_to_glance(image_meta):
    # Serialize complex values, then strip fields Glance won't accept.
    return _remove_read_only(_convert_to_string(image_meta))
@staticmethod
def _translate_from_glance(image):
    # Flatten the client object, then normalize timestamps and
    # string-encoded values into native types.
    converted = _extract_attributes(image)
    converted = _convert_timestamps_to_datetimes(converted)
    return _convert_from_string(converted)
@staticmethod
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and con |
class Solution:
    def exist(self, board, word):
        """Word-search: return True if *word* can be traced through
        horizontally/vertically adjacent cells of *board*, using each
        cell at most once.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Empty board can never contain the word (check hoisted out of
        # the loop; it previously sat uselessly inside the row loop).
        if not board:
            return False
        for i in range(len(board)):
            for j in range(len(board[0])):
                if self.dfs(i, j, board, word, 0):
                    return True
        return False

    def dfs(self, i, j, board, word, index):
        """Depth-first search from cell (i, j) matching word[index:]."""
        # All characters matched.
        if index == len(word):
            return True
        # Out of bounds.
        if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
            return False
        if word[index] != board[i][j]:
            return False
        # Mark the cell visited, explore the four neighbours, restore.
        saved = board[i][j]
        board[i][j] = '.'
        found = (self.dfs(i - 1, j, board, word, index + 1)
                 or self.dfs(i + 1, j, board, word, index + 1)
                 or self.dfs(i, j - 1, board, word, index + 1)
                 or self.dfs(i, j + 1, board, word, index + 1))
        board[i][j] = saved
        return found
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.security
=======================
Security related helpers such as secure password hashing tools and a
random token generator.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
:copyright: (c) 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
:license: Apache Software License, see LICENSE for details.
"""
from __future__ import division
import hashlib
import hmac
import math
import random
import string
import webapp2
# Module-level RNG: SystemRandom draws from the OS entropy source,
# making it suitable for security-sensitive token generation.
_rng = random.SystemRandom()
# Character pools for generate_random_string(). NOTE: string.lowercase,
# string.uppercase and string.letters are Python 2 names; Python 3
# renamed them ascii_lowercase / ascii_uppercase / ascii_letters.
HEXADECIMAL_DIGITS = string.digits + 'abcdef'
DIGITS = string.digits
LOWERCASE_ALPHA = string.lowercase
UPPERCASE_ALPHA = string.uppercase
LOWERCASE_ALPHANUMERIC = string.lowercase + string.digits
UPPERCASE_ALPHANUMERIC = string.uppercase + string.digits
ALPHA = string.letters
ALPHANUMERIC = string.letters + string.digits
ASCII_PRINTABLE = string.letters + string.digits + string.punctuation
ALL_PRINTABLE = string.printable
PUNCTUATION = string.punctuation
def generate_random_string(length=None, entropy=None, pool=ALPHANUMERIC):
    """Generates a random string using the given sequence pool.

    Exactly one of `length` or `entropy` must be supplied. When `entropy`
    is given, the length is derived from H = L * log2(N), i.e.
    L = ceil(H / log2(N)) where N is the number of distinct symbols in
    the pool. To generate stronger passwords, use ASCII_PRINTABLE as pool.

    :param length:
        The length of the random sequence. Use this or `entropy`, not both.
    :param entropy:
        Desired entropy in bits. Use this or `length`, not both.
        Use this to generate passwords based on entropy:
        http://en.wikipedia.org/wiki/Password_strength
    :param pool:
        A sequence of characters from which random characters are chosen.
        Default to case-sensitive alpha-numeric characters.
    :returns:
        A string with characters randomly chosen from the pool.
    """
    # Deduplicate the pool: repeated symbols would skew selection odds
    # and overstate the entropy estimate.
    pool = list(set(pool))
    if length and entropy:
        raise ValueError('Use length or entropy, not both.')
    if length <= 0 and entropy <= 0:
        raise ValueError('Length or entropy must be greater than 0.')
    if entropy:
        # ln(2) precomputed; L = H * ln(2) / ln(N).
        log_of_2 = 0.6931471805599453
        length = long(math.ceil((log_of_2 / math.log(len(pool))) * entropy))
    chosen = [_rng.choice(pool) for _ in xrange(length)]
    return ''.join(chosen)
def generate_password_hash(password, method='sha1', length=22, pepper=None):
    """Hashes a password.

    The format of the string returned includes the method that was used
    so that :func:`check_password_hash` can check the hash.

    This method can **not** generate unsalted passwords but it is possible
    to set the method to plain to enforce plaintext passwords. If a salt
    is used, hmac is used internally to salt the password.

    :param password:
        The password to hash.
    :param method:
        The hash method to use (``'md5'`` or ``'sha1'``).
    :param length:
        Length of the salt to be created.
    :param pepper:
        A secret constant stored in the application code.
    :returns:
        A formatted hashed string that looks like this::

            hash$method$salt

        (Note: this module stores the hash *first*, unlike Werkzeug's
        ``method$salt$hash`` layout; :func:`check_password_hash` splits
        the string in the same hash/method/salt order.)

    This function was ported and adapted from `Werkzeug`_.
    """
    # 'plain' gets no salt; every real hash method gets a random one.
    salt = method != 'plain' and generate_random_string(length) or ''
    hashval = hash_password(password, method, salt, pepper)
    if hashval is None:
        raise TypeError('Invalid method %r.' % method)
    return '%s$%s$%s' % (hashval, method, salt)
def check_password_hash(password, pwhash, pepper=None):
    """Checks a password against a given salted and hashed password value.

    In order to support unsalted legacy passwords this method supports
    plain text passwords, md5 and sha1 hashes (both salted and unsalted).

    :param password:
        The plaintext password to compare against the hash.
    :param pwhash:
        A hashed string like returned by :func:`generate_password_hash`.
    :param pepper:
        A secret constant stored in the application code.
    :returns:
        `True` if the password matched, `False` otherwise.

    This function was ported and adapted from `Werkzeug`_.
    """
    # A well-formed value splits into exactly hash, method and salt.
    parts = pwhash.split('$', 2)
    if len(parts) != 3:
        return False
    hashval, method, salt = parts
    expected = hash_password(password, method, salt, pepper)
    return expected == hashval
def hash_password(password, method, salt=None, pepper=None):
    """Hashes a password.

    Supports plaintext without salt, unsalted and salted passwords. In case
    salted passwords are used hmac is used.

    :param password:
        The password to be hashed.
    :param method:
        A method from ``hashlib``, e.g., `sha1` or `md5`, or `plain`.
    :param salt:
        A random salt string.
    :param pepper:
        A secret constant stored in the application code.
    :returns:
        A hashed password, or ``None`` when *method* names no hashlib
        algorithm.

    This function was ported and adapted from `Werkzeug`_.
    """
    password = webapp2._to_utf8(password)
    if method == 'plain':
        return password
    method = getattr(hashlib, method, None)
    if not method:
        return None
    if salt:
        # Salted: HMAC the password using the salt as key.
        h = hmac.new(webapp2._to_utf8(salt), password, method)
    else:
        h = method(password)
    if pepper:
        # Extra HMAC round keyed with the application-wide pepper.
        h = hmac.new(webapp2._to_utf8(pepper), h.hexdigest(), method)
    return h.hexdigest()
def compare_hashes(a, b):
    """Checks if two hash strings are identical.

    Accumulates the XOR of every character pair so the running time
    depends on the input length rather than on where the strings first
    differ.

    :param a:
        String 1.
    :param b:
        String 2.
    :returns:
        True if both strings are equal, False otherwise.
    """
    if len(a) != len(b):
        return False
    mismatch = 0
    for ca, cb in zip(a, b):
        mismatch |= ord(ca) ^ ord(cb)
    return not mismatch
# Old names.
# Kept as aliases for backward compatibility with earlier releases.
create_token = generate_random_string
create_password_hash = generate_password_hash
|
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
    # quote_plus() must escape a literal '+' (as %2B) while converting
    # spaces to '+'...
    self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
                     'alpha%2Bbeta+gamma')
    # ...unless '+' is passed in the safe-characters argument.
    self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
                     'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
    """Tests for unquote() and unquote_plus()

    See the doc string for quoting_Tests for details on quoting and such.
    """

    def test_unquoting(self):
        # Make sure unquoting of all ASCII values works
        escape_list = []
        for num in range(128):
            given = hexescape(chr(num))
            expect = chr(num)
            result = urllib.unquote(given)
            self.assertEqual(expect, result,
                             "using unquote(): %s != %s" % (expect, result))
            result = urllib.unquote_plus(given)
            self.assertEqual(expect, result,
                             "using unquote_plus(): %s != %s" %
                             (expect, result))
            escape_list.append(given)
        escape_string = ''.join(escape_list)
        del escape_list
        # '%25' unquotes to a single literal '%', hence exactly one '%'
        # should survive in the fully unquoted string.
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using quote(): not all characters escaped; %s" %
                         result)
        # NOTE(review): this repeats the unquote() check above verbatim
        # (only the failure message differs) -- looks like accidental
        # duplication carried over from the original test suite.
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped: "
                         "%s" % result)

    def test_unquoting_badpercent(self):
        # Test unquoting on bad percent-escapes
        # Malformed escapes must pass through unchanged, not raise.
        given = '%xab'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))

    def test_unquoting_mixed_case(self):
        # Test unquoting on mixed-case hex digits in the percent-escapes
        given = '%Ab%eA'
        expect = '\xab\xea'
        result = urllib.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))

    def test_unquoting_parts(self):
        # Make sure unquoting works when have non-quoted characters
        # interspersed
        given = 'ab%sd' % hexescape('c')
        expect = "abcd"
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using quote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))

    def test_unquoting_plus(self):
        # Test difference between unquote() and unquote_plus()
        # unquote() must leave '+' alone; only unquote_plus() maps it to
        # a space.
        given = "are+there+spaces..."
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        expect = given.replace('+', ' ')
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))

    def test_unquote_with_unicode(self):
        # Percent-escapes decode to raw byte values even for a unicode
        # argument; the UTF-8 pair %C3%BC stays two code points here.
        r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
        self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
    """Tests for urlencode()"""

    def help_inputtype(self, given, test_type):
        """Helper method for testing different input types.

        'given' must lead to only the pairs:
            * 1st, 1
            * 2nd, 2
            * 3rd, 3

        Test cannot assume anything about order.  Docs make no guarantee and
        have possible dictionary input.
        """
        expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
        result = urllib.urlencode(given)
        for expected in expect_somewhere:
            self.assertIn(expected, result,
                          "testing %s: %s not found in %s" %
                          (test_type, expected, result))
        self.assertEqual(result.count('&'), 2,
                         "testing %s: expected 2 '&'s; got %s" %
                         (test_type, result.count('&')))
        # '&' must sit between digit characters (i.e. between pairs).
        amp_location = result.index('&')
        on_amp_left = result[amp_location - 1]
        on_amp_right = result[amp_location + 1]
        self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
                        "testing %s: '&' not located in proper place in %s" %
                        (test_type, result))
        self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
                         "testing %s: "
                         "unexpected number of characters: %s != %s" %
                         (test_type, len(result), (5 * 3) + 2))

    def test_using_mapping(self):
        # Test passing in a mapping object as an argument.
        self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
                            "using dict as input type")

    def test_using_sequence(self):
        # Test passing in a sequence of two-item sequences as an argument.
        self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
                            "using sequence of two-item tuples as input")

    def test_quoting(self):
        # Make sure keys and values are quoted using quote_plus()
        given = {"&":"="}
        expect = "%s=%s" % (hexescape('&'), hexescape('='))
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)
        given = {"key name":"A bunch of pluses"}
        expect = "key+name=A+bunch+of+pluses"
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)

    def test_doseq(self):
        # Test that passing True for 'doseq' parameter works correctly
        given = {'sequence':['1', '2', '3']}
        # Without doseq the whole list is stringified and quoted as one
        # value; with doseq each element becomes its own pair.
        expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)
        result = urllib.urlencode(given, True)
        for value in given["sequence"]:
            expect = "sequence=%s" % value
            self.assertIn(expect, result)
        self.assertEqual(result.count('&'), 2,
                         "Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
    """Test pathname2url() and url2pathname()"""
def test_basic(self):
    # Make sure simple tests pass
    # pathname2url() and url2pathname() must round-trip between the
    # OS-specific path separator and forward slashes.
    expected_path = os.path.join("parts", "of", "a", "path")
    expected_url = "parts/of/a/path"
    result = urllib.pathname2url(expected_path)
    self.assertEqual(expected_url, result,
                     "pathname2url() failed; %s != %s" %
                     (result, expected_url))
    result = urllib.url2pathname(expected_url)
    self.assertEqual(expected_path, result,
                     "url2pathame() failed; %s != %s" %
                     (result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathnam2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.a |
from Source import Source
from Components.Element import cached
class List(Source, object):
    """The datasource of a listbox. Currently, the format depends on the used converter. So
    if you put a simple string list in here, you need to use a StringList converter, if you are
    using a "multi content list styled"-list, you need to use the StaticMultiList converter, and
    setup the "fonts".

    This has been done so another converter could convert the list to a different format, for example
    to generate HTML."""

    def __init__(self, list=None, enableWrapAround=False, item_height=25, fonts=None):
        if not list: list = []
        if not fonts: fonts = []
        Source.__init__(self)
        self.__list = list
        self.onSelectionChanged = [ ]
        self.item_height = item_height
        self.fonts = fonts
        # When set, changed()/entry_changed() callbacks are suppressed
        # (used by updateList to swap content silently).
        self.disable_callbacks = False
        self.enableWrapAround = enableWrapAround
        self.__style = "default"  # style might be an optional string which can be used to define different visualisations in the skin

    def setList(self, list):
        self.__list = list
        self.changed((self.CHANGED_ALL,))

    list = property(lambda self: self.__list, setList)

    def entry_changed(self, index):
        if not self.disable_callbacks:
            self.downstream_elements.entry_changed(index)

    def modifyEntry(self, index, data):
        self.__list[index] = data
        self.entry_changed(index)

    def count(self):
        return len(self.__list)

    def selectionChanged(self, index):
        if self.disable_callbacks:
            return
        # update all non-master targets
        for x in self.downstream_elements:
            if x is not self.master:
                x.index = index
        for x in self.onSelectionChanged:
            x()

    @cached
    def getCurrent(self):
        return self.master is not None and self.master.current

    current = property(getCurrent)

    def setIndex(self, index):
        if self.master is not None:
            self.master.index = index
            self.selectionChanged(index)

    @cached
    def getIndex(self):
        if self.master is not None:
            return self.master.index
        else:
            return None

    setCurrentIndex = setIndex

    index = property(getIndex, setIndex)

    def selectNext(self):
        # BUG FIX: restored assignment corrupted in the source
        # ("self.index | = 0" -> "self.index = 0").
        if self.getIndex() + 1 >= self.count():
            if self.enableWrapAround:
                self.index = 0
        else:
            self.index += 1
        self.setIndex(self.index)

    def selectPrevious(self):
        # BUG FIX: restored condition corrupted in the source
        # ("if | self.enableWrapAround" -> "if self.enableWrapAround").
        if self.getIndex() - 1 < 0:
            if self.enableWrapAround:
                self.index = self.count() - 1
        else:
            self.index -= 1
        self.setIndex(self.index)

    @cached
    def getStyle(self):
        return self.__style

    def setStyle(self, style):
        if self.__style != style:
            self.__style = style
            self.changed((self.CHANGED_SPECIFIC, "style"))

    style = property(getStyle, setStyle)

    def updateList(self, list):
        """Changes the list without changing the selection or emitting changed Events"""
        assert len(list) == len(self.__list)
        old_index = self.index
        self.disable_callbacks = True
        self.list = list
        self.index = old_index
        self.disable_callbacks = False

    def pageUp(self):
        # Jump ten entries up, wrapping from the first entry to the last.
        if self.getIndex() == 0:
            self.index = self.count() - 1
        elif self.getIndex() - 10 < 0:
            self.index = 0
        else:
            self.index -= 10
        self.setIndex(self.index)

    def pageDown(self):
        # Jump ten entries down, wrapping from the last entry to the first.
        if self.getIndex() == self.count() - 1:
            self.index = 0
        elif self.getIndex() + 10 >= self.count():
            self.index = self.count() - 1
        else:
            self.index += 10
        self.setIndex(self.index)

    def up(self):
        self.selectPrevious()

    def down(self):
        self.selectNext()

    def getSelectedIndex(self):
        return self.getIndex()
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
__author__ = "golden"
__date__ = '2017/10/12'
import asyncio
class EchoClientProtocol:
    """Datagram protocol that sends one message and prints any reply.

    Stops the running event loop when the connection goes away.
    """

    def __init__(self, message, loop):
        self.message = message
        self.loop = loop
        self.transport = None

    def connection_made(self, transport):
        # Called by the event loop once the UDP endpoint is ready.
        self.transport = transport
        print('Send:', self.message)
        self.transport.sendto(self.message.encode())

    def datagram_received(self, data, addr):
        # One reply is enough; close the transport afterwards.
        print("Received:", data.decode())
        print("Close the socket")
        self.transport.close()

    def error_received(self, exc):
        print('Error received:', exc)

    def connection_lost(self, exc):
        # BUG FIX: restored statement corrupted in the source
        # ("p | rint" -> "print").
        print("Socket closed, stop the event loop")
        loop = asyncio.get_event_loop()
        loop.stop()
# Route every socket through a local SOCKS5 proxy by monkey-patching
# socket.socket with PySocks' socksocket.
import socks
import socket
# BUG FIX: restored proxy address corrupted in the source
# ("'1 | 27.0.0.1'" -> "'127.0.0.1'").
socks.set_default_proxy(socks.SOCKS5, port=8888, addr='127.0.0.1',
                        username='golden', password='golden')
socket.socket = socks.socksocket
loop = asyncio.get_event_loop()
message = "Hello World!"
# Create a UDP endpoint that talks to the echo server on localhost:2222.
connect = loop.create_datagram_endpoint(
    lambda: EchoClientProtocol(message, loop),
    remote_addr=('127.0.0.1', 2222))
transport, protocol = loop.run_until_complete(connect)
loop.run_forever()
transport.close()
loop.close()
|
### Paper RMSD selections ###
# (name, atom-selection string) pairs used for the paper's RMSD
# calculations; all selections exclude hydrogens ("not name H*").
sel = []
sel.append(['a2_subdomain1_backbone',
            'backbone and resid 57:68 and not name H*'])
# BUG FIX: restored selection strings corrupted in the source
# ("backbon | e" -> "backbone", "253 | :258" -> "253:258").
sel.append(['motif_2_backbone',
            'backbone and resid 117:124 and not name H*'])
sel.append(['aligned_CAs',
            'protein and (resid 20:25 50:55 73:75 90:94 112:116 142:147 165:169 190:194 214:218 236:240 253:258 303:307) and name CA'])
sel.append(['aligned_betas',
            'protein and (resid 20:25 50:55 73:75 90:94 112:116 142:147 165:169 190:194 214:218 236:240 253:258 303:307) and not name H*'])
#sel.append(['',''])
|
import sys
import click
import os
import subprocess
from packageinfo import BUILD, VERSION, NAME
# Refuse to run outside a sourced EDM/OpenFOAM environment: edmenv.sh
# exports WM_PROJECT.
if "WM_PROJECT" not in os.environ:
    print("To run this command you must source edmenv.sh first")
    sys.exit(1)
# The version of the buildcommon to checkout.
BUILDCOMMONS_VERSION="v0.1"
def bootstrap_devenv():
    """Clone the shared build recipes into .devenv (once) and put them
    on sys.path so `buildcommons` becomes importable."""
    try:
        os.makedirs(".devenv")
    except OSError:
        # Directory already exists; nothing to do.
        pass
    if not os.path.exists(".devenv/buildrecipes-common"):
        subprocess.check_call([
            "git", "clone", "-b", BUILDCOMMONS_VERSION,
            "http://github.com/simphony/buildrecipes-common.git",
            ".devenv/buildrecipes-common"
        ])
    sys.path.insert(0, ".devenv/buildrecipes-common")
# Fetch the shared build helpers, then import them; the import can only
# succeed after bootstrap_devenv() has placed the clone on sys.path.
bootstrap_devenv()
import buildcommons as common  # noqa
workspace = common.workspace()
common.edmenv_setup()
@click.group()
def cli():
    # Root command group; subcommands register themselves via @cli.command().
    pass
@cli.command()
def egg():
    # Build the EDM egg for this repo, then build the egg of the wrapped
    # openfoam-interface submodule.
    common.local_repo_to_edm_egg(".", name=NAME, version=VERSION, build=BUILD)
    with common.cd("openfoam-interface/internal-interface/wrapper"):
        common.run("python edmsetup.py egg")
@cli.command()
def upload_egg():
    """Upload the built egg (and the wrapper submodule's egg) to the EDM repo."""
    egg_path = "endist/{NAME}-{VERSION}-{BUILD}.egg".format(
        NAME=NAME,
        VERSION=VERSION,
        BUILD=BUILD)
    click.echo("Uploading {} to EDM repo".format(egg_path))
    common.upload_egg(egg_path)
    # BUG FIX: restored path and print statement corrupted in the source
    # ("internal-interf | ace" and "| print(...)").
    with common.cd("openfoam-interface/internal-interface/wrapper"):
        try:
            common.run("python edmsetup.py upload_egg")
        except subprocess.CalledProcessError as e:
            # Best-effort for the submodule: report the failure and go on.
            print("Error during egg upload of submodule: {}. Continuing.".format(e))
    click.echo("Done")
@cli.command()
def clean():
    # Remove build artifacts and the bootstrapped dev environment.
    click.echo("Cleaning")
    common.clean(["endist", ".devenv"])
# Dispatch to the selected subcommand.
cli()
|
import time
from bs4 import BeautifulSoup
import sys
if (sys.version_info > (3, 0)):
# Python 3 code in this block
import urllib.request as urllib2
else:
# Python 2 code in this block
import urllib2
import datetime, re, os
class NonNCEPModel:
    """Base Class for all Non-NCEP models."""

    def __init__(self):
        self.modelUrls = ''
        self.isNCEPSource = False
        return

    def getPreviousTime(self, model, currentHour):
        """Gets the previous forecast hour for a given model, and forecast hour."""
        if currentHour == '000':
            return '000'
        defaultHours = self.getDefaultHours()
        defaultHours.sort()  # assert ascending order
        for (idx, hour) in enumerate(defaultHours):
            if currentHour == hour:
                # NOTE(review): when currentHour is the first entry,
                # idx - 1 is -1 and this wraps to the *last* hour --
                # possibly unintended; preserved as-is.
                return defaultHours[idx - 1]
        return '000'

    def setDefaultHours(self):
        """Initialize all of our models hour stamp data to defaults."""
        # Default times.
        self.modelTimes = self.defaultTimes
        return

    def getDefaultHours(self):
        """Return the default forecast-hour stamps.

        The original file defined this method twice with behaviorally
        identical bodies; the duplicate definition has been removed.
        """
        return self.defaultTimes

    def getName(self):
        # (Was defined twice with identical bodies; deduplicated.)
        return self.name

    def getAlias(self):
        # NOTE(review): tests self.modelAliases but returns
        # self.modelAlias -- one of the two attribute names is likely a
        # typo; behavior preserved pending confirmation.
        if self.modelAliases != "":
            return self.modelAlias
        else:
            return self.name

    def getForecastHourInt(self, filename, noPrefix=False):
        # Strip the leading prefix character (e.g. 'f024' -> 24).
        fhour = self.getForecastHour(filename, noPrefix)
        return int(fhour[1:])

    def getForecastHour(self, fileName, noPrefix=False):
        # Subclasses override; the base model has no hour stamp.
        return ""

    def getLastForecastHour(self):
        return "000"

    def getRun(self):
        return
# Generated by Django 2.0.10 on 2019-01-14 11:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required ``name`` field to ``ApplicationPeriod``.

    ``preserve_default=False`` records that the ``'Navn'`` default only
    exists to populate existing rows during this migration.
    """

    dependencies = [
        ('applications', '0008_applicationperiod'),
    ]

    operations = [
        migrations.AddField(
            model_name='applicationperiod',
            name='name',
            field=models.CharField(default='Navn', max_length=50,
                                   verbose_name='Navn'),
            preserve_default=False,
        ),
    ]
|
# -*- encoding: utf-8 -*-
from abjad.tools import durationtools
from abjad.tools.datastructuretools.TypedList import TypedList
class CompoundInequality(TypedList):
'''A compound time-relation inequality.
::
>>> compound_inequality = timespantools.CompoundInequality([
... timespantools.CompoundInequality([
... 'timespan_1.start_offset <= timespan_2.start_offset',
... 'timespan_2.start_offset < timespan_1.stop_offset'],
... logical_operator='and'),
... timespantools.CompoundInequality([
... 'timespan_2.start_offset <= timespan_1.start_offset',
... 'timespan_1.start_offset < timespan_2.stop_offset'],
... logical_operator='and')],
... logical_operator='or',
... )
::
>>> print(format(compound_inequality))
timespantools.CompoundInequality(
[
timespantools.CompoundInequality(
[
timespantools.SimpleInequality('timespan_1.start_offset <= timespan_2.start_offset'),
timespantools.SimpleInequality('timespan_2.start_offset < timespan_1.stop_offset'),
],
logical_operator='and',
),
timespantools.CompoundInequality(
[
timespantools.SimpleInequality('timespan_2.start_offset <= timespan_1.start_offset'),
timespantools.SimpleInequality('timespan_1.start_offset < timespan_2.stop_offset'),
],
logical_operator='and',
),
],
logical_operator='or',
)
'''
### CLASS VARIABLES ###

# No per-instance __dict__; instances carry only these two slots.
__slots__ = (
    '_logical_operator',
    '_name',
    )

# Maps each textual logical operator to its symbolic form.
logical_operator_dictionary = {
    'and': '&',
    'or': '|',
    'xor': '^',
    }
### INITIALIZER ###

def __init__(
    self,
    items=None,
    logical_operator='and',
    ):
    # Items are coerced and stored via TypedList (see _item_coercer);
    # the operator ('and', 'or' or 'xor') drives evaluate().
    TypedList.__init__(self,
        items=items,
        )
    self._logical_operator = logical_operator
### PRIVATE PROPERTIES ###

@property
def _item_coercer(self):
    # The callable TypedList uses to normalize new items: strings become
    # SimpleInequality instances; inequality objects pass through
    # untouched; anything else is rejected.
    from abjad.tools import timespantools
    def coerce_(expr):
        if isinstance(expr, str):
            return timespantools.SimpleInequality(expr)
        elif isinstance(expr, timespantools.SimpleInequality):
            return expr
        elif isinstance(expr, timespantools.CompoundInequality):
            return expr
        else:
            raise TypeError(expr)
    return coerce_
### PUBLIC PROPERTIES ###

@property
def logical_operator(self):
    r'''Compound inequality logical operator.

    Returns the string passed at initialization ('and', 'or' or 'xor').
    '''
    return self._logical_operator
### PUBLIC METHODS ###

def evaluate(
    self,
    timespan_1_start_offset,
    timespan_1_stop_offset,
    timespan_2_start_offset,
    timespan_2_stop_offset,
    ):
    r'''Evaluates compound inequality.

    Evaluates every contained inequality against the two timespans and
    combines the results with this inequality's logical operator.

    Returns boolean.
    '''
    from abjad.tools import timespantools
    truth_values = []
    for inequality in self:
        # SimpleInequality and nested CompoundInequality expose the same
        # evaluate() signature, so a single branch handles both
        # (resolves the old TODO to compress the two identical branches).
        if isinstance(
            inequality,
            (timespantools.SimpleInequality, type(self)),
            ):
            truth_value = inequality.evaluate(
                timespan_1_start_offset, timespan_1_stop_offset,
                timespan_2_start_offset, timespan_2_stop_offset)
            truth_values.append(truth_value)
    if self.logical_operator == 'and':
        truth_value = all(truth_values)
    elif self.logical_operator == 'or':
        truth_value = any(truth_values)
    elif self.logical_operator == 'xor':
        # Exactly one contained inequality may be true.
        truth_value = bool(len([x for x in truth_values if x]) == 1)
    else:
        message = 'unknown logical operator: {!r}.'
        message = message.format(self.logical_operator)
        raise ValueError(message)
    return truth_value
def evaluate_offset_inequality(
    self,
    timespan_start,
    timespan_stop,
    offset,
    ):
    r'''Evaluates offset inequality.

    Evaluates every contained inequality against the timespan bounds and
    offset, then combines the results with this inequality's logical
    operator.

    Returns boolean.
    '''
    from abjad.tools import timespantools
    truth_values = []
    for inequality in self:
        # SimpleInequality and nested CompoundInequality expose the same
        # evaluate_offset_inequality() signature, so one branch handles
        # both; anything else is a type error (as before).
        if isinstance(
            inequality,
            (timespantools.SimpleInequality, type(self)),
            ):
            truth_value = inequality.evaluate_offset_inequality(
                timespan_start, timespan_stop, offset)
            truth_values.append(truth_value)
        else:
            message = 'unknown inequality: {!r}.'
            message = message.format(inequality)
            raise TypeError(message)
    assert truth_values, repr(truth_values)
    if self.logical_operator == 'and':
        truth_value = all(truth_values)
    elif self.logical_operator == 'or':
        truth_value = any(truth_values)
    elif self.logical_operator == 'xor':
        # Exactly one contained inequality may be true.
        truth_value = bool(len([x for x in truth_values if x]) == 1)
    else:
        message = 'unknown logical operator: {!r}.'
        message = message.format(self.logical_operator)
        raise ValueError(message)
    return truth_value
def get_offset_indices(
self,
timespan_1,
timespan_2_start_offsets,
timespan_2_stop_offsets,
):
r'''Gets offset indices of compound inequality.
'''
| from abjad.tools import timespantools
from abjad.tools import timespantools
timespans = timespantools.TimespanInventory()
for element in self:
# TODO: compress the following two branches
if isinstance(element, type(self)):
result = element.get_offset_indices(
timespan_1,
| timespan_2_start_offsets,
timespan_2_stop_offsets)
timespans.extend(result)
elif isinstance(element, timespantools.SimpleInequality):
offset_indices = element.get_offset_indices(
timespan_1,
timespan_2_start_offsets,
timespan_2_stop_offsets)
timespan = timespantools.Timespan(*offset_indices)
timespans.append(timespan)
else:
message = 'unknown inequality: {!r}.'
message = message(element)
raise TypeError(message)
if self.logical_operator == 'and':
result = timespans.compute_logical_and()
elif self.logical_operator == 'or':
timespans.sort()
result = timespans.compute_logical_or()
elif self.logical_operator == 'xor':
result = timespans.compute_logical_xor()
else:
message = 'unknown logical operator: {!r}.'
message = mesage.format(self.logical_operator)
raise ValueError(message)
return result |
from mosek.fusion import Model, Domain, Expr, ObjectiveSense
import sys
# TODO: need a way to determine if we're adding something in front of an
# existing clique, or intersecting with it, etc.
# Example 2. Column generation approach.
# Iteration 2, subproblem.
# Output:
#
# Images:
# w_1 = 1
# w_2 = 1
# w_3 = 1
#
# Commands:
# y_a = 0
# y_b = 1
# y_c = 0
# y_d = 0
#
# Interactions:
# m_2 = 0
# m_3 = 0
# n_b = 1
# n_c = 0
# n_d = 0
# Cost/benefit coefficient per command A-D; presumably the duals/savings
# from the column-generation master problem -- TODO confirm.
r = {'A': 5.0, 'B': 10.0, 'C': 7.0, 'D': 12.0}
m = Model()
# Reusable domain pair that makes a MOSEK Fusion variable binary (0/1 integer).
binary = (Domain.inRange(0.0, 1.0), Domain.isInteger())
# Variables to determine if we include commands in the clique.
y_a = m.variable('y_a', *binary)
y_b = m.variable('y_b', *binary)
y_c = m.variable('y_c', *binary)
y_d = m.variable('y_d', *binary)
# Variables to determine if we include images in the clique.
w_1 = m.variable('w_1', *binary)
w_2 = m.variable('w_2', *binary)
w_3 = m.variable('w_3', *binary)
# Variables to enforce relationships between y and w decisions:
# z_i_x == 1 iff image i and command x are both in the new clique.
z_1_a = m.variable('z_1_a', *binary)
z_1_b = m.variable('z_1_b', *binary)
z_2_a = m.variable('z_2_a', *binary)
z_2_b = m.variable('z_2_b', *binary)
z_2_c = m.variable('z_2_c', *binary)
z_2_d = m.variable('z_2_d', *binary)
z_3_b = m.variable('z_3_b', *binary)
z_3_c = m.variable('z_3_c', *binary)
z_3_d = m.variable('z_3_d', *binary)
# Variables to take images (m_i) or commands (n_x) from another clique.
m_1 = m.variable('m_1', *binary)
m_2 = m.variable('m_2', *binary)
m_3 = m.variable('m_3', *binary)
n_b = m.variable('n_b', *binary)
n_c = m.variable('n_c', *binary)
n_d = m.variable('n_d', *binary)
# If something is taken out of a clique, it must either be put in the new
# clique or incur its own cost (q_i_x below tracks the latter case).
q_1_b = m.variable('q_1_b', *binary)
q_2_b = m.variable('q_2_b', *binary)
q_2_c = m.variable('q_2_c', *binary)
q_2_d = m.variable('q_2_d', *binary)
q_3_b = m.variable('q_3_b', *binary)
q_3_c = m.variable('q_3_c', *binary)
q_3_d = m.variable('q_3_d', *binary)
# Inclusion of an image and a command means that image must
# use all command invocations from the clique.
# Standard linearization of z = w AND y, for instance:
# (1) z_1_a <= w_1
# (2) z_1_a <= y_a
# (3) z_1_a >= w_1 + y_a - 1
m.constraint('c_1_a_1', Expr.sub(z_1_a, w_1), Domain.lessThan(0.0))
m.constraint('c_1_a_2', Expr.sub(z_1_a, y_a), Domain.lessThan(0.0))
m.constraint('c_1_a_3', Expr.sub(z_1_a, Expr.add([w_1, y_a])), Domain.greaterThan(-1.0))
m.constraint('c_1_b_1', Expr.sub(z_1_b, w_1), Domain.lessThan(0.0))
m.constraint('c_1_b_2', Expr.sub(z_1_b, y_b), Domain.lessThan(0.0))
m.constraint('c_1_b_3', Expr.sub(z_1_b, Expr.add([w_1, y_b])), Domain.greaterThan(-1.0))
# Incompatible image/command pairs: forbid both being chosen (w + y <= 1).
m.constraint('c_1_c', Expr.sub(0.0, Expr.add([w_1, y_c])), Domain.greaterThan(-1.0))
m.constraint('c_1_d', Expr.sub(0.0, Expr.add([w_1, y_d])), Domain.greaterThan(-1.0))
m.constraint('c_2_a_1', Expr.sub(z_2_a, w_2), Domain.lessThan(0.0))
m.constraint('c_2_a_2', Expr.sub(z_2_a, y_a), Domain.lessThan(0.0))
m.constraint('c_2_a_3', Expr.sub(z_2_a, Expr.add([w_2, y_a])), Domain.greaterThan(-1.0))
m.constraint('c_2_b_1', Expr.sub(z_2_b, w_2), Domain.lessThan(0.0))
m.constraint('c_2_b_2', Expr.sub(z_2_b, y_b), Domain.lessThan(0.0))
m.constraint('c_2_b_3', Expr.sub(z_2_b, Expr.add([w_2, y_b])), Domain.greaterThan(-1.0))
m.constraint('c_2_c_1', Expr.sub(z_2_c, w_2), Domain.lessThan(0.0))
m.constraint('c_2_c_2', Expr.sub(z_2_c, y_c), Domain.lessThan(0.0))
m.constraint('c_2_c_3', Expr.sub(z_2_c, Expr.add([w_2, y_c])), Domain.greaterThan(-1.0))
m.constraint('c_2_d_1', Expr.sub(z_2_d, w_2), Domain.lessThan(0.0))
m.constraint('c_2_d_2', Expr.sub(z_2_d, y_d), Domain.lessThan(0.0))
m.constraint('c_2_d_3', Expr.sub(z_2_d, Expr.add([w_2, y_d])), Domain.greaterThan(-1.0))
m.constraint('c_3_a', Expr.sub(0.0, Expr.add([w_3, y_a])), Domain.greaterThan(-1.0))
m.constraint('c_3_b_1', Expr.sub(z_3_b, w_3), Domain.lessThan(0.0))
m.constraint('c_3_b_2', Expr.sub(z_3_b, y_b), Domain.lessThan(0.0))
m.constraint('c_3_b_3', Expr.sub(z_3_b, Expr.add([w_3, y_b])), Domain.greaterThan(-1.0))
m.constraint('c_3_c_1', Expr.sub(z_3_c, w_3), Domain.lessThan(0.0))
m.constraint('c_3_c_2', Expr.sub(z_3_c, y_c), Domain.lessThan(0.0))
m.constraint('c_3_c_3', Expr.sub(z_3_c, Expr.add([w_3, y_c])), Domain.greaterThan(-1.0))
m.constraint('c_3_d_1', Expr.sub(z_3_d, w_3), Domain.lessThan(0.0))
m.constraint('c_3_d_2', Expr.sub(z_3_d, y_d), Domain.lessThan(0.0))
m.constraint('c_3_d_3', Expr.sub(z_3_d, Expr.add([w_3, y_d])), Domain.greaterThan(-1.0))
# Taking something from an existing clique means we must incur its cost:
# z_i_x can only be 1 if image i (m_i) or command x (n_x) was taken out,
# and anything taken out but unused must pay via q_i_x.
m.constraint('q_1_b', Expr.sub(z_1_b, Expr.add([m_1, n_b])), Domain.lessThan(0.0))
m.constraint('d_1_b_q_1_b', Expr.sub(Expr.add([z_1_b, q_1_b]), Expr.add([m_1, n_b])), Domain.greaterThan(0.0))
m.constraint('d_2_b', Expr.sub(z_2_b, Expr.add([m_2, n_b])), Domain.lessThan(0.0))
m.constraint('d_2_c', Expr.sub(z_2_c, Expr.add([m_2, n_c])), Domain.lessThan(0.0))
m.constraint('d_2_d', Expr.sub(z_2_d, Expr.add([m_2, n_d])), Domain.lessThan(0.0))
m.constraint('d_2_b_q_2_b', Expr.sub(Expr.add([z_2_b, q_2_b]), Expr.add([m_2, n_b])), Domain.greaterThan(0.0))
m.constraint('d_2_c_q_2_c', Expr.sub(Expr.add([z_2_c, q_2_c]), Expr.add([m_2, n_c])), Domain.greaterThan(0.0))
m.constraint('d_2_d_q_2_d', Expr.sub(Expr.add([z_2_d, q_2_d]), Expr.add([m_2, n_d])), Domain.greaterThan(0.0))
m.constraint('d_3_b', Expr.sub(z_3_b, Expr.add([m_3, n_b])), Domain.lessThan(0.0))
m.constraint('d_3_c', Expr.sub(z_3_c, Expr.add([m_3, n_c])), Domain.lessThan(0.0))
m.constraint('d_3_d', Expr.sub(z_3_d, Expr.add([m_3, n_d])), Domain.lessThan(0.0))
m.constraint('d_3_b_q_3_b', Expr.sub(Expr.add([z_3_b, q_3_b]), Expr.add([m_3, n_b])), Domain.greaterThan(0.0))
m.constraint('d_3_c_q_3_c', Expr.sub(Expr.add([z_3_c, q_3_c]), Expr.add([m_3, n_c])), Domain.greaterThan(0.0))
m.constraint('d_3_d_q_3_d', Expr.sub(Expr.add([z_3_d, q_3_d]), Expr.add([m_3, n_d])), Domain.greaterThan(0.0))
# Maximize the amount we can improve our objective by adding a new clique.
obj1 = [Expr.mul(c, y) for c, y in [
    (r['A'], y_a), (r['B'], y_b), (r['C'], y_c), (r['D'], y_d)
]]
obj2 = [Expr.mul(c, z) for c, z in [
    # Individual image/command pairs
    (r['A'], z_1_a),
    (r['A'], z_2_a),
]]
obj3 = [Expr.mul(c, z) for c, z in [
    # Individual image/command pairs for commands that are now run alone
    (r['B'], q_1_b),
    (r['B'], q_2_b), (r['C'], q_2_c), (r['D'], q_2_d),
    (r['B'], q_3_b), (r['C'], q_3_c), (r['D'], q_3_d),
]]
obj4 = [Expr.mul(c, y) for c, y in [
    # Commands taken out of the existing cliques
    (r['B'], n_b), (r['C'], n_c), (r['D'], n_d)
]]
# Net benefit = (pair reuse + freed commands) - (clique cost + standalone runs).
m.objective('w', ObjectiveSense.Maximize,
    Expr.sub(Expr.add(obj2 + obj4), Expr.add(obj1 + obj3))
)
m.setLogHandler(sys.stdout)
m.solve()
# Report solved decision values (Python 2 print statements).
print
print 'Images:'
print '\tw_1 = %.0f' % w_1.level()[0]
print '\tw_2 = %.0f' % w_2.level()[0]
print '\tw_3 = %.0f' % w_3.level()[0]
print
print 'Commands:'
print '\ty_a = %.0f' % y_a.level()[0]
print '\ty_b = %.0f' % y_b.level()[0]
print '\ty_c = %.0f' % y_c.level()[0]
print '\ty_d = %.0f' % y_d.level()[0]
print
print 'Interactions:'
print '\tm_1 = %.0f' % m_1.level()[0]
print '\tm_2 = %.0f' % m_2.level()[0]
print '\tm_3 = %.0f' % m_3.level()[0]
print '\tn_b = %.0f' % n_b.level()[0]
print '\tn_c = %.0f' % n_c.level()[0]
print '\tn_d = %.0f' % n_d.level()[0]
print
|
# -*- coding: utf-8 -*-
'''
Management of LVS (Linux Virtual Server) Real Server
====================================================
'''
def __virtual__():
    '''
    Only load if the lvs module is available in __salt__
    '''
    if 'lvs.get_rules' in __salt__:
        return 'lvs_server'
    return False
def present(name,
            protocol=None,
            service_address=None,
            server_address=None,
            packet_forward_method='dr',
            weight=1
            ):
    '''
    Ensure that the named real server is present in the given LVS service.

    name
        The LVS server name
    protocol
        The service protocol
    service_address
        The LVS service address
    server_address
        The real server address.
    packet_forward_method
        The LVS packet forwarding method(``dr`` for direct routing, ``tunnel`` for tunneling, ``nat`` for network access translation).
    weight
        The capacity of a server relative to the others in the pool.

    .. code-block:: yaml

        lvsrs:
          lvs_server.present:
            - protocol: tcp
            - service_address: 1.1.1.1:80
            - server_address: 192.168.0.11:8080
            - packet_forward_method: dr
            - weight: 10
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Check whether the real server already exists in the service.
    server_check = __salt__['lvs.check_server'](protocol=protocol,
                                                service_address=service_address,
                                                server_address=server_address)
    if server_check is True:
        # Present: also verify forwarding method and weight match.
        server_rule_check = __salt__['lvs.check_server'](protocol=protocol,
                                                         service_address=service_address,
                                                         server_address=server_address,
                                                         packet_forward_method=packet_forward_method,
                                                         weight=weight)
        if server_rule_check is True:
            ret['comment'] = 'LVS Server {0} in service {1}({2}) is present'.format(name, service_address, protocol)
            return ret
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'LVS Server {0} in service {1}({2}) is present but some options should update'.format(name, service_address, protocol)
                return ret
            else:
                server_edit = __salt__['lvs.edit_server'](protocol=protocol,
                                                          service_address=service_address,
                                                          server_address=server_address,
                                                          packet_forward_method=packet_forward_method,
                                                          weight=weight)
                if server_edit is True:
                    ret['comment'] = 'LVS Server {0} in service {1}({2}) has been updated'.format(name, service_address, protocol)
                    ret['changes'][name] = 'Update'
                    return ret
                else:
                    ret['result'] = False
                    ret['comment'] = 'LVS Server {0} in service {1}({2}) update failed({3})'.format(name, service_address, protocol, server_edit)
                    return ret
    else:
        if __opts__['test']:
            ret['comment'] = 'LVS Server {0} in service {1}({2}) is not present and needs to be created'.format(name, service_address, protocol)
            ret['result'] = None
            return ret
        else:
            server_add = __salt__['lvs.add_server'](protocol=protocol,
                                                    service_address=service_address,
                                                    server_address=server_address,
                                                    packet_forward_method=packet_forward_method,
                                                    weight=weight)
            if server_add is True:
                ret['comment'] = 'LVS Server {0} in service {1}({2}) has been created'.format(name, service_address, protocol)
                ret['changes'][name] = 'Present'
                return ret
            else:
                # BUG FIX: message said 'LVS Service' while every other
                # message in this module says 'LVS Server'.
                ret['comment'] = 'LVS Server {0} in service {1}({2}) create failed({3})'.format(name, service_address, protocol, server_add)
                ret['result'] = False
                return ret
def absent(name, protocol=None, service_address=None, server_address=None):
    '''
    Ensure the LVS Real Server in specified service is absent.

    name
        The name of the LVS server.
    protocol
        The service protocol(only support ``tcp``, ``udp`` and ``fwmark`` service).
    service_address
        The LVS service address.
    server_address
        The LVS real server address.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Guard clause: nothing to remove when the server is not configured.
    exists = __salt__['lvs.check_server'](protocol=protocol,
                                          service_address=service_address,
                                          server_address=server_address)
    if exists is not True:
        ret['comment'] = 'LVS Server {0} in service {1}({2}) is not present, so it cannot be removed'.format(name, service_address, protocol)
        return ret
    # Dry-run mode only reports what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'LVS Server {0} in service {1}({2}) is present and needs to be removed'.format(name, service_address, protocol)
        return ret
    removed = __salt__['lvs.delete_server'](protocol=protocol,
                                            service_address=service_address,
                                            server_address=server_address)
    if removed is True:
        ret['comment'] = 'LVS Server {0} in service {1}({2}) has been removed'.format(name, service_address, protocol)
        ret['changes'][name] = 'Absent'
        return ret
    ret['comment'] = 'LVS Server {0} in service {1}({2}) removed failed({3})'.format(name, service_address, protocol, removed)
    ret['result'] = False
    return ret
|
from ete3 import Tree, TreeStyle, Tree, TextFace, NodeStyle, add_face_to_node
import sys
from collections import OrderedDict
import os
# Build the list of per-run output directories under out/ (each entry keeps
# a trailing slash so per-file paths can be appended directly).
filenames = []
for d in os.listdir('out/'):
    filenames.append('out/' + d + '/')
# roman numeral thing just incase things get crowded...
# roman numeral thing just incase things get crowded...
def write_roman(num):
    """Convert a positive integer to an upper-case Roman numeral string.

    Bug fix: the original inner generator made a recursive call whose
    returned generator was discarded (dead code). The single pass over the
    denominations already yields the complete numeral, so the recursion is
    removed.
    """
    # Denominations in descending order; OrderedDict preserves that order.
    roman = OrderedDict()
    roman[1000] = "M"
    roman[900] = "CM"
    roman[500] = "D"
    roman[400] = "CD"
    roman[100] = "C"
    roman[90] = "XC"
    roman[50] = "L"
    roman[40] = "XL"
    roman[10] = "X"
    roman[9] = "IX"
    roman[5] = "V"
    roman[4] = "IV"
    roman[1] = "I"
    parts = []
    for value, symbol in roman.items():
        count, num = divmod(num, value)
        parts.append(symbol * count)
        if num <= 0:
            break
    return "".join(parts)
for filename in filenames:
    # NOTE(review): the skip check looks for 'tree_map.png' but the render
    # below writes 'mytree.png' -- confirm which artifact is intended,
    # otherwise this skip may never trigger.
    if not os.path.exists(filename + 'tree_map.png'):
        # The .html file is read as a Newick tree string -- presumably an
        # artifact naming quirk of the producer; TODO confirm.
        t_str = open(filename + 'tree_map.html', 'r').read()
        t = Tree(t_str, format=1)
        ts = TreeStyle()
        ts.show_leaf_name = False
        ts.mode = "c"  # circular layout
        ts.show_scale = False
        ts.optimal_scale_level = "full"
        #ts.arc_start = -180 # 0 degrees = 3 o'clock
        #ts.arc_span = 180
        def my_layout(node):
            # Attach each node's name as a text face below its branch.
            F = TextFace(node.name, tight_text=False)
            F.margin_top = 1
            F.margin_right = 5
            F.margin_left = 5
            add_face_to_node(F, node, column=0, position="branch-bottom")
        ts.layout_fn = my_layout
        # One shared node style for every node in the tree.
        nstyle = NodeStyle()
        nstyle["size"] = 15
        nstyle["hz_line_width"] = 2
        nstyle["vt_line_width"] = 2
        i = 1
        for n in t.traverse():
            n.set_style(nstyle)
            #ts.legend.add_face(TextFace(write_roman(i).lower() + ". "), column=0)
            #ts.legend.add_face(TextFace(n.name), column=1)
            #n.name = write_roman(i).lower()
            i += 1
        #t.render("mytree.pdf", w=8.5, units="in", tree_style=ts)
        t.render(os.path.split(filename)[0] + "/mytree.png", h=1080, units="px", tree_style=ts)
    else:
        print("skipping " + os.path.split(filename)[0])
|
"""
WSGI config for test_proj project.
It exposes the WSGI callable as a module-level variable named ``ap | plication``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_proj.settings")
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| |
upport:manage_user_detail",
)
    def test_require_login(self, url_name):
        """Anonymous users are redirected to the login page with ?next= set."""
        url = reverse(url_name)
        # Log out then try to retrieve the page
        self.client.logout()
        response = self.client.get(url)
        # Expect a redirect to the login page
        redirect_url = "{login_url}?next={original_url}".format(
            login_url=reverse("signin_user"),
            original_url=url,
        )
        self.assertRedirects(response, redirect_url)
class SupportViewIndexTests(SupportViewTestCase):
    """
    Tests for the support index view.
    """
    # URL names that must be linked from the support index page.
    EXPECTED_URL_NAMES = [
        "support:certificates",
        "support:refund",
    ]
    def setUp(self):
        """Make the user support staff. """
        super(SupportViewIndexTests, self).setUp()
        SupportStaffRole().add_users(self.user)
    def test_index(self):
        """The index page renders and links every expected support tool."""
        response = self.client.get(reverse("support:index"))
        self.assertContains(response, "Support")
        # Check that all the expected links appear on the index page.
        for url_name in self.EXPECTED_URL_NAMES:
            self.assertContains(response, reverse(url_name))
class SupportViewCertificatesTests(SupportViewTestCase):
    """
    Tests for the certificates support view.
    """
    def setUp(self):
        """Make the user support staff. """
        super(SupportViewCertificatesTests, self).setUp()
        SupportStaffRole().add_users(self.user)
    def test_certificates_no_filter(self):
        """With no query string, the page gets an empty user filter."""
        # Check that an empty initial filter is passed to the JavaScript client correctly.
        response = self.client.get(reverse("support:certificates"))
        self.assertContains(response, "userFilter: ''")
    def test_certificates_with_user_filter(self):
        """The ?user= query parameter is forwarded as the initial filter."""
        # Check that an initial filter is passed to the JavaScript client.
        url = reverse("support:certificates") + "?user=student@example.com"
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")
    def test_certificates_along_with_course_filter(self):
        """Both ?user= and ?course_id= are forwarded as initial filters."""
        # Check that an initial filter is passed to the JavaScript client.
        url = reverse("support:certificates") + "?user=student@example.com&course_id=" + unicode(self.course.id)
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")
        self.assertContains(response, "courseFilter: '" + unicode(self.course.id) + "'")
@ddt.ddt
class SupportViewEnrollmentsTests(SharedModuleStoreTestCase, SupportViewTestCase):
"""Tests for the enrollment support view."""
    def setUp(self):
        """Create a course with every mode, a verification deadline, and a
        student audited into the course."""
        super(SupportViewEnrollmentsTests, self).setUp()
        SupportStaffRole().add_users(self.user)
        self.course = CourseFactory(display_name=u'teꜱᴛ')
        self.student = UserFactory.create(username='student', email='test@example.com', password='test')
        # Register every course mode so enrollment changes between modes
        # can be exercised.
        for mode in (
            CourseMode.AUDIT, CourseMode.PROFESSIONAL, CourseMode.CREDIT_MODE,
            CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.VERIFIED, CourseMode.HONOR
        ):
            CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)  # pylint: disable=no-member
        # Future deadline keeps the verified track open during the test.
        self.verification_deadline = VerificationDeadline(
            course_key=self.course.id,  # pylint: disable=no-member
            deadline=datetime.now(UTC) + timedelta(days=365)
        )
        self.verification_deadline.save()
        CourseEnrollmentFactory.create(mode=CourseMode.AUDIT, user=self.student, course_id=self.course.id)  # pylint: disable=no-member
        self.url = reverse('support:enrollment_list', kwargs={'username_or_email': self.student.username})
    def assert_enrollment(self, mode):
        """
        Assert that the student's enrollment has the correct mode.

        ``mode`` is one of the CourseMode slugs created in setUp.
        """
        enrollment = CourseEnrollment.get_enrollment(self.student, self.course.id)  # pylint: disable=no-member
        self.assertEqual(enrollment.mode, mode)
    @ddt.data('username', 'email')
    def test_get_enrollments(self, search_string_type):
        """GET by username or email returns the single audit enrollment and
        the set of available course modes."""
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(len(data), 1)
        self.assertDictContainsSubset({
            'mode': CourseMode.AUDIT,
            'manual_enrollment': {},
            'user': self.student.username,
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'is_active': True,
            'verified_upgrade_deadline': None,
        }, data[0])
        # CREDIT_MODE is created in setUp but is expected to be excluded here.
        self.assertEqual(
            {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.HONOR,
             CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.PROFESSIONAL},
            {mode['slug'] for mode in data[0]['course_modes']}
        )
    def test_get_manual_enrollment_history(self):
        """A recorded manual-enrollment audit shows up in the listing with
        its enrolling user and reason."""
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            self.user,
            self.student.email,
            ENROLLED_TO_ENROLLED,
            'Financial Assistance',
            CourseEnrollment.objects.get(course_id=self.course.id, user=self.student)  # pylint: disable=no-member
        )
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset({
            'enrolled_by': self.user.email,
            'reason': 'Financial Assistance',
        }, json.loads(response.content)[0]['manual_enrollment'])
    @disable_signal(signals, 'post_save')
    @ddt.data('username', 'email')
    def test_change_enrollment(self, search_string_type):
        """POSTing a mode change updates the enrollment and records a
        manual-enrollment audit entry."""
        # No audit entry exists before the change.
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.post(url, data={
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': 'Financial Assistance'
        })
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_enrollment(CourseMode.VERIFIED)
    @ddt.data(
        # Each case: (POST payload, regex the error response must match).
        ({}, r"The field \"'\w+'\" is required."),  # The double quoting goes away in Django 2.0.1
        ({'course_id': 'bad course key'}, 'Could not parse course key.'),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, 'Could not find enrollment for user'),
        ({
            # None is replaced with the real course id in the test body.
            'course_id': None,
            'old_mode': CourseMode.HONOR,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, r'User \w+ is not enrolled with mode ' + CourseMode.HONOR),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.CREDIT_MODE,
            'reason': 'Enrollment cannot be changed to credit mode'
        }, '')
    )
    @ddt.unpack
    def test_change_enrollment_bad_data(self, data, error_message):
        """Invalid payloads return HTTP 400 and leave the enrollment and
        audit history untouched."""
        # `self` isn't available from within the DDT declaration, so
        # assign the course ID here
        if 'course_id' in data and data['course_id'] is None:
            data['course_id'] = unicode(self.course.id)  # pylint: disable=no-member
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 400)
        self.assertIsNotNone(re.match(error_message, response.content))
        self.assert_enrollment(CourseMode.AUDIT)
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
@disable_signal(signals, 'post_save')
@ddt.data('honor', 'audit', 'verified', 'professional', ' |
from django.db.models.query_utils import DeferredAttribute
from django.forms.models import model_to_dict
from ..backend import utils
class UpdateScriptsMixin(object):
    """Placeholder mixin; currently adds no behavior."""
    pass
class WooeyPy2Mixin(object):
    """Python 2 compatibility: derive ``__unicode__`` from ``__str__``."""
    def __unicode__(self):
        return unicode(self.__str__())
# from
# http://stackoverflow.com/questions/1355150/django-when-saving-how-can-you-check-if-a-field-has-changed
class ModelDiffMixin(object):
    """
    A model mixin that tracks model fields' values and provide some useful api
    to know what fields have been changed.
    """
    def __init__(self, *args, **kwargs):
        super(ModelDiffMixin, self).__init__(*args, **kwargs)
        # Snapshot of field values taken at construction (and after save).
        self.__initial = self._dict
    @property
    def diff(self):
        """Mapping of field name -> (initial value, current value) for every
        field whose value differs from the snapshot."""
        initial = self.__initial
        current = self._dict
        return {
            field: (old_value, current[field])
            for field, old_value in initial.items()
            if old_value != current[field]
        }
    @property
    def has_changed(self):
        """True when at least one field differs from its initial value."""
        return bool(self.diff)
    @property
    def changed_fields(self):
        """Names of the fields that changed since the snapshot."""
        return self.diff.keys()
    def get_field_diff(self, field_name):
        """
        Returns a diff for field if it's changed and None otherwise.
        """
        return self.diff.get(field_name, None)
    def save(self, *args, **kwargs):
        """
        Saves model and set initial state.
        """
        super(ModelDiffMixin, self).save(*args, **kwargs)
        self.__initial = self._dict
    @property
    def _dict(self):
        # Deferred fields are excluded so accessing them doesn't trigger
        # extra database queries.
        deferred = self.get_deferred_fields()
        return model_to_dict(
            self,
            fields=[field.name for field in self._meta.fields],
            exclude=deferred,
        )
|
# MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from majormajor.document import Document
from majormajor.ops.op import Op
from majormajor.changeset import Changeset
class TestDocumentMissingChangesets:
    """Exercises how a Document queues changesets whose parents have not
    arrived yet, and how it integrates them once the missing parent shows up.
    The assertion order matters: each receive_changeset call mutates the
    document's pending/missing bookkeeping."""
    def test_missing_changesets(self):
        doc = Document(snapshot='')
        doc.HAS_EVENT_LOOP = False
        assert doc.missing_changesets == set([])
        assert doc.pending_new_changesets == []
        root = doc.get_root_changeset()
        A = Changeset(doc.get_id(), "dummyuser", [root])
        doc.receive_changeset(A)
        assert doc.missing_changesets == set([])
        assert doc.pending_new_changesets == []
        # Just one Changeset gets put in pending list
        B = Changeset(doc.get_id(), "user1", ["C"])
        B.set_id("B")
        doc.receive_changeset(B)
        assert doc.get_ordered_changesets() == [root, A]
        assert doc.missing_changesets == set(["C"])
        assert doc.pending_new_changesets == [B]
        # Once C arrives, B can be linked to it and both are ordered in.
        C = Changeset(doc.get_id(), "user1", [A])
        C.set_id("C")
        doc.receive_changeset(C)
        assert doc.missing_changesets == set([])
        assert doc.pending_new_changesets == []
        assert B.get_parents() == [C]
        assert doc.get_ordered_changesets() == [root, A, C, B]
        # Now a string of changesets put on pending list
        D = Changeset(doc.get_id(), "user1", ["G"])
        D.set_id("D")
        doc.receive_changeset(D)
        assert doc.missing_changesets == set(["G"])
        assert doc.pending_new_changesets == [D]
        assert doc.get_ordered_changesets() == [root, A, C, B]
        E = Changeset(doc.get_id(), "user1", ["D"])
        E.set_id("E")
        doc.receive_changeset(E)
        assert E.get_parents() == [D]
        assert doc.missing_changesets == set(["G"])
        assert doc.pending_new_changesets == [D, E]
        assert doc.get_ordered_changesets() == [root, A, C, B]
        F = Changeset(doc.get_id(), "user1", ["E"])
        F.set_id("F")
        doc.receive_changeset(F)
        assert doc.missing_changesets ==set( ["G"])
        assert doc.pending_new_changesets == [D, E, F]
        assert doc.get_ordered_changesets() == [root, A, C, B]
        # G unblocks the whole pending chain D -> E -> F at once.
        G = Changeset(doc.get_id(), "user1", ["C"])
        G.set_id("G")
        doc.receive_changeset(G)
        assert doc.missing_changesets == set([])
        assert doc.pending_new_changesets == []
        assert doc.get_ordered_changesets() == [root, A, C, B, G, D, E, F]
        assert doc.get_ordered_changesets() == doc.tree_to_list()
|
# -*- coding: utf-8 -*-
from __future__ import u | nicode_literals
from django.apps import AppConfig
class Lo | CatrConfig(AppConfig):
name = 'LoCatr'
|
from validx import Dict, List, Tuple
import typing
from .protos import DataObject
from .utils.validator import UInt8 |
from .utils.types import Color, Remapping
class Palette(DataObject):
    """256-entry color palette of (r, g, b) tuples with 8-bit channels,
    serialized on disk as 6-bit VGA channel values."""
    __slots__ = ("data",)
    schema = Dict({"data": List(Tuple(UInt8, UInt8, UInt8))})
    def __init__(self):
        # Start out as an all-black 256-entry palette.
        self.data: typing.List[Color] = [(0, 0, 0) for _ in range(256)]
    def remap(self, remapping: Remapping) -> "Palette":
        """Return a new palette whose entries are picked by the remap indices."""
        remapped = Palette()
        remapped.data = [self.data[index] for index in remapping]
        return remapped
    @staticmethod
    def _read_one(parser) -> Color:
        """Read one color: three 6-bit channels scaled up to 8-bit."""
        six_bit = [parser.get_uint8() for _ in range(3)]
        r_8, g_8, b_8 = (int((channel * 255) / 63.0) for channel in six_bit)
        return r_8, g_8, b_8
    def read_range(self, parser, start: int, length: int):
        """Parse ``length`` colors into the palette beginning at ``start``."""
        for index in range(start, start + length):
            self.data[index] = self._read_one(parser)
        return self
    def read(self, parser):
        """Replace the palette contents with 256 freshly parsed colors."""
        # clear+extend keeps the existing list object alive.
        self.data.clear()
        self.data.extend(self._read_one(parser) for _ in range(256))
        return self
    @staticmethod
    def _write_one(parser, c: Color) -> None:
        """Write one color, dropping the low two bits of each channel."""
        for channel in c:
            parser.put_uint8((channel & 0xFF) >> 2)
    def write_range(self, parser, start: int, length: int):
        """Emit ``length`` colors beginning at ``start``."""
        for index in range(start, start + length):
            self._write_one(parser, self.data[index])
    def write(self, parser):
        """Emit all 256 palette entries."""
        for index in range(256):
            self._write_one(parser, self.data[index])
    def serialize(self) -> dict:
        return {
            "data": self.data,
        }
    def unserialize(self, data: dict):
        self.data = data["data"]
        return self
|
try:
import unittest.mock as mock
except ImportError:
import mock
import subprocess
import dags.utils.helpers as helpers
from dags.operators.postgres_to_s3_transfer import PostgresToS3Transfer
class TestPostgresToS3Transfer(object):
    """Unit tests for PostgresToS3Transfer: a pg_dump subprocess whose
    stdout is streamed to an S3 object."""
    def test_its_created_successfully(self):
        """The operator can be constructed and keeps its task id."""
        operator = PostgresToS3Transfer(
            task_id='task_id',
            postgres_conn_id='postgres_conn_id',
            s3_conn_id='s3_conn_id',
            s3_url='s3://bucket/key'
        )
        assert operator
        assert operator.task_id == 'task_id'
    @mock.patch('subprocess.Popen')
    @mock.patch('boto3.resource', autospec=True)
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_execute_streams_url_data_to_s3(self, get_connection_mock, boto3_mock, popen_mock):
        """execute() uploads the subprocess stdout stream to the bucket/key
        parsed from the s3_url."""
        operator = PostgresToS3Transfer(
            task_id='task_id',
            postgres_conn_id='postgres_conn_id',
            s3_conn_id='s3_conn_id',
            s3_url='s3://bucket/key'
        )
        operator.execute(None)
        boto3_mock().Bucket.assert_called_with('bucket')
        boto3_mock().Bucket().upload_fileobj.assert_called_with(
            popen_mock().stdout.__enter__(),  # Needs __enter__() because it's called in a context manager
            'key'
        )
    @mock.patch('subprocess.Popen')
    @mock.patch('boto3.resource', autospec=True)
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_execute_calls_pg_dump_correctly(self, get_connection_mock, boto3_mock, popen_mock):
        """execute() invokes pg_dump in custom format against the Postgres URI."""
        operator = PostgresToS3Transfer(
            task_id='task_id',
            postgres_conn_id='postgres_conn_id',
            s3_conn_id='s3_conn_id',
            s3_url='s3://bucket/key'
        )
        operator.execute(None)
        expected_command = [
            'pg_dump',
            '-Fc',
            helpers.get_postgres_uri(operator.postgres_conn_id),
        ]
        popen_mock.assert_called_with(expected_command, stdout=subprocess.PIPE)
    @mock.patch('subprocess.Popen')
    @mock.patch('boto3.resource', autospec=True)
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_execute_dumps_only_whitelisted_tables(self, get_connection_mock, boto3_mock, popen_mock):
        """When ``tables`` is given, each one is passed to pg_dump via --table."""
        tables = [
            'users',
            'log',
        ]
        operator = PostgresToS3Transfer(
            task_id='task_id',
            postgres_conn_id='postgres_conn_id',
            tables=tables,
            s3_conn_id='s3_conn_id',
            s3_url='s3://bucket/key'
        )
        operator.execute(None)
        popen_command = popen_mock.call_args[0][0]
        # Ignore executable and the Postgres URI, as the params need to be
        # between these two
        pg_dump_params_without_uri = popen_command[1:-1]
        for table in tables:
            assert '--table={}'.format(table) in pg_dump_params_without_uri
|
"""Utility Class used to report timings """
import time
class Timings(object):
    """Collect named wall-clock timings.

    ``data`` maps a measurement name to a list ``[n, t, st]`` where ``n`` is
    the number of completed calls, ``t`` the cumulative time taken, and
    ``st`` the status: the start time of a running measurement, or the
    string ``'finished'``.
    """
    def __init__(self):
        self.reset()
        self._last = None  # remember which activity we started to measure last

    def reset(self):
        """Drop all measurements and restart the wall-clock reference."""
        self.data = {}
        self.creationtime = time.time()

    def start(self, name):
        """Begin (or resume accumulating) the measurement called ``name``."""
        if name in self.data:
            assert self.data[name][2] == 'finished', \
                "Seems a measurement for '%s' has started already?" % name
            self.data[name][2] = time.time()
        else:
            self.data[name] = [0, 0., time.time()]
        self._last = name

    def stop(self, name):
        """Finish the running measurement ``name`` and accumulate its time."""
        # BUG FIX: the message formatted '%' against dict_keys (a single
        # value, not a tuple), which raised TypeError instead of showing
        # the intended assertion message.
        assert name in self.data, \
            "name '%s' not known. Known values: %s" % (name, list(self.data.keys()))
        assert self.data[name][2] != 'finished', \
            "No measurement started for name '%s'" % name
        timetaken = time.time() - self.data[name][2]
        self.data[name][0] += 1
        self.data[name][1] += timetaken
        self.data[name][2] = 'finished'
        self._last = None

    def stoplast(self):
        """Stop the last measurement at this point."""
        assert self._last is not None
        self.stop(self._last)

    def startnext(self, name):
        """Will stop whatever measurement has been started most recently, and
        start the next one with name 'name'."""
        if self._last:
            self.stop(self._last)
        self.start(name)

    def getncalls(self, name):
        """Number of completed calls recorded for ``name``."""
        return self.data[name][0]

    def gettime(self, name):
        """Cumulative elapsed seconds recorded for ``name``."""
        return self.data[name][1]

    def report_str(self, n=10):
        """Lists the n items that took the longest time to execute.

        NOTE(review): ``n`` is currently unused -- all items are listed;
        kept as-is to preserve existing output.
        """
        msg = "Timings summary, longest items first:\n"
        # print in descending order of time taken
        sorted_keys = sorted(self.data.keys(), key=lambda x: self.data[x][1], reverse=True)
        for name in sorted_keys:
            if self.data[name][0] > 0:
                msg += "%25s:%6d calls took %10.4fs (%8.6fs per call)\n" % (
                    name[0:25],
                    self.getncalls(name),
                    self.gettime(name),
                    self.gettime(name) / float(self.getncalls(name)),
                    )
            else:
                # BUG FIX: this branch used plain assignment (msg = ...),
                # which discarded every previously accumulated report line.
                msg += "Timings %s: none completed\n" % name
        recorded_sum = self.recorded_sum()
        walltime = time.time() - self.creationtime
        msg += "Wall time: %.4gs (sum of time recorded: %gs=%5.1f%%)\n" % \
            (walltime, recorded_sum, recorded_sum / walltime * 100.)
        return msg

    def __str__(self):
        return self.report_str()

    def recorded_sum(self):
        """Sum of all recorded cumulative times."""
        return sum([self.data[name][1] for name in self.data.keys()])
timings=Timings()
if __name__ == "__main__":
    # Quick self-test / micro-benchmark: time 20000 start/stop cycles.
    # range() and print() replace the Python-2-only xrange/print statement
    # and work identically here under both Python 2 and 3.
    t = Timings()
    for x in range(20000):
        t.start("test-one")
        t.stop("test-one")
    print(t)
|
## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass | Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """SysV IPC related information
    """
    plugin_name = "sysvipc"
    def setup(self):
        # Snapshot the kernel's SysV IPC tables (message queues,
        # semaphores, shared memory segments)...
        proc_files = [
            "/proc/sysvipc/msg",
            "/proc/sysvipc/sem",
            "/proc/sysvipc/shm",
        ]
        self.add_copy_specs(proc_files)
        # ...plus the userspace view of the same data.
        self.add_cmd_output("ipcs")
# vim: et ts= | 4 sw=4
|
import random
def random_data(points=50, maximum=100):
    """Return *points* pseudo-random floats uniformly drawn from [0, maximum).

    :param points: number of samples to generate.
    :param maximum: exclusive upper bound for each sample.
    """
    # range() replaces the Python-2-only xrange(), keeping behavior identical
    # while also working under Python 3.
    return [random.random() * maximum for _ in range(points)]
def random_colour(min=20, max=200):
    """Return a random colour as a six-digit uppercase hex string.

    Each RGB component is drawn from [min, max).  The parameter names shadow
    the builtins but are kept for backward compatibility with keyword callers.
    """
    span = max - min
    components = tuple(int(random.random() * span + min) for _ in range(3))
    return '%02X%02X%02X' % components
|
"""This test checks that Nevergrad is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.nevergrad import NevergradSearch
def easy_objective(config, reporter):
    """Toy objective: report a fixed loss once per configured iteration.

    The loss depends only on config["height"] and config["width"], so the
    optimum is at height=14 with width far from 3.
    """
    import time
    time.sleep(0.2)
    loss = (config["height"] - 14) ** 2 - abs(config["width"] - 3)
    for step in range(config["iterations"]):
        reporter(timesteps_total=step, mean_loss=loss)
        time.sleep(0.02)
if __name__ == "__main__":
    import argparse
    from nevergrad.optimization import optimizerlib
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # Tune run() kwargs: sample count, per-trial config, and stop criteria.
    config = {
        "num_samples": 10 if args.smoke_test else 50,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        }
    }
    # Dimensionality of the search space; names below map the two dims to
    # the config keys easy_objective reads.
    instrumentation = 2
    parameter_names = ["height", "width"]
    # With nevergrad v0.2.0+ the following is also possible:
    # from nevergrad import instrumentation as inst
    # instrumentation = inst.Instrumentation(
    # height=inst.var.Array(1).bounded(0, 200).asfloat(),
    # width=inst.var.OrderedDiscrete([0, 10, 20, 30, 40, 50]))
    # parameter_names = None  # names are provided by the instrumentation
    optimizer = optimizerlib.OnePlusOne(instrumentation)
    # Wrap the nevergrad optimizer as a Tune search algorithm, paired with
    # an ASHA scheduler for early stopping.
    algo = NevergradSearch(
        optimizer,
        parameter_names,
        max_concurrent=4,
        metric="mean_loss",
        mode="min")
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective,
        name="nevergrad",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
from django.conf.urls import url
from . import views
# URL routes for this app.  Each name= is the handle used by reverse() and
# {% url %} template lookups; (?P<id>[0-9]+) captures a numeric primary key.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^list/$', views.list, name='list'),
    url(r'^search/$', views.search, name='search'),
    url(r'^add/$', views.add, name='add'),
    url(r'^restaurant/(?P<id>[0-9]+)$', views.restaurant, name='restaurant'),
    url(r'^images/(?P<id>[0-9]+)$', views.show_image, name='show_image')
]
|
__author__ = 'Joel Akeret'
__version__ = '0.1.2'
__credits__ = 'E | TH Zurich, Institute fo | r Astronomy'
|
import subprocess
import os
INPUT_FILE = "/tmp/input"
ERROR_ | LOG = "/tmp/error.log"
RESULTS_FILE = "/tmp/results"
def get_command_from_first_line(line):
    """Extract the interpreter command from a shebang-style first line.

    Returns the text after '#!' when present, otherwise *line* unchanged.
    """
    return line[2:] if line.startswith("#!") else line
def get_program_output_from_buffer_contents(buffer_contents):
    """Run a buffer through the interpreter named on its first line.

    The first element of *buffer_contents* is a shebang-style command line;
    the remaining elements are written to INPUT_FILE and executed with it.
    Returns stderr lines followed by stdout lines.
    """
    write_buffer_contents_to_file(INPUT_FILE, buffer_contents[1:])
    command = get_command_from_first_line(buffer_contents[0])
    execute_file_with_specified_shell_program(command)
    errors = read_file_lines(ERROR_LOG)
    std_out = read_file_lines(RESULTS_FILE)
    # NOTE(review): read_file_lines returns None when its file is missing,
    # which would make this concatenation raise TypeError -- verify both
    # files always exist after execution.
    new_buf = errors + std_out
    return new_buf
def write_buffer_contents_to_file(file_name, contents):
    """Write each element of *contents* to *file_name*, one per line."""
    with open(file_name, "w") as out:
        out.writelines(entry + "\n" for entry in contents)
def execute_file_with_specified_shell_program(shell_command):
    """Run INPUT_FILE through *shell_command*, capturing output to files.

    stdout is redirected to RESULTS_FILE and stderr to ERROR_LOG.  Failures
    are deliberately ignored (best effort): a non-zero exit usually just
    means the user's script had errors, which land in ERROR_LOG anyway.

    SECURITY NOTE: shell=True with string interpolation means
    *shell_command* must come from a trusted source (here: the shebang
    line the local user typed into the buffer).
    """
    try:
        subprocess.check_call("{0} {1} {2} > {3} 2> {4}".format(
            shell_command,
            redirect_or_arg(shell_command),
            INPUT_FILE,
            RESULTS_FILE,
            ERROR_LOG),
            shell=True
        )
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:` so genuine bugs (KeyboardInterrupt,
        # NameError, ...) are no longer silently swallowed.
        pass
def redirect_or_arg(shell_command):
    """Return '<' to feed the input file via stdin redirection, or '' for
    interpreters (coffee) that take the file as a plain argument."""
    return "" if shell_command == "coffee" else "<"
def read_file_lines(file_to_read):
    """Return the lines of *file_to_read* without trailing newlines.

    Returns an empty list when the file does not exist.  (The original
    implicitly returned None in that case, which made the caller's
    `errors + std_out` concatenation raise TypeError.)
    """
    if not os.path.isfile(file_to_read):
        return []
    with open(file_to_read, "r") as f:
        return [l.rstrip('\n') for l in f.readlines()]
|
import lmdb
from zounds.nputil import Growable, packed_hamming_distance
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import cpu_count
import os
import binascii
class HammingDb(object):
    """LMDB-backed store mapping fixed-size binary codes to payloads, with
    brute-force hamming-distance search over an in-memory copy of the codes.
    """
    def __init__(self, path, map_size=1000000000, code_size=8, writeonly=False):
        # code_size is in bytes and must be a multiple of 8 so each code can
        # be viewed as uint64 words for packed hamming distance.
        super(HammingDb, self).__init__()
        self.writeonly = writeonly
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        self.env = lmdb.open(
            self.path,
            max_dbs=10,
            map_size=map_size,
            writemap=True,
            map_async=True,
            metasync=True)
        self.env.reader_check()
        self.metadata = self.env.open_db(b'metadata')
        try:
            # get_metadata returns None for a fresh database; int(None)
            # raises TypeError, routing us to the initialization branch.
            self.code_size = int(self.get_metadata(b'codesize'))
            if code_size and code_size != self.code_size:
                # NOTE(review): the two sizes look swapped in this message --
                # the stored size is self.code_size, the argument code_size.
                raise ValueError(
                    'Database is already initialized with code size {code_size}'
                    ', but {self.code_size} was passed to __init__'
                    .format(**locals()))
        except TypeError:
            if code_size is None:
                raise ValueError(
                    'You must supply a code size for an uninitialized database')
            if code_size % 8:
                raise ValueError('code_size must be a multiple of 8')
            self.set_metadata(b'codesize', str(code_size).encode())
            self.code_size = code_size
        self.index = self.env.open_db(b'index')
        # Single-row scratch recarray reused by _add_code().
        self._append_buffer = self._recarray(1)
        # Reusable byte buffer plus a uint64 view of the same memory, used to
        # convert raw code bytes without re-allocating.
        self._code_bytearray = bytearray(b'a' * self.code_size)
        self._code_buffer = np.frombuffer(self._code_bytearray, dtype=np.uint64)
        self._codes = None
        self._ids = set()
        self._catch_up_on_in_memory_store()
        self._thread_count = cpu_count()
        self._pool = ThreadPool(processes=self._thread_count)
    def close(self):
        """Close the underlying LMDB environment."""
        self.env.close()
    def __del__(self):
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def set_metadata(self, key, value):
        """Store a key/value pair in the metadata sub-database."""
        with self.env.begin(write=True) as txn:
            txn.put(key, value, db=self.metadata)
    def get_metadata(self, key):
        """Fetch a value from the metadata sub-database (None when absent)."""
        with self.env.begin() as txn:
            return txn.get(key, db=self.metadata)
    def _catch_up_on_in_memory_store(self):
        """Load codes present in LMDB but missing from the in-memory copy."""
        self._initialize_in_memory_store()
        with self.env.begin() as txn:
            cursor = txn.cursor(db=self.index)
            for i, bundle in enumerate(cursor.iternext(keys=True, values=True)):
                _id, value = bundle
                if _id in self._ids:
                    continue
                # Each LMDB value is the code followed by the payload.
                code = value[:self.code_size]
                self._add_code(_id, code)
    def __len__(self):
        with self.env.begin() as txn:
            lmdb_size = txn.stat(self.index)['entries']
        if not lmdb_size:
            return 0
        return lmdb_size
    def _recarray(self, size):
        """Allocate a recarray of *size* (id, code-as-uint64-words) rows."""
        return np.recarray(
            size,
            dtype=[
                ('id', 'S32'),
                ('code', np.uint64, self.code_size // 8)],
            order='F')
    def _initialize_in_memory_store(self):
        # Write-only handles never build the in-memory search structure.
        if self.writeonly:
            return
        if self._codes is not None:
            return
        # Start with room for at least a million codes to limit regrowth.
        initial_size = max(int(1e6), len(self))
        self._codes = Growable(self._recarray(initial_size))
    def _np_code(self, code):
        """View the raw byte string *code* as uint64 words (shared buffer)."""
        self._code_bytearray[:] = code
        return self._code_buffer
    def _validate_code_size(self, code):
        """Raise ValueError unless len(code) matches the configured size."""
        code_len = len(code)
        if code_len != self.code_size:
            fmt = '''code must be equal to code_size
            ({self.code_size}), but was {code_len}'''
            raise ValueError(fmt.format(**locals()))
    def _add_code(self, _id, code):
        """Append one (id, code) row to the in-memory store."""
        if self.writeonly:
            return
        arr = self._append_buffer
        arr[0]['id'] = _id
        arr[0]['code'] = self._np_code(code)
        self._codes.append(arr)
        self._ids.add(_id)
    def _check_for_external_modifications(self):
        # Another writer may have appended via a separate handle; re-sync.
        if self.__len__() != self._codes.logical_size:
            self._catch_up_on_in_memory_store()
    def _new_id(self):
        # 32 hex characters, matching the 'S32' id field of the recarray.
        return binascii.hexlify(os.urandom(16))
    def append(self, code, data):
        """Store *data* under a fresh random id, indexed by *code*."""
        self._validate_code_size(code)
        self._initialize_in_memory_store()
        with self.env.begin(write=True) as txn:
            _id = self._new_id()
            # Accept either str or bytes for both code and data.
            try:
                code = code.encode()
            except AttributeError:
                pass
            try:
                data = data.encode()
            except AttributeError:
                pass
            txn.put(_id, code + data, db=self.index)
            self._add_code(_id, code)
    def _random_code(self):
        """Return the code of a pseudo-randomly chosen existing entry.

        NOTE(review): loops forever on an empty database -- confirm callers
        only use this once at least one entry exists.
        """
        with self.env.begin() as txn:
            with txn.cursor(self.index) as cursor:
                code = None
                while not code:
                    # Seek to the first key >= a random id; retry on misses.
                    if cursor.set_range(self._new_id()):
                        return txn.get(
                            cursor.key(), db=self.index)[:self.code_size]
                    continue
    def random_search(self, n_results, multithreaded=False, sort=False):
        """Search around a random stored code; returns (code, results)."""
        code = self._random_code()
        return code, self.search(code, n_results, multithreaded, sort=sort)
    def search(self, code, n_results, multithreaded=False, sort=False):
        """Yield payloads of the *n_results* entries nearest to *code* in
        hamming distance (generator over LMDB values minus the code prefix)."""
        if self.writeonly:
            error_msg = 'searches may not be performed in writeonly mode'
            raise RuntimeError(error_msg)
        self._validate_code_size(code)
        self._check_for_external_modifications()
        query = self._np_code(code)
        codes = self._codes.logical_data['code']
        if codes.ndim == 1:
            codes = codes[..., None]
        if not multithreaded:
            scores = packed_hamming_distance(query, codes)
        else:
            # Split the code matrix into one chunk per pool thread.
            n_codes = len(codes)
            chunksize = max(1, n_codes // self._thread_count)
            scores = np.concatenate(self._pool.map(
                lambda x: packed_hamming_distance(query, x),
                (codes[i: i + chunksize] for i in
                 range(0, n_codes, chunksize))))
        # argpartition will ensure that the lowest scores will all be
        # withing the first n_results elements, but makes no guarantees
        # about the ordering *within* n_results
        partitioned_indices = np.argpartition(scores, n_results)[:n_results]
        if sort:
            # since argpartition doesn't guarantee that the results are
            # sorted *within* n_results, sort the much smaller result set
            sorted_indices = np.argsort(scores[partitioned_indices])
            indices = partitioned_indices[sorted_indices]
        else:
            # the partitioned indices are good enough. results will all be
            # within some degree of similarity, but not necessarily in any
            # particular order
            indices = partitioned_indices
        nearest = self._codes.logical_data[indices]['id']
        with self.env.begin() as txn:
            for _id in nearest:
                yield txn.get(_id, db=self.index)[self.code_size:]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pytest
from pandas.compat import product as cart_product, range
from pandas import Dat | aFrame, MultiIndex, Period, Series, Timedelta, Timestamp
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestCounting(object):
    """Tests for GroupBy.cumcount() (position within group) and
    GroupBy.ngroup() (index of the group itself), on both DataFrame and
    Series groupbys, across plain, duplicate, and MultiIndex indexes."""
    def test_cumcount(self):
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3])
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.cumcount())
        assert_series_equal(e, se.cumcount())
    def test_cumcount_dupe_index(self):
        # cumcount must align on position even when index labels repeat
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_mi(self):
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=mi)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_groupby_not_col(self):
        # grouping key is an external array, not a column
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_ngroup(self):
        df = DataFrame({'A': list('aaaba')})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0])
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_distinct(self):
        df = DataFrame({'A': list('abcde')})
        g = df.groupby('A')
        sg = g.A
        expected = Series(range(5), dtype='int64')
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_one_group(self):
        df = DataFrame({'A': [0] * 5})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.ngroup())
        assert_series_equal(e, se.ngroup())
    def test_ngroup_series_matches_frame(self):
        df = DataFrame({'A': list('aaaba')})
        s = Series(list('aaaba'))
        assert_series_equal(df.groupby(s).ngroup(),
                            s.groupby(s).ngroup())
    def test_ngroup_dupe_index(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_mi(self):
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame({'A': list('aaaba')}, index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=mi)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_groupby_not_col(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_descending(self):
        df = DataFrame(['a', 'a', 'b', 'a', 'b'], columns=['A'])
        g = df.groupby(['A'])
        ascending = Series([0, 0, 1, 0, 1])
        descending = Series([1, 1, 0, 1, 0])
        # ascending=False should mirror the numbering: n_groups - 1 - i
        assert_series_equal(descending, (g.ngroups - 1) - ascending)
        assert_series_equal(ascending, g.ngroup(ascending=True))
        assert_series_equal(descending, g.ngroup(ascending=False))
    def test_ngroup_matches_cumcount(self):
        # verify one manually-worked out case works
        df = DataFrame([['a', 'x'], ['a', 'y'], ['b', 'x'],
                        ['a', 'x'], ['b', 'y']], columns=['A', 'X'])
        g = df.groupby(['A', 'X'])
        g_ngroup = g.ngroup()
        g_cumcount = g.cumcount()
        expected_ngroup = Series([0, 1, 2, 0, 3])
        expected_cumcount = Series([0, 0, 0, 1, 0])
        assert_series_equal(g_ngroup, expected_ngroup)
        assert_series_equal(g_cumcount, expected_cumcount)
    def test_ngroup_cumcount_pair(self):
        # brute force comparison for all small series
        for p in cart_product(range(3), repeat=4):
            df = DataFrame({'a': p})
            g = df.groupby(['a'])
            order = sorted(set(p))
            ngroupd = [order.index(val) for val in p]
            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
            assert_series_equal(g.ngroup(), Series(ngroupd))
            assert_series_equal(g.cumcount(), Series(cumcounted))
    def test_ngroup_respects_groupby_order(self):
        np.random.seed(0)
        df = DataFrame({'a': np.random.choice(list('abcdef'), 100)})
        for sort_flag in (False, True):
            g = df.groupby(['a'], sort=sort_flag)
            df['group_id'] = -1
            df['group_index'] = -1
            # Recompute group id/position by iterating the groupby directly,
            # then compare with ngroup()/cumcount().
            for i, (_, group) in enumerate(g):
                df.loc[group.index, 'group_id'] = i
                for j, ind in enumerate(group.index):
                    df.loc[ind, 'group_index'] = j
            assert_series_equal(Series(df['group_id'].values),
                                g.ngroup())
            assert_series_equal(Series(df['group_index'].values),
                                g.cumcount())
    @pytest.mark.parametrize('datetimelike', [
        [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)],
        [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)],
        [Timedelta(x, unit="h") for x in range(1, 4)],
        [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]])
    def test_count_with_datetimelike(self, datetimelike):
        # test for #13393, where DataframeGroupBy.count() fails
        # when counting a datetimelike column.
        df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike})
        res = df.groupby('x').count()
        expected = DataFrame({'y': [2, 1]}, index=['a', 'b'])
        expected.index.name = "x"
        assert_frame_equal(expected, res)
    def test_count_with_only_nans_in_first_group(self):
        # GH21956
        df = DataFrame({'A': [np.nan, np.nan], 'B': ['a', 'b'], 'C': [1, 2]})
        result = df.groupby(['A', 'B']).C.count()
        mi = MultiIndex(levels=[[], ['a', 'b']],
                        codes=[[], []],
                        names=['A', 'B'])
        expected = Series([], index=mi, dtype=np.int64, name='C')
        assert_series_equal(result, expected, check_index_type=False)
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import logging
import os
import structlog
import sys
from datetime import datetime
stdout_log = None
logger = structlog.get_logger()
def setupConsoleLogging(quiet):
    """Attach a stdout handler to the root logger.

    With quiet=True only WARNING and above propagate; otherwise every level
    does.  The handler is kept in the module-global ``stdout_log`` so
    configure_logging() can later swap its formatter.
    """
    global stdout_log
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.WARNING if quiet else logging.NOTSET)
    stdout_log = logging.StreamHandler(sys.stdout)
    stdout_log.setLevel(logging.DEBUG)
    stdout_log.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    root_logger.addHandler(stdout_log)
class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
    """Render the 'event' as plain text, with any extra keys appended in
    parentheses as key=value pairs."""
    def __call__(self, logger, method_name, event_dict):
        event = event_dict.pop('event')
        if not event_dict:
            return event
        # Render the remaining keys with the stock key=value renderer and
        # tack them onto the event text.
        extras = super(UnstructuredRenderer, self).__call__(
            logger, method_name, event_dict)
        return "%s (%s)" % (event, extras)
def mozdef_format(logger, method_name, event_dict):
    """Reshape a structlog event dict into MozDef's expected structure.

    See http://mozdef.readthedocs.org/en/latest/usage.html#sending-logs-to-mozdef
    """
    # Everything the caller logged moves under 'details'; the event text
    # itself becomes the top-level 'summary'.
    details = event_dict
    out = {'details': details}
    out['summary'] = details.pop('event')
    # An empty details mapping is dropped entirely.
    if not details:
        del out['details']
    # Envelope fields derived from the logging context.
    out['timestamp'] = datetime.utcnow().isoformat()
    out['processid'] = os.getpid()
    out['processname'] = 'relengapi'
    out['source'] = logger.name
    out['severity'] = method_name.upper()
    out['tags'] = ['relengapi']
    return out
def reset_context(**kwargs):
    """Start a fresh structlog context bound only to *kwargs* (logger.new
    drops any previously-bound thread-local values)."""
    logger.new(**kwargs)
def configure_logging(app):
    """Install structlog processors based on the Flask app's config.

    With JSON_STRUCTURED_LOGGING set, events are reshaped for MozDef and
    emitted as JSON; otherwise a human-readable rendering is used.
    """
    if app.config.get('JSON_STRUCTURED_LOGGING'):
        processors = [
            structlog.stdlib.filter_by_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            mozdef_format,
            structlog.processors.JSONRenderer()
        ]
    else:
        processors = [
            structlog.stdlib.filter_by_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            UnstructuredRenderer()
        ]
    # stdout_log is only set once setupConsoleLogging() has run.
    if app.config.get('JSON_STRUCTURED_LOGGING') and stdout_log:
        # structlog has combined all of the interesting data into the
        # (JSON-formatted) message, so only log that
        stdout_log.setFormatter(logging.Formatter('%(message)s'))
    structlog.configure(
        context_class=structlog.threadlocal.wrap_dict(dict),
        processors=processors,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottle(nn.Module):
    """Mixin that lets a 2-D layer accept 3-D input.

    A (batch, seq, features) tensor is flattened to (batch*seq, features),
    pushed through the wrapped layer, and reshaped back; input with two or
    fewer dimensions passes straight through.
    """
    def forward(self, input):
        if len(input.size()) <= 2:
            return super(Bottle, self).forward(input)
        batch, seq = input.size()[:2]
        flattened = super(Bottle, self).forward(input.view(batch * seq, -1))
        return flattened.view(batch, seq, -1)
# A Linear layer that transparently handles 3-D input via the Bottle mixin.
class Linear(Bottle, nn.Linear):
    pass
class FeedForwardLayer(nn.Module):
    """Two-layer MLP: (Linear -> activation -> dropout) applied twice.

    Attribute names Layer1/Layer2 are kept so existing state_dicts load.
    """
    def __init__(self, in_dim, hidden_dim, out_dim, activation=nn.SELU(), dp_ratio=0.2):
        super(FeedForwardLayer, self).__init__()
        self.name = 'FeedForwardLayer'
        self.Layer1 = Linear(in_dim, hidden_dim)
        self.Layer2 = Linear(hidden_dim, out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(p=dp_ratio)
    def forward(self, x):
        # First projection with nonlinearity and dropout...
        hidden = self.dropout(self.activation(self.Layer1(x)))
        # ...then the second, identically shaped stage.
        return self.dropout(self.activation(self.Layer2(hidden)))
class DecomposableModel(nn.Module):
    """Attention-based sentence-pair model: score token pairs (F), compare
    aligned pairs (G), then aggregate and classify (H)."""
    def __init__(self, config):
        super(DecomposableModel, self).__init__()
        self.config = config
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        self.projection = Linear(config.d_embed, config.d_proj)
        self.dropout = nn.Dropout(p=config.dp_ratio)
        self.activation = nn.SELU()
        # F produces attention features, G compares aligned token pairs,
        # H classifies the aggregated comparison vectors.
        self.F = FeedForwardLayer(config.d_embed, config.d_hidden, config.d_F, self.activation, config.dp_ratio)
        self.G = FeedForwardLayer(2 * config.d_embed, config.d_hidden, config.d_G, self.activation, config.dp_ratio)
        self.H = FeedForwardLayer(2 * config.d_G, config.d_hidden, config.d_out, self.activation, config.dp_ratio)
    def forward(self, batch):
        # batch.premise/hypothesis arrive (seq, batch); transpose to batch-first.
        prem_embed = self.embed(batch.premise.transpose(0, 1))
        hypo_embed = self.embed(batch.hypothesis.transpose(0, 1))
        if self.config.fix_emb:
            # Re-wrap the raw data so fixed embeddings get no gradient.
            prem_embed = Variable(prem_embed.data)
            hypo_embed = Variable(hypo_embed.data)
        # Pairwise attention scores between premise and hypothesis tokens.
        e = torch.bmm(self.F(prem_embed), self.F(hypo_embed).transpose(1, 2))
        # NOTE(review): F.softmax without an explicit dim= relies on
        # deprecated implicit-dimension behavior -- confirm the intended axis.
        e_ = F.softmax(e)
        e_t = F.softmax(e.transpose(1, 2))
        # Soft alignments: beta aligns hypothesis to premise, alpha vice versa.
        beta = torch.bmm(e_, hypo_embed)
        alpha = torch.bmm(e_t, prem_embed)
        # Compare each token with its aligned counterpart, sum over tokens.
        v1 = self.G(torch.cat((prem_embed, beta), 2)).sum(1)
        v2 = self.G(torch.cat((hypo_embed, alpha), 2)).sum(1)
        v = F.softmax(self.H(self.dropout(torch.cat((v1, v2), 1))).squeeze())
        return v
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copy | right (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even th | e implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
    ''' Model name: SaleOrder
        Adds parcel totalisation to sale.order based on each line's
        quantity-per-pack.
    '''
    _inherit = 'sale.order'
    # Button event:
    def update_parcels_event(self, cr, uid, ids, context=None):
        ''' Get total of parcels.

        Sums qty / q_x_pack over order lines (skipping products flagged
        exclude_parcels) and writes `parcels` plus a per-line report into
        `parcels_note`.
        '''
        assert len(ids) == 1, 'Only one element a time'
        parcels = 0
        parcels_note = ''
        for line in self.browse(cr, uid, ids, context=context)[0].order_line:
            if line.product_id.exclude_parcels:
                continue # jump no parcels element
            qty = line.product_uom_qty
            q_x_pack = line.product_id.q_x_pack
            if q_x_pack > 0:
                # A quantity that isn't an exact multiple of the pack size
                # is reported but contributes no parcels.
                if qty % q_x_pack > 0:
                    parcels_note += _('%s not correct q x pack\n') % (
                        line.product_id.default_code)
                else:
                    parcel = int(qty / q_x_pack)
                    parcels += parcel
                    parcels_note += _('%s: parcels [%s x] %s \n') % (
                        line.product_id.default_code, q_x_pack, parcel)
            else:
                parcels_note += _(
                    '%s no q x pack\n') % line.product_id.default_code
        self.write(cr, uid, ids, {
            'parcels': parcels,
            'parcels_note': parcels_note,
            }, context=context)
    _columns = {
        'parcels_note': fields.text(
            'Parcel note', help='Calculation procedure note') ,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf- | 8 -*-
class Codes(object):
    """Namespace of named constants with reverse (value -> name) lookup.

    Keyword arguments become attributes; ``str_value`` maps a stored value
    back to its constant's name, and ``codes["some-name"]`` looks a value up
    by a dashed/lowercase spelling of the constant.
    """
    def __init__(self, **kws):
        self._reverse_dict = {}
        for name, value in kws.items():
            self.__setattr__(name, value)
    def str_value(self, value):
        """Return the constant name registered for *value*."""
        return self._reverse_dict[value]
    def __setattr__(self, name, value):
        super(Codes, self).__setattr__(name, value)
        # Leading-underscore attributes are bookkeeping, not constants.
        if not name.startswith("_"):
            self._reverse_dict[value] = name
    def __repr__(self):
        pairs = sorted(self._reverse_dict.items())
        body = ", ".join(f"{name}={value!r}" for value, name in pairs)
        return f"Codes({body})"
    def __getitem__(self, key):
        # Normalize e.g. "not-found" -> "NOT_FOUND" before attribute lookup.
        return self.__dict__[key.replace("-", "_").upper()]
|
# -*- coding: utf-8 -*-
"""
tmdbsimple.search
~~~~~~~~~~~~~~~~~
This module implements the Search functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2018 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class Search(TMDB):
    """
    Search functionality
    See: https://developers.themoviedb.org/3/search
    """
    BASE_PATH = 'search'
    URLS = {
        'movie': '/movie',
        'collection': '/collection',
        'tv': '/tv',
        'person': '/person',
        'company': '/company',
        'keyword': '/keyword',
        'multi': '/multi'
    }
    def _search(self, key, kwargs):
        # Shared GET + attribute-population logic for every search endpoint.
        path = self._get_path(key)
        response = self._GET(path, kwargs)
        self._set_attrs_to_values(response)
        return response
    def movie(self, **kwargs):
        """
        Search for movies by title.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
            language: (optional) ISO 639-1 code.
            include_adult: (optional) Toggle the inclusion of adult titles.
                           Expected value is True or False.
            year: (optional) Filter the results release dates to matches that
                  include this value.
            primary_release_year: (optional) Filter the results so that only
                                  the primary release dates have this value.
            search_type: (optional) 'phrase' (default) is the general-purpose
                         search type; use 'ngram' for an autocomplete-style
                         search.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('movie', kwargs)
    def collection(self, **kwargs):
        """
        Search for collections by name.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
            language: (optional) ISO 639-1 code.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('collection', kwargs)
    def tv(self, **kwargs):
        """
        Search for TV shows by title.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
            language: (optional) ISO 639-1 code.
            first_air_date_year: (optional) Filter the results to only match
                                 shows that have an air date with this value.
            search_type: (optional) 'phrase' (default) is the general-purpose
                         search type; use 'ngram' for an autocomplete-style
                         search.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('tv', kwargs)
    def person(self, **kwargs):
        """
        Search for people by name.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
            include_adult: (optional) Toggle the inclusion of adult titles.
                           Expected value is True or False.
            search_type: (optional) 'phrase' (default) is the general-purpose
                         search type; use 'ngram' for an autocomplete-style
                         search.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('person', kwargs)
    def company(self, **kwargs):
        """
        Search for companies by name.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('company', kwargs)
    def keyword(self, **kwargs):
        """
        Search for keywords by name.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('keyword', kwargs)
    def multi(self, **kwargs):
        """
        Search the movie, tv show and person collections with a single query.
        Args:
            query: CGI escaped string.
            page: (optional) Minimum value of 1. Expected value is an integer.
            language: (optional) ISO 639-1 code.
            include_adult: (optional) Toggle the inclusion of adult titles.
                           Expected value is True or False.
        Returns:
            A dict representation of the JSON returned from the API.
        """
        return self._search('multi', kwargs)
|
sed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ceilometerclient import client as ceilometer_client
from cinderclient import client as cinder_client
from heat.common.i18n import _
from heatclient import client as heat_client
from keystoneclient.auth.identity.generic import password
from keystoneclient import exceptions as kc_exceptions
from keystoneclient import session
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from swiftclient import client as swift_client
class KeystoneWrapperClient(object):
    """Wrapper object for keystone client
    This wraps keystone client, so we can encapsulate certain
    added properties like auth_token, project_id etc.
    """
    def __init__(self, auth_plugin, verify=True):
        # verify=False disables TLS certificate validation for the session.
        self.auth_plugin = auth_plugin
        self.session = session.Session(
            auth=auth_plugin,
            verify=verify)
    @property
    def auth_token(self):
        # Token for the current session (fetched/refreshed by the plugin).
        return self.auth_plugin.get_token(self.session)
    @property
    def auth_ref(self):
        # Full access info object, including the service catalog.
        return self.auth_plugin.get_access(self.session)
    @property
    def project_id(self):
        return self.auth_plugin.get_project_id(self.session)
    def get_endpoint_url(self, service_type, region=None):
        """Return the public endpoint URL for *service_type*, optionally
        filtered to a specific region."""
        kwargs = {
            'service_type': service_type,
            'endpoint_type': 'publicURL'}
        if region:
            kwargs.update({'attr': 'region',
                           'filter_value': region})
        return self.auth_ref.service_catalog.url_for(**kwargs)
class ClientManager(object):
"""Provides access to the official python clients for calling various APIs.
Manager that provides access to the official python clients for
calling various OpenStack APIs.
"""
CINDERCLIENT_VERSION = '2'
HEATCLIENT_VERSION = '1'
NOVACLIENT_VERSION = '2'
CEILOMETER_VERSION = '2'
def __init__(self, conf):
    """Build all service clients from the given test configuration.

    :param conf: configuration object providing auth_url, credentials,
        region and SSL settings.
    :raises ValueError: if conf.auth_url contains no API version
        segment ('/v2.0', '/v3', ...).
    """
    self.conf = conf
    # Bug fix: str.find() returns -1 (which is truthy) when '/v' is
    # absent, so the original `if self.conf.auth_url.find('/v'):` could
    # never reach the else branch and a version-less URL crashed with
    # IndexError from split() instead of the intended ValueError.
    # Test membership explicitly instead.
    if '/v' in self.conf.auth_url:
        # Several clients (nova, neutron, cinder) only accept v2.0 auth.
        self.v2_auth_url = self.conf.auth_url.replace('/v3', '/v2.0')
        self.auth_version = self.conf.auth_url.split('/v')[1]
    else:
        raise ValueError(_('Incorrectly specified auth_url config: no '
                           'version found.'))
    self.insecure = self.conf.disable_ssl_certificate_validation
    self.ca_file = self.conf.ca_file
    self.identity_client = self._get_identity_client()
    self.orchestration_client = self._get_orchestration_client()
    self.compute_client = self._get_compute_client()
    self.network_client = self._get_network_client()
    self.volume_client = self._get_volume_client()
    self.object_client = self._get_object_client()
    self.metering_client = self._get_metering_client()
def _get_orchestration_client(self):
    """Return a heat client, or None when no orchestration endpoint exists."""
    endpoint = os.environ.get('HEAT_URL')
    if os.environ.get('OS_NO_CLIENT_AUTH') == 'True':
        token = None
    else:
        token = self.identity_client.auth_token
    try:
        if endpoint is None:
            endpoint = self.identity_client.get_endpoint_url(
                'orchestration', self.conf.region)
    except kc_exceptions.EndpointNotFound:
        return None
    return heat_client.Client(
        self.HEATCLIENT_VERSION,
        endpoint,
        token=token,
        username=self.conf.username,
        password=self.conf.password)
def _get_identity_client(self):
    """Build a KeystoneWrapperClient from the configured credentials."""
    user_domain_name = self.conf.user_domain_name
    project_domain_name = self.conf.project_domain_name
    creds = {
        'username': self.conf.username,
        'password': self.conf.password,
        'tenant_name': self.conf.tenant_name,
        'auth_url': self.conf.auth_url,
    }
    # keystone v2 can't ignore domain details, so only send them for v3.
    if self.auth_version == '3':
        creds['user_domain_name'] = user_domain_name
        creds['project_domain_name'] = project_domain_name
    if self.insecure:
        verify_cert = False
    else:
        verify_cert = self.ca_file or True
    return KeystoneWrapperClient(password.Password(**creds), verify_cert)
def _get_compute_client(self):
    """Return the default nova client used in testing."""
    # novaclient cannot use a v3 auth URL, so pass the v2.0 variant.
    return nova_client.Client(
        self.NOVACLIENT_VERSION,
        self.conf.username,
        self.conf.password,
        self.conf.tenant_name,
        self.v2_auth_url,
        service_type='compute',
        endpoint_type='publicURL',
        region_name=self.conf.region,
        no_cache=True,
        insecure=self.insecure,
        cacert=self.ca_file,
        http_log_debug=True)
def _get_network_client(self):
    """Return a neutron client for the configured tenant."""
    # neutronclient cannot use a v3 auth URL.
    return neutron_client.Client(
        auth_url=self.v2_auth_url,
        username=self.conf.username,
        password=self.conf.password,
        tenant_name=self.conf.tenant_name,
        endpoint_type='publicURL',
        insecure=self.insecure,
        ca_cert=self.ca_file)
def _get_volume_client(self):
    """Return a cinder client for the configured tenant."""
    # cinderclient cannot use a v3 auth URL.
    return cinder_client.Client(
        self.CINDERCLIENT_VERSION,
        self.conf.username,
        self.conf.password,
        self.conf.tenant_name,
        self.v2_auth_url,
        region_name=self.conf.region,
        endpoint_type='publicURL',
        insecure=self.insecure,
        cacert=self.ca_file,
        http_log_debug=True)
de | f _get_object_client(self):
args = {
'auth_version': self.auth_version,
'tenant_name': self.conf.tenant_name,
'user': self.conf.username,
'key': self.conf.password,
'authurl': self.conf.auth_url,
'os_options': {'endpoint_type': 'publicURL'},
'insecure': self.insecure,
'cacert': self.ca_file,
}
return swift_client.Connection(**args)
def _get_metering_client(self):
user_domain_name = self.conf.user_domain_name
project_domain_name = self.conf.project_domain_name
try:
endpoint = self.identity_client.get_endpoint_url('metering',
self.conf.region)
except kc_exceptions.EndpointNotFound:
return None
else:
args = {
'username': self.conf.username,
'password': self.conf.password,
'tenant_name': self.conf.tenant_name,
'auth_url': self.conf.auth_url,
'insecure': self.insecure,
'cacert': self.ca_file,
'region_name': self.conf.region,
'endpoint_type': 'publicURL',
'service_type': 'metering',
}
# ceilometerclient can't ignore domain details for
# v2 auth_url
if self.auth_version == '3':
args.update(
{'user_domain_name': user_domain_name,
'project_domain_name': project_domain_name})
return ceilometer_client.Client(self.CEILOMETER_VERSION,
endpoint, **arg |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow.persistence.backends import impl_memory
from taskflow import task
from taskflow import test
from taskflow.utils import persistence_utils as p_utils
class ProgressTask(task.Task):
    """Task that emits evenly spaced intermediate progress updates."""

    def __init__(self, name, segments):
        super(ProgressTask, self).__init__(name=name)
        self._segments = segments

    def execute(self):
        # Emit (segments - 1) intermediate updates; per the tests below,
        # 0.0 and 1.0 appear to be fired automatically around execution.
        if self._segments <= 0:
            return
        for step in range(1, self._segments):
            self.update_progress(float(step) / self._segments)
class ProgressTaskWithDetails(task.Task):
    """Task that sends a single progress event carrying extra details."""

    def execute(self):
        self.notifier.notify(
            task.EVENT_UPDATE_PROGRESS,
            {'progress': 0.5, 'test': 'test data', 'foo': 'bar'})
class TestProgress(test.TestCase):
    """Checks that task progress events are published and persisted."""

    def _make_engine(self, flow, flow_detail=None, backend=None):
        engine = taskflow.engines.load(flow,
                                       flow_detail=flow_detail,
                                       backend=backend)
        engine.compile()
        engine.prepare()
        return engine

    def tearDown(self):
        super(TestProgress, self).tearDown()
        # Wipe any state left behind in the memory backend.
        with contextlib.closing(impl_memory.MemoryBackend({})) as backend:
            with contextlib.closing(backend.get_connection()) as conn:
                conn.clear_all()

    def test_sanity_progress(self):
        captured = []

        def on_progress(event_type, details):
            captured.append(details.pop('progress'))

        ev_count = 5
        t = ProgressTask("test", ev_count)
        t.notifier.register(task.EVENT_UPDATE_PROGRESS, on_progress)
        flow = lf.Flow("test")
        flow.add(t)
        self._make_engine(flow).run()
        # (segments - 1) intermediate events plus the automatic 0.0 and 1.0.
        self.assertEqual(ev_count + 1, len(captured))
        self.assertEqual(1.0, captured[-1])
        self.assertEqual(0.0, captured[0])

    def test_no_segments_progress(self):
        captured = []

        def on_progress(event_type, details):
            captured.append(details.pop('progress'))

        t = ProgressTask("test", 0)
        t.notifier.register(task.EVENT_UPDATE_PROGRESS, on_progress)
        flow = lf.Flow("test")
        flow.add(t)
        self._make_engine(flow).run()
        # 0.0 and 1.0 should be automatically fired
        self.assertEqual(2, len(captured))
        self.assertEqual(1.0, captured[-1])
        self.assertEqual(0.0, captured[0])

    def test_storage_progress(self):
        with contextlib.closing(impl_memory.MemoryBackend({})) as backend:
            flow = lf.Flow("test")
            flow.add(ProgressTask("test", 3))
            _book, fd = p_utils.temporary_flow_detail(backend)
            engine = self._make_engine(flow, flow_detail=fd, backend=backend)
            engine.run()
            self.assertEqual(1.0, engine.storage.get_task_progress("test"))
            td = fd.find(engine.storage.get_atom_uuid("test"))
            self.assertEqual(1.0, td.meta['progress'])
            self.assertFalse(td.meta['progress_details'])

    def test_storage_progress_detail(self):
        flow = ProgressTaskWithDetails("test")
        engine = self._make_engine(flow)
        engine.run()
        self.assertEqual(1.0, engine.storage.get_task_progress("test"))
        details = engine.storage.get_task_progress_details("test")
        self.assertEqual(details.get('at_progress'), 0.5)
        self.assertEqual(details.get('details'), {
            'test': 'test data',
            'foo': 'bar'
        })

    def test_dual_storage_progress(self):
        captured = []

        def on_progress(event_type, details):
            captured.append(details.pop('progress'))

        with contextlib.closing(impl_memory.MemoryBackend({})) as backend:
            t = ProgressTask("test", 5)
            t.notifier.register(task.EVENT_UPDATE_PROGRESS, on_progress)
            flow = lf.Flow("test")
            flow.add(t)
            _book, fd = p_utils.temporary_flow_detail(backend)
            engine = self._make_engine(flow, flow_detail=fd, backend=backend)
            engine.run()
            self.assertEqual(1.0, engine.storage.get_task_progress("test"))
            td = fd.find(engine.storage.get_atom_uuid("test"))
            self.assertEqual(1.0, td.meta['progress'])
            self.assertFalse(td.meta['progress_details'])
            self.assertEqual(6, len(captured))
|
.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'" | , 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'descr | iption': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.Intege |
# Configuration for a single echoRD generation test run.
mcinif = 'mcini_gen2'
runname = 'gen_test2111b'
mcpick = 'gen_test2b.pickle'
pathdir = '/beegfs/work/ka_oj4748/echoRD'
wdir = '/beegfs/work/ka_oj4748/gen_tests'
update_prec = 0.04
update_mf = False
update_part = 500

import sys
sys.path.append(pathdir)
import run_echoRD as rE

# Launch the echoRD job with the settings above (plain pickle output).
rE.echoRD_job(mcinif=mcinif,
              mcpick=mcpick,
              runname=runname,
              wdir=wdir,
              pathdir=pathdir,
              update_prec=update_prec,
              update_mf=update_mf,
              update_part=update_part,
              hdf5pick=False)
|
#! /usr/bin/env python3
# Demonstration of common Python list operations.
a = [1, 3, 4, 5, 8]
a.append(23)     # add an element at the last position of the list
a.insert(0, 23)  # insert at the first position (index 0)
a.insert(2, 21)  # insert at index 2 (the third position, not the second)
print("list : ", a)

del a[-1]        # delete the last element
a.remove(3)      # remove the first occurrence of the value 3
# Bug fix: corrected the typo "opeartion" in the printed message.
print("deletion operation on list : ", a)

a.append(11)
k = a.count(11)  # number of occurrences of 11 in the list
print("k : ", k)

b = [34, 56, 221, 3]
a.append(b)      # append list b as a single (nested) element of a
print(a)
a.extend(b)      # append each element of b individually
print(a)
a.remove(b)      # remove the nested list that append(b) added

a.reverse()
print("reverse : ", a)
a.sort()
print(a)
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
import sys
import constants
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.buildbot import repository
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
# pylint: disable=W0212,R0904,E1101,W0613
class RepositoryTests(cros_test_lib.MoxTestCase):
    """Tests for detecting internal vs. external repo checkouts."""

    def RunCommand_Mock(self, result, *args, **kwargs):
        output = self.mox.CreateMockAnything()
        output.output = result
        return output

    def _CheckManifestUrls(self, urls, internal):
        """Stub RunCommand to report each url and verify the verdict."""
        self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
        for url in urls:
            cros_build_lib.RunCommand = functools.partial(
                self.RunCommand_Mock, url)
            if internal:
                self.assertTrue(repository.IsInternalRepoCheckout('.'))
            else:
                self.assertFalse(repository.IsInternalRepoCheckout('.'))

    def testExternalRepoCheckout(self):
        """Test we detect external checkouts properly."""
        self._CheckManifestUrls([
            'https://chromium.googlesource.com/chromiumos/manifest.git',
            'ssh://gerrit-int.chromium.org:29419/chromeos/manifest.git',
            'test@abcdef.bla.com:39291/bla/manifest.git',
            'test@abcdef.bla.com:39291/bla/manifest',
            'test@abcdef.bla.com:39291/bla/Manifest-internal',
        ], internal=False)

    def testInternalRepoCheckout(self):
        """Test we detect internal checkouts properly."""
        self._CheckManifestUrls([
            'ssh://gerrit-int.chromium.org:29419/chromeos/manifest-internal.git',
            'ssh://gerrit-int.chromium.org:29419/chromeos/manifest-internal',
            'ssh://gerrit.chromium.org:29418/chromeos/manifest-internal',
            'test@abcdef.bla.com:39291/bla/manifest-internal.git',
        ], internal=True)
class RepoInitTests(cros_test_lib.MoxTempDirTestCase):
    """Exercises repo initialization and branch switching."""

    def _Initialize(self, branch='master'):
        repo = repository.RepoRepository(constants.MANIFEST_URL, self.tempdir,
                                         branch=branch)
        repo.Initialize()

    def testReInitialization(self):
        """Test ability to switch between branches."""
        self._Initialize('release-R19-2046.B')
        self._Initialize('master')
        # A failed re-init on a bogus branch must not corrupt the checkout:
        # a later init on a valid branch should still succeed.
        self.assertRaises(Exception, self._Initialize, 'monkey')
        self._Initialize('release-R20-2268.B')
class RepoInitChromeBotTests(RepoInitTests):
  """Test that Re-init works with the chrome-bot account.

  In testing, repo init behavior on the buildbots is different from a
  local run, because there is some logic in 'repo' that filters changes based
  on GIT_COMMITTER_IDENT. So for sanity's sake, try to emulate running on the
  buildbots.
  """

  def setUp(self):
    # Bug fix: os.putenv() changes the process environment without updating
    # os.environ, leaving the two views inconsistent (anything reading
    # os.environ sees stale values). Assigning through os.environ updates
    # both, as recommended by the Python os module documentation.
    os.environ['GIT_COMMITTER_EMAIL'] = 'chrome-bot@chromium.org'
    os.environ['GIT_AUTHOR_EMAIL'] = 'chrome-bot@chromium.org'
# Entry point: run all test cases in this module via chromite's test runner.
if __name__ == '__main__':
    cros_test_lib.main()
|
def generate_dispatcher(method_handler, parent_class=None):
    """
    Create a dispatcher class and return an instance of it from a dispatcher
    definition (Python 2: builds class source as a string and exec's it).

    The definition is a class with the following attributes:

    - EXPORTED_METHODS: dictionary where keys are method names and values
      class attribute names of the attributes holding references to an object
      implementing the method
    - attributes named in EXPORTED_METHODS values. They must contain an
      object instance which implements the respective methods
      (EXPORTED_METHODS keys)

    Ex:
        class TestDispatchHandler:
            EXPORTED_METHODS = {'method1': 'attr1',
                                'method2': 'attr1',
                                'method3': 'attr2'}
            attr1 = Object1()
            attr2 = Object2()

    where Object1 is a class which provides method1 and method2 and Object2 a
    class which provides method3.

        obj_inst = generate_dispatcher(TestDispatchHandler)

    stores in 'obj_inst' an instance providing method1, method2 and method3,
    each delegated to the correct handler object.

    NOTE(review): this exec's generated source; method_handler must come
    from trusted code only.
    """
    # Build the class header; when a parent class is given, the generated
    # constructor chains up to its __init__.
    if parent_class:
        class_str = 'class Dispatcher(%s):\n' % parent_class
        statements = ' %s.__init__(self)\n' % parent_class
    else:
        class_str = 'class Dispatcher:\n'
        statements = ''
    # Generate one forwarding method per exported method; 'registered'
    # collects the unique handler attribute names for the constructor.
    registered = []
    for method, objname in method_handler.EXPORTED_METHODS.items():
        if not objname in registered:
            registered.append(objname)
        class_str = '%s def %s(self, *attrs):\n return self.%s.%s(*attrs)\n'%\
            (class_str, method, objname, method)
    # Generate the constructor: one parameter per handler object, each
    # stored on self under the same name.
    attrs = ''
    for objname in registered:
        attrs = '%s, %s' % (attrs, objname)
        statements = '%s self.%s=%s\n' % (statements, objname, objname)
        # retrieve object reference in current context so the eval'd
        # constructor call below can see it by name
        exec '%s=getattr(method_handler, "%s")'%(objname, objname)
    # assemble all parts: header + forwarding methods + constructor
    class_str = '%s def __init__(self%s):\n%s' % (class_str, attrs, statements)
    # now we can eval the full class
    exec class_str
    # return an instance of constructed class
    return eval('Dispatcher(%s)'%attrs[2:]) # attrs[2:] for removing ', '
|
# should be using spacy for everything NLP from now on
from ml.document_features import en_nlp, selectContentWords
from proc.query_extraction | import SentenceQueryExtractor, EXTRACTOR_LIST
class FilteredSentenceQueryExtractor(SentenceQueryExtractor):
    """Sentence query extractor that keeps only content words.

    Runs each sentence through spaCy and builds the query text from the
    selected content words rather than the raw sentence text.
    """

    def getQueryTextFromSentence(self, sent):
        parsed = en_nlp(sent["text"])
        return " ".join(selectContentWords(parsed))
# Register the filtered extractor under its registry name.
EXTRACTOR_LIST["Sentences_filtered"] = FilteredSentenceQueryExtractor()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-1 | 3 03:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter UserProfile.follows to the ManyToManyField defined below."""

    dependencies = [
        ('website', '0036_auto_20170813_0049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='follows',
            field=models.ManyToManyField(
                blank=True,
                related_name='follower',
                to='website.UserProfile'),
        ),
    ]
|
children.
for child in self.get_node_children(node):
if child is not None:
# It actually has this child, so look at it.
stack.append(child)
def iteritems(self):
    """
    Yield each (key, value) pair in the AuthenticatedDictionary, using the
    actual key strings rather than key hashes.

    The caller may not add to or remove from the AuthenticatedDictionary
    while iterating over it. Modifying values should be OK.
    """
    # Depth-first walk starting from the root node.
    pending = ["root"]
    while pending:
        pointer = pending.pop()
        node_struct = self.load_node(pointer)
        if node_struct.key is not None:
            # Leaf (data) node: report its payload.
            yield (node_struct.key, node_struct.value)
        else:
            # Interior node: queue every child that actually exists.
            pending.extend(
                child for child in self.get_node_children(pointer)
                if child is not None)
def insert(self, key, value):
    """
    Insert the given value into the trie under the given key. The key and
    the value must both be strings.
    """
    # Entries are located by the hex SHA-512 digest of the key string.
    digest = util.bytes2hex(hashlib.sha512(key).digest())
    self.recursive_insert("root", digest, key, 0, value)
def recursive_insert(self, node, key_hash, key, level, value):
    """
    Insert the given value under the given key with the given hash (in hex)
    into the subtree rooted by the given node. level indicates the character
    in key_hash that corresponds to this node.

    Three child-slot cases are handled below: an empty slot (create a new
    leaf), an occupied leaf (overwrite on key match, otherwise split by
    pushing both entries one level deeper), and an interior child
    (recurse into it). The node's Merkle hash is recomputed after the
    subtree changes, and several consistency checks raise on corruption.
    """
    # It goes under the child slot corresponding to the level-th
    # character of the key hash.
    if level >= len(key_hash):
        raise Exception("Tree deeper ({}) than length of keys.".format(
            level))
    # Which child slot do we use?
    child_index = int(key_hash[level], base=16)
    # Get the child pointer value, or None if there is no child there.
    child = self.get_node_children(node)[child_index]
    # logging.debug('INSERT to [{}:{}] with "{}" {} bytes'.format(
    # self.store.table, node, key_hash[:8], len(value)))
    if child is None:
        # If that slot is empty, put the value there in a new node.
        child = self.create_node()
        self.set_node_key(child, key)
        self.set_node_value(child, value)
        self.update_node_hash(child)
        # Attach the node in the right place
        self.set_node_child(node, child_index, child)
    else:
        # Get the child's key
        child_key = self.get_node_key(child)
        if child_key == key:
            # If the slot has a node with the same key, overwrite
            # it.
            self.set_node_value(child, value)
            self.update_node_hash(child)
            # Consistency check: the hash index must resolve back to
            # this child after rehashing.
            if self.get_node_by_hash(self.get_node_hash(child)) != child:
                raise Exception("Inconsistent insert")
            if self.load_node(child).children is not None:
                raise Exception("Updated value on node with children")
        elif child_key is not None:
            # If the slot has a node with a different key hash,
            # recursively insert both that old value and this new ones
            # as children of the node that's there.
            # Hash the child key
            child_key_hash = util.bytes2hex(hashlib.sha512(
                child_key).digest())
            # Get the value the child was storing.
            child_value = self.get_node_value(child)
            # Blank it out
            self.set_node_key(child, None)
            self.set_node_value(child, None)
            # Store the value that was there as a child of the child
            # node.
            self.recursive_insert(child, child_key_hash, child_key,
                level + 1, child_value)
            # Store our value as a (hopefully different) child of the
            # child node.
            self.recursive_insert(child, key_hash, key, level + 1, value)
            if self.get_node_by_hash(self.get_node_hash(child)) != child:
                raise Exception("Inconsistent insert")
            if (self.get_node_key(child) is not None or
                self.get_node_value(child) is not None):
                raise Exception("Node with children added still has value")
        else:
            # If the slot has a node with no key hash (i.e. it has
            # children), insert the new value as a child of that node.
            self.recursive_insert(child, key_hash, key, level + 1, value)
            if self.get_node_by_hash(self.get_node_hash(child)) != child:
                raise Exception("Inconsistent insert")
            if (self.get_node_key(child) is not None or
                self.get_node_value(child) is not None):
                raise Exception("Node with children added still has value")
    # Update our Merkle hash
    self.update_node_hash(node)
    if self.get_node_by_hash(self.get_node_hash(node)) != node:
        raise Exception("Inconsistent insert")
def remove(self, key):
    """
    Remove the value under the given key from the trie. The key must be a
    string that is present in the trie.
    """
    # Locate the entry by the hex SHA-512 digest of the key, then delete.
    digest = util.bytes2hex(hashlib.sha512(key).digest())
    self.recursive_remove("root", digest, key, 0)
def recursive_remove(self, node, key_hash, key, level):
"""
Remove the value with the given key (which has the given hash) from the
subtree rooted at the given node. The key hash is in hex, and level is
the character in that hash being used at this level to decide on a child
storage location.
The algorithm works by the invariant that every leaf node (i.e. one with
a value) has a sibling.
If the key to remove is our direct descendant, drop it. This will never
leave us with no children, since every leaf node has a sibling. It may
leave us with one child with a value that now has no siblings.
If the key to remove is our indirect descendant, we know it has a
sibling. Remove the key we are removing, recursively. If this leaves a
leaf node without any siblings, that value will be promoted to the child
we recursed into. This may leave us with one child with a value that now
has no siblings.
If we now have only one child, which has a value, promote that value to
this node and drop the child.
"""
# The key can be found under the child slot corresponding to the level-
# th character of the key hash.
if level >= len(key_hash):
raise Exception("Tree deeper ({}) than length of keys.".format(
level))
# Which child slot do we use?
child_index = int(key_hash[level], base=16)
# Get the pointer value, or None if there is no child there.
child = self.get_node_children(node)[child_index]
# logging.debug('REMOVE from [{}:{}] key {}'.format(self.store.table, node, key_hash[:8]))
if child is None:
# If that slot is empty, the key can't possibly be in the trie.
raise Exception("Tried to remove key hash {} that wasn't in the "
"trie".format(key_hash))
child_key = self.get_node_key(child)
if child_key == key:
# If the slot has a node with the same key, we've found its leaf
# node. Remove the leaf node.
self.delete_node(child)
# Set t |
#!/usr/bin/python
import json
import parcon
import operator
import pprint
import os
import sys
import getopt
import re
import optparse
import md5
import hashlib
import version
###
def sign_data(data_to_sign):
    """Return the hex MD5 digest of *data_to_sign*."""
    return hashlib.md5(data_to_sign).hexdigest()
####
def source_file_signature(filename, version_number = version.VERSION):
    """Return 'name.py:0xVERSION:md5-of-file' for the given source path."""
    directory, base = os.path.split(filename)
    # Normalize .pyc/.pyo paths back to the .py source file.
    module_file = os.path.splitext(base)[0] + ".py"
    digest = sign_data(open(os.path.join(directory, module_file)).read())
    return "%s:%s:%s" % (module_file, hex(version_number), digest)
#### Tool version: signature of this script itself.
VERSION_STR = source_file_signature(__file__)
#### Initial signature token that the real signature replaces.
SIGNATURE_TOKEN = '<<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>'
#### A signature header (prefixed to the signed file); %s receives the
#### token while hashing and the final signature in the output.
header_template = """// @generated %s
// signed with: https://github.com/korovkin/WNNotifier/notifier/sign.py
"""
#####
def sign(options, data):
    """
    Sign the given data, yielding a signature that can be verified by
    phabricator and lint.

    Replaces the following string in the input file:
        // @generated <<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>
    with:
        // @generated SignedSource<<md5 of the whole file including the previous line>>

    Returns (signed_data, data_to_sign).
    """
    # The digest covers the header with the literal token plus the payload.
    data_to_sign = (header_template % SIGNATURE_TOKEN) + data
    signature = "SignedSource<<%s>>" % sign_data(data_to_sign)
    return ((header_template % signature) + data, data_to_sign)
#####
def | write_data_to_sign(options, data_to_sign, filename):
if options.store_signed_data:
open(filename + ".to.sign", "w").write(data_to_sign)
#####
def main():
    """Command-line driver: sign stdin to stdout, or sign files in place."""
    parser = optparse.OptionParser(usage="\n python %prog <options> <source file1>...\n or \n python %prog <options> < file.in > file.out")
    parser.add_option("", "--version",
        action="store_true",
        help="print version number",
        dest="version",
        default=False)
    parser.add_option("", "--store_signed_data",
        action="store_true",
        help="store ",
        dest="store_signed_data",
        default=False)
    (options, filenames) = parser.parse_args()
    if options.version:
        # Print this tool's own signature string and stop.
        print(VERSION_STR)
        sys.exit(0)
    if filenames == []:
        # No file arguments: act as a stdin -> stdout filter.
        sys.stderr.write(os.path.split(__file__)[1] + " warning: reading input from stdin...\n")
        data = sys.stdin.read()
        signed_data, data_to_sign = sign(options, data)
        print signed_data,
        write_data_to_sign(options, data_to_sign, "data_to_sign.to.sign")
    else:
        # Sign each named file in place (read, sign, overwrite).
        for filename in filenames:
            data = open(filename, "r").read()
            signed_data, data_to_sign = sign(options, data)
            open(filename, "w").write(signed_data)
            print "signed, ", filename
            write_data_to_sign(options, data_to_sign, filename + ".to.sign")
#####
if __name__ == "__main__":
    main()
|
import pickle
import scipy.misc
import numpy as np
from sys import argv
def plot_25_figure(images, output_name, num_channels = 1):
    """
    Tile the first 25 images into a 5x5 grid and save it to output_name.

    images: array of shape (25, H, W) for grayscale or (25, H, W, 3) for RGB.

    Fix: the grayscale branch hard-coded 28 (MNIST) for the final grid size;
    it now uses the actual HEIGHT/WIDTH, matching the RGB branch.
    """
    HEIGHT, WIDTH = images.shape[1], images.shape[2]
    if num_channels == 1:
        images = images.reshape((5, 5, HEIGHT, WIDTH))
        # rowx, rowy, height, width -> rowy, height, rowx, width
        images = images.transpose(1, 2, 0, 3)
        images = images.reshape((5 * HEIGHT, 5 * WIDTH))
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
        # module assumes an older SciPy (with PIL) is installed.
        scipy.misc.toimage(images, cmin=0.0, cmax=1.0).save(output_name)
    elif num_channels == 3:
        images = images.reshape((5, 5, HEIGHT, WIDTH, 3))
        images = images.transpose(1, 2, 0, 3, 4)
        images = images.reshape((5 * HEIGHT, 5 * WIDTH, 3))
        scipy.misc.toimage(images).save(output_name)
    else:
        raise Exception("You should not be here!! Only 1 or 3 channels allowed for images!!")
def plot_100_figure(images, output_name, num_channels = 1):
    """
    Tile the first 100 images into a 10x10 grid and save it to output_name.

    images: array of shape (100, H, W) for grayscale or (100, H, W, 3) for RGB.

    Fix: the grayscale branch hard-coded 28 (MNIST) for the final grid size;
    it now uses the actual HEIGHT/WIDTH, matching the RGB branch.
    """
    HEIGHT, WIDTH = images.shape[1], images.shape[2]
    if num_channels == 1:
        images = images.reshape((10, 10, HEIGHT, WIDTH))
        # rowx, rowy, height, width -> rowy, height, rowx, width
        images = images.transpose(1, 2, 0, 3)
        images = images.reshape((10 * HEIGHT, 10 * WIDTH))
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
        # module assumes an older SciPy (with PIL) is installed.
        scipy.misc.toimage(images, cmin=0.0, cmax=1.0).save(output_name)
    elif num_channels == 3:
        images = images.reshape((10, 10, HEIGHT, WIDTH, 3))
        images = images.transpose(1, 2, 0, 3, 4)
        images = images.reshape((10 * HEIGHT, 10 * WIDTH, 3))
        scipy.misc.toimage(images).save(output_name)
    else:
        raise Exception("You should not be here!! Only 1 or 3 channels allowed for images!!")
if __name__ == "__main__":
    # Usage: python <script> <pickled-array-file>; saves "<basename>.jpg".
    # SECURITY NOTE(review): pickle.load on a user-supplied path executes
    # arbitrary code if the file is untrusted.
    X = pickle.load(open(argv[1],'rb'))
    output_name = argv[1].split('/')[-1].split('.')[0] + '.jpg'
    plot_25_figure(X, output_name)
|
nt(configs['txPassword'])
print(self.sAccountNo)
except:
return ('error with read setting files')
def get_config(self):
config = configparser.ConfigParser()
try:
config.read(
str(os.path.dirname(os.path.realpath(__file__))) + '\setting.ini')
self.sHost = config['trade-mock']['host']
self.nPort = config['trade-mock']['port']
self.sVersion = config['trade-mock']['version']
self.sBranchID = config['trade-mock']['branchID']
self.sAccountNo = config['trade-mock']['accountNo']
self.sTradeAccountNo = config['trade-mock']['tradeAccountNo']
self.sPassword = int(config['trade-mock']['password'])
self.sTxPassword = int(config['trade-mock']['txPassword'])
config_setting = {
"host": config['trade-mock']['host'],
"port": config['trade-mock']['port'],
"version": config['trade-mock']['version'],
"branchID": config['trade-mock']['branchID'],
"accountNo": config['trade-mock']['accountNo'],
"tradeAccountNo": config['trade-mock']['tradeAccountNo'],
"password": int(config['trade-mock']['password']),
"txPassword": int(config['trade-mock']['txPassword'])
}
return config_setting
except:
return ('error with read setting files')
def QA_trade_stock_login(self):
try:
TradeX.OpenTdx()
client = TradeX.Logon(str(self.sHost), int(self.nPort), str(self.sVersion), int(self.sBranchID),
str(self.sAccountNo), str(
self.sTradeAccountNo),
str(self.sPassword), str(self.sTxPassword))
return client
except TradeX.error as e:
return ("error: " + e.message)
def QA_trade_stock_login_with_config(self, config):
try:
TradeX.OpenTdx()
client = TradeX.Logon(str(config[0]), int(config[1]), str(config[2]), int(self.sBranchID),
str(self.sAccountNo), str(
self | .sTradeAccountNo),
str(self.sPassword), str(self.sTxPassword))
return client
except TradeX.error as e:
return ("error: " + e.message)
"""
| nCategory
0 资金
1 股份
2 当日委托
3 当日成交
4 可撤单
5 股东代码
6 融资余额
7 融券余额
8 可融证券
9
10
11
12 可申购新股查询
13 新股申购额度查询
14 配号查询
15 中签查询
"""
def QA_trade_stock_get_cash(self, _client):
# 资金
self.nCategory = 0
_errinfo, self.result = _client.QueryData(self.nCategory)
if _errinfo != "":
return (_errinfo)
else:
accounts = self.result.split('\n')[1].split('\t')
account = {}
account['account_id'] = accounts[0]
account['available'] = accounts[3]
account['freeze'] = accounts[4]
account['on_way'] = accounts[5]
account['withdraw'] = accounts[6]
return account
def QA_trade_stock_get_stock(self, client):
# 股份
self.nCategory = 1
_errinfo, self.result = client.QueryData(self.nCategory)
if _errinfo != "":
return (_errinfo)
else:
stocks = self.result.split('\n')
stock = []
for i in range(1, len(stocks)):
temp = {}
temp['code'] = stocks[i].split('\t')[0]
temp['name'] = stocks[i].split('\t')[1]
temp['number'] = stocks[i].split('\t')[2]
temp['hold'] = stocks[i].split('\t')[3]
temp['sell_available'] = stocks[i].split('\t')[4]
temp['price_now'] = stocks[i].split('\t')[5]
temp['value_now'] = stocks[i].split('\t')[6]
temp['price_buy'] = stocks[i].split('\t')[7]
temp['pnl_float'] = stocks[i].split('\t')[8]
temp['pnl_ratio'] = stocks[i].split('\t')[9]
temp['account_type'] = stocks[i].split('\t')[10]
temp['account_id'] = stocks[i].split('\t')[11]
temp['shareholder'] = stocks[i].split('\t')[12]
temp['exchange'] = stocks[i].split('\t')[13]
temp['trade_mark'] = stocks[i].split('\t')[14]
temp['insure_mark'] = stocks[i].split('\t')[15]
temp['buy_today'] = stocks[i].split('\t')[16]
temp['sell_today'] = stocks[i].split('\t')[17]
temp['position_buy'] = stocks[i].split('\t')[18]
temp['position_sell'] = stocks[i].split('\t')[19]
temp['price_yesterday'] = stocks[i].split('\t')[20]
temp['margin'] = stocks[i].split('\t')[21]
stock.append(temp)
return stock
def QA_trade_stock_get_orders(self, client):
# 当日委托
self.nCategory = 2
_errinfo, self.result = client.QueryData(self.nCategory)
if _errinfo != "":
return (_errinfo)
else:
return self.result
def QA_trade_stock_get_deals(self, client):
# 当日成交
self.nCategory = 2
_errinfo, self.result = client.QueryData(self.nCategory)
if _errinfo != "":
return (_errinfo)
else:
print(self.result)
return self.result
def QA_trade_stock_get_holder(self, client):
# 股东代码
self.nCategory = 5
_errinfo, self.result = client.QueryData(self.nCategory)
if _errinfo != "":
print(_errinfo)
else:
# print(self.result.split('\n')[1].split('\t')[0])
# print(self.result.split('\n')[2].split('\t')[0])
return [self.result.split('\n')[1].split('\t')[0], self.result.split('\n')[2].split('\t')[0]]
"""
nCategory - 委托业务的种类
0 买入
1 卖出
2 融资买入
3 融券卖出
4 买券还券
5 卖券还款
6 现券还券
nOrderType - 委托报价方式
0 限价委托; 上海限价委托/ 深圳限价委托
1 市价委托(深圳对方最优价格)
2 市价委托(深圳本方最优价格)
3 市价委托(深圳即时成交剩余撤销)
4 市价委托(上海五档即成剩撤/ 深圳五档即成剩撤)
5 市价委托(深圳全额成交或撤销)
6 市价委托(上海五档即成转限价)
sAccount - 股东代码
sStockCode - 证券代码
sPrice - 价格
sVolume - 委托证券的股数
返回值:
_errinfo - 出错时函数抛出的异常信息;
result - 查询到的数据。
nCategory = 0
nOrderType = 4
sInvestorAccount = "p001001001005793"
sStockCode = "601988"
sPrice = 0
sVolume = 100
"""
def QA_trade_stock_post_order(self, client, order):
if len(order) == 6:
_errinfo, self.result = client.SendOrder(
order[0], order[1], order[2], order[3], order[4], order[5])
if _errinfo != "":
print(_errinfo)
else:
print(self.result)
    def QA_trade_stock_post_orders(self, orderLists):
        # NOTE(review): unimplemented stub -- the parameter is immediately
        # overwritten with a hard-coded example payload and nothing is sent.
        # Kept as-is; the literal below documents the expected order schema.
        orderLists = [{
            "nCategory": 0,
            "nOrderType": 4,
            "sInvestorAccount": "p001001001005793",
            "sStockCode": "601988",
            "sPrice": 0,
            "sVolume": 100
        }, {
            "nCategory": 0,
            "nOrderType": 4,
            "sInvestorAccount": "p001001001005793",
            "sStockCode": "601988",
            "sPrice": 0,
            "sVolume": 100
        }]
        pass
def QA_trade_stock_delete_order(self, client, order_list):
"""
参数:
nMarket - 市场代码0:深圳,1:上海
Orderid - 可撤销的委托单号
返回值:
_errinfo - 出错时函数抛出的异常信息;
result - 查询到的数据。
"""
_errinfo, result = client.CancelOrder(
int(order_list[0]), str(order_list[ |
import urllib
import os
import json
import time
import datetime
import sublime
from xml.sax.saxutils import escape
from .. import requests
from .. import util
from ..libs import auth
# https://github.com/xjsender/simple-salesforce/blob/master/simple_salesforce/login.py
def soap_login(settings, session_id_expired=False, timeout=10):
    """
    Log in to Salesforce via the SOAP partner API.

    Returns a session dict (session_id, instance/rest/metadata/apex/partner
    URLs, ready-made REST headers, time stamp) and persists it via
    util.add_config_history.  A cached session is reused until it is older
    than settings["force_login_interval"] minutes, unless session_id_expired
    forces a fresh login.  On network failure the login is retried up to 12
    times before returning {"success": False, ...}.

    Fix: the bare `except:` guarding the cached-session freshness check is
    narrowed to `except Exception:` so KeyboardInterrupt/SystemExit are not
    swallowed.
    """
    if not session_id_expired:
        session = util.get_session_info(settings)
        try:
            # Force login again every force_login_interval minutes
            time_stamp = session.get("time_stamp")
            dt = datetime.datetime.strptime(time_stamp, "%Y-%m-%d %H:%M:%S")
            intervalDT = datetime.timedelta(minutes=settings["force_login_interval"])
            if (dt + intervalDT) >= datetime.datetime.now():
                return session
        except Exception:
            # Missing or corrupt cache entry -> fall through to fresh login.
            pass
    login_soap_request_body = """<?xml version="1.0" encoding="utf-8" ?>
        <env:Envelope
                xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xmlns:env="http://schemas.xmlsoap.org/soap/envelope/">
            <env:Body>
                <n1:login xmlns:n1="urn:partner.soap.sforce.com">
                    <n1:username>{username}</n1:username>
                    <n1:password>{password}</n1:password>
                </n1:login>
            </env:Body>
        </env:Envelope>
    """.format(
        username = settings["username"],
        # Password is XML-escaped; the security token is appended per the
        # Salesforce SOAP login contract.
        password = escape(settings["password"]) + settings["security_token"]
    )
    headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'login'
    }
    try:
        response = requests.post(settings["soap_login_url"], login_soap_request_body,
                                 verify=False, headers=headers, timeout=timeout)
    except requests.exceptions.RequestException as e:
        # Track retries in module globals so the recursive calls share state.
        if "repeat_times" not in globals():
            globals()["repeat_times"] = 1
        else:
            globals()["repeat_times"] += 1
        if settings["debug_mode"]:
            print ("Login Exception: " + str(e))
            print ("repeat_times: " + str(globals()["repeat_times"]))
        if globals()["repeat_times"] <= 12:
            return soap_login(settings, True, timeout)
        result = {
            "Error Message": "Network connection timeout",
            "success": False
        }
        return result
    # If request succeed, just clear repeat_times
    if "repeat_times" in globals():
        del globals()["repeat_times"]
    result = {}
    if response.status_code != 200:
        # Log the error message
        if settings["debug_mode"]:
            print (response.content)
        except_msg = util.getUniqueElementValueFromXmlString(response.content, 'sf:exceptionMessage')
        result["Error Message"] = except_msg
        result["success"] = False
        return result
    session_id = util.getUniqueElementValueFromXmlString(response.content, 'sessionId')
    server_url = util.getUniqueElementValueFromXmlString(response.content, 'serverUrl')
    instance_url = server_url[ : server_url.find('/services')]
    user_id = util.getUniqueElementValueFromXmlString(response.content, 'userId')
    result = {
        "project name": settings["default_project"]["project_name"],
        "session_id": session_id,
        "metadata_url": instance_url + "/services/Soap/m/%s.0" % settings["api_version"],
        "rest_url": instance_url + "/services/data/v%s.0" % settings["api_version"],
        "apex_url": instance_url + "/services/Soap/s/%s.0" % settings["api_version"],
        "partner_url": instance_url + "/services/Soap/u/%s.0" % settings["api_version"],
        "instance_url": instance_url,
        "user_id": user_id,
        "time_stamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())),
        "headers": {
            "Authorization": "OAuth " + session_id,
            "Content-Type": "application/json; charset=UTF-8",
            "Accept": "application/json"
        },
        "success": response.status_code < 399,
    }
    # If session is expired, just write session
    # to .config/session.json
    util.add_config_history('session', result, settings)
    return result
from ..libs import server
# Module-wide singleton holding the local oAuth2 callback server.
sfdc_oauth_server = None
def start_server():
    """Lazily create the module-wide oAuth callback server (idempotent)."""
    global sfdc_oauth_server
    if sfdc_oauth_server is not None:
        return
    sfdc_oauth_server = server.Server()
def stop_server():
    """Stop and discard the oAuth callback server, if one is running."""
    global sfdc_oauth_server
    if sfdc_oauth_server is None:
        return
    sfdc_oauth_server.stop()
    sfdc_oauth_server = None
# Only support grant_type is authorization_code
def rest_login(settings, session_id_expired=False, timeout=10):
    """
    Log in to Salesforce via oAuth2 (authorization_code grant only).

    Reuses a cached session until it is older than
    settings["force_login_interval"] minutes; refreshes via refresh_token
    when available; otherwise opens the authorize URL in a browser and
    returns {"success": False, "error_message": ...} while the local
    callback server waits for the redirect.
    """
    session = util.get_session_info(settings)
    if not session_id_expired:
        try:
            # Force login again every two hours
            time_stamp = session.get("time_stamp")
            dt = datetime.datetime.strptime(time_stamp, "%Y-%m-%d %H:%M:%S")
            intervalDT = datetime.timedelta(minutes=settings["force_login_interval"])
            if (dt + intervalDT) >= datetime.datetime.now():
                return session
        except:
            # NOTE(review): bare except -- deliberate best-effort freshness
            # check (missing/corrupt cache), but `except Exception:` would do.
            pass
    # Get haoide default oAuth2 info
    app = sublime.load_settings("app.sublime-settings")
    oauth = auth.SalesforceOAuth2(
        app.get("client_id"),
        app.get("client_secret"),
        app.get("redirect_uri"),
        login_url=settings["login_url"]
    )
    # If refresh token is exist, just refresh token
    if session and session.get("refresh_token"):
        result = oauth.refresh_token(session.get("refresh_token"))
        # If succeed,
        if result.get("access_token"):
            instance_url = result["instance_url"]
            # Augment the token response into a full session record (same
            # shape as the one produced by soap_login).
            result["project name"] = settings["default_project"]["project_name"]
            result["session_id"] = result["access_token"]
            result["metadata_url"] = instance_url + "/services/Soap/m/%s.0" % settings["api_version"]
            result["rest_url"] = instance_url + "/services/data/v%s.0" % settings["api_version"]
            result["apex_url"] = instance_url + "/services/Soap/s/%s.0" % settings["api_version"]
            result["partner_url"] = instance_url + "/services/Soap/u/%s.0" % settings["api_version"]
            result["instance_url"] = instance_url
            result["time_stamp"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            # The oAuth identity URL ends with the 18-character user id.
            result["user_id"] = result["id"][-18:]
            result["headers"] = {
                "Authorization": "OAuth " + result["access_token"],
                "Content-Type": "application/json; charset=UTF-8",
                "Accept": "application/json"
            }
            result["success"] = True
            result["refresh_token"] = session.get("refresh_token")
            util.add_config_history('session', result, settings)
            return result
        else:
            if settings["debug_mode"]:
                print (result)
            # Remove refresh token and start oAuth2 login again
            result.pop('refresh_token', None)
            util.add_config_history('session', result, settings)
            return rest_login(settings, session_id_expired)
    # Start oAuth2 login process
    authorize_url = oauth.authorize_url(settings["username"])
    start_server()
    util.open_with_browser(authorize_url)
    # Return Message if not login, session expired or session invalid
    error_message = "Waiting for oAuth2 login finished"
    if session_id_expired:
        error_message = "Session invalid or expired, " + error_message
    return {
        "success": False,
        "error_message": error_message
    }
asets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Merge
from keras.utils import np_utils
import numpy as np
# Hyperparameters for the smoke tests below (single epoch, small subsets,
# so the whole script runs quickly).
nb_classes = 10
batch_size = 128
nb_epoch = 1
max_train_samples = 5000
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten the 28x28 images to 784-vectors and keep only small subsets.
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
# Scale pixel values to [0, 1].
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
#########################
# sequential model test #
#########################
# NOTE(review): this file uses the old Keras 0.x API -- Dense(input_dim,
# output_dim), show_accuracy=/nb_epoch= keyword args -- and will not run on
# Keras >= 1.0.
print('Test sequential')
model = Sequential()
model.add(Dense(784, 50))
model.add(Activation('relu'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Exercise every fit() configuration: with/without accuracy reporting,
# explicit validation data vs. a validation split, no validation, no shuffle.
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate(X_train, Y_train, verbose=0)
print('score:', score)
# NOTE(review): `score` here is the loss returned by evaluate(); raising when
# it is *low* with a "Score too low" message looks inverted -- confirm intent.
if score < 0.25:
    raise Exception('Score too low, learning issue.')
preds = model.predict(X_test, verbose=0)
classes = model.predict_classes(X_test, verbose=0)
model.get_config(verbose=1)
###################
# merge test: sum #
###################
# Two parallel branches whose 50-unit outputs are element-wise summed.
print('Test merge: sum')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Merged models take a list of inputs, one array per branch.
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train], Y_train, verbose=0)
print('score:', score)
# NOTE(review): see the comparison-direction note in the sequential test.
if score < 0.22:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
model.get_config(verbose=1)
###################
# merge test: concat #
###################
# Same two branches, but outputs are concatenated (50*2 features).
print('Test merge: concat')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(50*2, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train], Y_train, verbose=0)
print('score:', score)
if score < 0.22:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
model.get_config(verbose=1)
##########################
# test merge recursivity #
##########################
# A Merge fed by another Merge: (left + right) -> intermediate, then
# (intermediate + righter) -> model; also round-trips the weights to disk.
print('Test merge recursivity')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
righter = Sequential()
righter.add(Dense(784, 50))
righter.add(Activation('relu'))
intermediate = Sequential()
intermediate.add(Merge([left, right], mode='sum'))
intermediate.add(Dense(50, 50))
intermediate.add(Activation('relu'))
model = Sequential()
model.add(Merge([intermediate, righter], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], Y_test))
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], Y_test))
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0)
print('score:', score)
if score < 0.19:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test, X_test], verbose=0)
model.get_config(verbose=1)
# Save/load round-trip: the score afterwards should match the one above.
model.save_weights('temp.h5')
model.load_weights('temp.h5')
score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0)
print('score:', score)
######################
# test merge overlap #
######################
# The same branch is merged with itself; the model then takes a single
# input array (not a list).
print('Test merge overlap')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, left], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, Y_train, batch_size=batc |
ironment"""
raise NotImplementedError()
    def __setTarget(self,targetName):
        # Switch the active target by name; returns True on success.
        # Surrounding quotes around the name are stripped first.
        if re.match("['\"].*['\"]",targetName):
            targetName = targetName[1:-1]
        # 'test'/'testi' are accepted as dummy targets without changing
        # the active target.
        if targetName == "test" or targetName == "testi":
            print "Warning: 'test' and 'testi' considered dummy targets."
            return True
        for t in self._targets:
            if t.name == targetName:
                self._activeTarget = t
                return True
        return False
    def initTest(self):
        """
        Inits a test run.

        Sets up the test automation environment, selects the first
        configured target as active, and starts log recording if a logger
        is defined.  Returns True on success, False if setup failed.
        """
        print "Setting up testing environment..."
        if not self._setupTestAutomation():
            return False
        print "setup complete"
        # Default to the first configured target until SetTarget is used.
        self._activeTarget = self._targets[0]
        if self._logger:
            print "Recording test to a file"
            self._logger.startLog()
        return True
    def _stopTest(self):
        """
        Stops a test run.

        Cleans up the test automation environment and closes the log file
        if recording was active.
        """
        print "Cleaning up testing environment..."
        self._cleanupTestAutomation()
        print "clean up complete"
        if self._logger:
            self._logger.endLog()
        print "Test finished"
    def endTest(self):
        # Public shutdown entry point; delegates cleanup to _stopTest().
        print "Shutting down"
        self._stopTest()
    def keywordInfo(self, kw ):
        # Print the name and formatted docstring of a single keyword,
        # if it is registered; unknown names are silently ignored.
        kws = self._getKeywords()
        if kw in kws:
            print kw
            self.printKw(kw,"#",kws[kw][1])
    def printKw(self,kw,header,text):
        # Render a keyword's docstring under an underline of `header` chars,
        # de-indented by the indent of its first non-blank line.
        print header*len(kw)
        print
        docstring = text.splitlines()
        strip_len = 0
        # Drop the empty first line of a triple-quoted docstring.
        if len(docstring[0]) == 0:
            docstring = docstring[1:]
        # Measure the common leading indent from the first non-blank line.
        for line in docstring:
            if len(line.strip()) > 0:
                first_line = line.lstrip()
                strip_len = len(line) - len(first_line)
                break
        for line in docstring:
            print line[strip_len:].rstrip()
        print
    def listKeywords(self, basekw = keyword.Keyword,full=False,header="#"):
        # List all keyword names derived from `basekw` in alphabetical
        # order; with full=True also print each docstring via printKw.
        # A fresh dict is passed so the scan bypasses the class-level cache.
        kws = self._getKeywords({},basekw)
        kws_keys = sorted(kws.keys())
        for kw in kws_keys:
            print kw
            if full:
                self.printKw(kw,header,kws[kw][1])
def _getKeywords(self, kw_dictionary = {}, basekw = keyword.Keyword):
use_cache = len(kw_dictionary) == 0
if use_cache and basekw in self._kw_cache:
return self._kw_cache[basekw]
for kw in basekw.__subclasses__():
kw_name = str(kw)[str(kw).rfind('.')+1:str(kw).rfind("'")]
if not kw_name.endswith("Keyword"):
kw_dictionary[kw_name] = (str(kw.__module__),str(kw.__doc__))
self._getKeywords(kw_dictionary,kw)
if use_cache:
self._kw_cache[basekw] = kw_dictionary
return kw_dictionary
    def __instantiateKeywordProxyObject(self,kwproxy, kwName,kwAttr,kwproxy_class):
        # Try to build a KeywordProxy instance that can handle kwName.
        # Returns the initialized proxy object, or None when the proxy does
        # not accept this keyword or instantiation fails.
        kwobject = None
        try:
            # Import the proxy's defining module dynamically by name.
            kwmodule = __import__(kwproxy_class, globals(), locals(), [kwproxy], -1)
            # kwobject = eval("kwmodule." + kw + "()")
            kwobject = getattr(kwmodule,kwproxy)()
            if not kwobject.initialize(kwName, kwAttr,self._activeTarget):
                kwobject = None
            if kwobject:
                print 'Recognized keyword: %s' % kwName
                print 'Attributes: %s' % kwAttr
        except Exception, e:
            print e
            print "Error: KeywordProxy error"
            kwobject = None
        return kwobject
    def __instantiateKeywordObject(self,kw_name,attributes,kw_class):
        # Build a concrete Keyword instance by importing its defining module
        # and initializing it with the attribute string and active target.
        # Returns None on invalid parameters or any instantiation error.
        kwobject = None
        try:
            kwmodule = __import__(kw_class, globals(), locals(), [kw_name], -1)
            # kwobject = eval("kwmodule." + kw + "()")
            kwobject = getattr(kwmodule,kw_name)()
            print 'Recognized keyword: %s' % kw_name
            print 'Attributes: %s' % attributes
            if not kwobject.initialize(attributes,self._activeTarget):
                print "Invalid parameters"
                kwobject = None
        except Exception, e:
            print e
            print "Error: Keyword not recognized!"
            kwobject = None
        return kwobject
    def _instantiateKeyword(self, kwName, kwAttr):
        # Resolve a keyword name (case-insensitive) to an instantiated
        # keyword object; falls back to the registered KeywordProxy classes
        # when no direct keyword matches.  Returns None if unrecognized.
        kw_dictionary = self._getKeywords()
        kwproxy_dictionary = self._getKeywords({}, keywordproxy.KeywordProxy)
        kwobject = None
        for kw in kw_dictionary:
            if kw.lower() == kwName.lower():
                kwobject = self.__instantiateKeywordObject(kw,kwAttr,kw_dictionary[kw][0])
                break
        else:
            # for/else: only reached when no direct keyword matched above.
            for kwproxy in kwproxy_dictionary:
                kwobject = self.__instantiateKeywordProxyObject(kwproxy, kwName,kwAttr,kwproxy_dictionary[kwproxy][0])
                if kwobject:
                    break
        if not kwobject:
            print "Error: Keyword not recognized!"
        return kwobject
    def __executeKeyword(self, kw):
        """
        Executes a single keyword.

        Searches a corresponding keyword object from the list of keywords
        and executes the keyword with that object.

        @type kw: string
        @param kw: executed keyword
        @rtype: boolean or string
        @return: True if execution was successful; False if execution was
        successful but the keyword returned False; "ERROR" if there were
        problems in the execution.
        """
        print ""
        print "Executing keyword: %s" % kw
        #Which keyword
        result = False
        kw = kw.strip()
        # Strip an optional "kw_" prefix.
        if kw.startswith("kw_"):
            kw = kw[3:].strip()
        # Testengine-note: generate-taskswitcher uses space as separator
        if kw.startswith("LaunchApp") or kw.startswith("SetTarget"):
            if not (kw.startswith("LaunchApp#") or kw.startswith("SetTarget#")):
                kw = kw.replace(" ",self._separator,1)
        # Split into keyword name and (optional) attribute string.
        kw_split = kw.split(self._separator,1)
        kwName = kw_split[0].strip()
        if len(kw_split) == 2:
            kwAttr = kw_split[1].strip()
        else:
            kwAttr = ""
        #Changing target
        if kwName.lower() == "settarget":
            result = self.__setTarget(kwAttr)
            print 'result: %s' % str(result)
            return result
        kwobject = self._instantiateKeyword(kwName,kwAttr)
        if not kwobject:
            return "ERROR"
        startTime = datetime.datetime.now()
        result = kwobject.execute()
        execTime = datetime.datetime.now() - startTime
        print 'result: %s' % str(result)
        # Post-keyword delay: the keyword's own delay, but never less than
        # the runner's global delay; -1 disables the wait entirely.
        kwDelay = kwobject.delay
        if kwDelay != -1:
            if self.delay > kwDelay:
                kwDelay = self.delay
            time.sleep(kwDelay)
        if self._logger:
            self._logger.logKeyword(self._activeTarget, kwobject, result, str(execTime))
        # NOTE(review): reads self._kwCount but writes self.kwCount -- looks
        # like a typo (probably meant self._kwCount += 1); confirm which
        # attribute callers rely on before changing.
        self.kwCount = self._kwCount + 1
        return result
def _handleSpecialCommands(self,command):
return False
def runInteractive(self):
"""
Runs an interactive test.
Keywords are read from stdin.
"""
# Only import here, so that we can use completion mechanism
# Readline only available in unix
try:
import readline
kws = self._getKeywords({}, keyword.Keyword).keys()
for command_list in self._commands.values():
kws.extend(command_list)
readline.set_completer(AdapterCompleter(kws).complete)
readline.parse_and_bind('tab: complete')
except:
pas |
import six
import requests
from bugwarrior.config import die
from bugwarrior.services import Issue, IssueService, ServiceClient
import logging
log = logging.getLogger(__name__)
class TeamLabClient(ServiceClient):
    """Thin HTTP client for the TeamLab 1.0 REST API."""

    def __init__(self, hostname, verbose=False):
        self.hostname = hostname
        self.verbose = verbose
        self.token = None  # set by authenticate()

    def authenticate(self, login, password):
        """Obtain an auth token and remember it for subsequent calls."""
        payload = {
            "userName": six.text_type(login),
            "password": six.text_type(password),
        }
        resp = self.call_api("/api/1.0/authentication.json", post=payload)
        self.token = six.text_type(resp["token"])

    def get_task_list(self):
        """Return the raw task list for the authenticated user."""
        return self.call_api("/api/1.0/project/task/@self.json")

    def call_api(self, uri, post=None, params=None):
        """GET (or POST when `post` is given) hostname+uri, decode as JSON."""
        url = "http://" + self.hostname + uri
        kwargs = {'params': params}
        if self.token:
            kwargs['headers'] = {'Authorization': self.token}
        if post:
            response = requests.post(url, data=post, **kwargs)
        else:
            response = requests.get(url, **kwargs)
        return self.json_response(response)
class TeamLabIssue(Issue):
    """Maps a TeamLab task record onto a taskwarrior issue."""

    URL = 'teamlaburl'
    FOREIGN_ID = 'teamlabid'
    TITLE = 'teamlabtitle'
    PROJECTOWNER_ID = 'teamlabprojectownerid'

    UDAS = {
        URL: {'type': 'string', 'label': 'Teamlab URL'},
        FOREIGN_ID: {'type': 'string', 'label': 'Teamlab ID'},
        TITLE: {'type': 'string', 'label': 'Teamlab Title'},
        PROJECTOWNER_ID: {'type': 'string', 'label': 'Teamlab ProjectOwner ID'},
    }
    UNIQUE_KEY = (URL, )

    def to_taskwarrior(self):
        record = self.record
        return {
            'project': self.get_project(),
            'priority': self.get_priority(),
            self.TITLE: record['title'],
            self.FOREIGN_ID: record['id'],
            self.URL: self.get_issue_url(),
            self.PROJECTOWNER_ID: record['projectOwner']['id'],
        }

    def get_default_description(self):
        url = self.get_processed_url(self.get_issue_url())
        return self.build_default_description(
            title=self.record['title'],
            url=url,
            number=self.record['id'],
            cls='issue',
        )

    def get_project(self):
        return self.origin['project_name']

    def get_issue_url(self):
        template = "http://%s/products/projects/tasks.aspx?prjID=%d&id=%d"
        return template % (
            self.origin['hostname'],
            self.record["projectOwner"]["id"],
            self.record["id"],
        )

    def get_priority(self):
        # TeamLab priority 1 maps to taskwarrior "H"; everything else falls
        # back to the configured default priority.
        if self.record.get("priority") == 1:
            return "H"
        return self.origin['default_priority']
class TeamLabService(IssueService):
    """bugwarrior service that pulls active tasks from a TeamLab instance."""

    ISSUE_CLASS = TeamLabIssue
    CONFIG_PREFIX = 'teamlab'

    def __init__(self, *args, **kw):
        super(TeamLabService, self).__init__(*args, **kw)
        self.hostname = self.config_get('hostname')
        login = self.config_get('login')
        password = self.config_get_password('password', login)
        self.client = TeamLabClient(self.hostname)
        self.client.authenticate(login, password)
        # The taskwarrior project name defaults to the hostname.
        self.project_name = self.config_get_default(
            'project_name', self.hostname
        )

    @classmethod
    def get_keyring_service(cls, config, section):
        login = config.get(section, cls._get_key('login'))
        hostname = config.get(section, cls._get_key('hostname'))
        return "teamlab://%s@%s" % (login, hostname)

    def get_service_metadata(self):
        return {
            'hostname': self.hostname,
            'project_name': self.project_name,
        }

    @classmethod
    def validate_config(cls, config, target):
        required = ('teamlab.login', 'teamlab.password', 'teamlab.hostname')
        for key in required:
            if not config.has_option(target, key):
                die("[%s] has no '%s'" % (target, key))
        IssueService.validate_config(config, target)

    def issues(self):
        tasks = self.client.get_task_list()
        log.debug(" Remote has %i total issues.", len(tasks))
        # Filter out closed tasks (status == 1 means active).
        tasks = [t for t in tasks if t["status"] == 1]
        log.debug(" Remote has %i active issues.", len(tasks))
        for task in tasks:
            yield self.get_issue_for_record(task)
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
# =============================================================================
class S3MainMenuOuterLayout(S3NavigationItem):
    """
        Main Menu Outer Layout for a Bootstrap-based theme
    """

    @staticmethod
    def layout(item):
        """ Custom Layout Method """

        # Render the child menu entries first.
        entries = item.render_components()

        # Collapse toggle shown when the screen width is reduced.
        toggle_attr = {"_data-toggle": "collapse",
                       "_data-target": ".nav-collapse",
                       }
        toggle = BUTTON(SPAN(_class="icon-bar"),
                        SPAN(_class="icon-bar"),
                        SPAN(_class="icon-bar"),
                        _type="button",
                        _class="btn btn-navbar",
                        **toggle_attr
                        )

        # navbar > navbar-inner > container > [toggle, collapsible entries]
        collapsible = DIV(entries, _class="nav-collapse collapse")
        container = DIV(toggle, collapsible, _class="container")
        inner = DIV(container, _class="navbar-inner")
        return DIV(inner, _class="navbar navbar-fixed-top")
# -----------------------------------------------------------------------------
# Shortcut
MMO = S3MainMenuOuterLayout
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
    """
        Main Menu Layout for a Bootstrap-based theme

        Renders the item as (depending on its position in the menu tree):
        the top-level UL, a dropdown submenu, a top-level link, or a plain
        menu entry; returns None for hidden/unauthorized items.
    """
    @staticmethod
    def layout(item):
        """ Custom Layout Method """
        # Manage flags: hide any disabled/unauthorized items
        if not item.authorized:
            item.enabled = False
            item.visible = False
        elif item.enabled is None or item.enabled:
            item.enabled = True
            item.visible = True
        if item.enabled and item.visible:
            if isinstance(item.parent, S3MainMenuOuterLayout):
                # The main menu: render children into a Bootstrap nav UL,
                # optionally right-aligned.
                items = item.render_components()
                if item.opts.right:
                    _class = "nav pull-right"
                else:
                    _class = "nav"
                return UL(items, _class=_class)
            else:
                label = XML(" %s" % item.label)
                if item.components:
                    # A submenu: dropdown toggle anchor + dropdown-menu UL.
                    items = item.render_components()
                    anch = {"data-toggle": "dropdown"}
                    attr = {"aria-labelledby": item.attr._id}
                    return LI([A([I(_class=item.opts.icon),
                                  label, B(_class="caret")],
                                 _href=item.url(),
                                 _id=item.attr._id,
                                 _class="dropdown-toggle disabled top-level",
                                 **anch),
                               UL(items,
                                  _class="dropdown-menu",
                                  _role="menu",
                                  **attr)],
                              _class="dropdown")
                elif item.parent.parent is None:
                    # A top-level item
                    return LI(A([I(_class=item.opts.icon), label],
                                _href=item.url()))
                else:
                    # A menu item
                    return LI(A([I(_class=item.opts.icon), label],
                                _href=item.url(),
                                _tabindex='-1',
                                _role="menuitem"))
        else:
            # Disabled or invisible: render nothing.
            return None
# -----------------------------------------------------------------------------
# Shortcut
MM = S3MainMenuLayout
# =============================================================================
class S3HomeMenuLayout(S3NavigationItem):
    """ Layout for the brand/home entry of the navigation bar """

    @staticmethod
    def layout(item):
        """ Render the logo as a link back to the default index page """

        # @ToDo: Move image to CSS?
        logo = IMG(_src=URL(c="static", f="img",
                            args="sahanalarge_14.png"),
                   _alt="Sahana")
        return LI(A(logo,
                    _class="brand",
                    _href=URL(c="default", f="index"),
                    ))

# -----------------------------------------------------------------------------
# Shortcut
HM = S3HomeMenuLayout
# ============================================ | =================================
class S3MenuDividerLayout(S3NavigationItem):
    """ Layout for a horizontal divider line in a dropdown menu """

    @staticmethod
    def layout(item):
        """ Render the divider element """
        return LI(_class="divider")

# -----------------------------------------------------------------------------
# Shortcut
SEP = S3MenuDividerLayout
# =============================================================================
#class S3MenuEmptyLayout(S3NavigationItem):
#
# @staticmethod
# def layout(item):
#
# items = item.render_components()
# return TAG[""](items)
# -----------------------------------------------------------------------------
# Shortcut
#EMPTY = S3MenuEmptyLayout
# END =========================================================================
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.to import UserDetailsTO
from plugins.tff_backend.bizz.global_stats import ApiCallException
from plugins.tff_backend.bizz.iyo.utils import get_username
from plugins.tff_backend.models.document import Document, DocumentType
from plugins.tff_backend.models.hoster import NodeOrder
from plugins.tff_backend.models.investor import InvestmentAgreement
from plugins.tff_backend.to.user import SignedDocumentTO
@returns([SignedDocumentTO])
@arguments(params=dict, user_detail=UserDetailsTO)
def api_list_documents(params, user_detail):
    """List all documents signed by the calling app user.

    Collects signatures from node orders, investment agreements and
    standalone documents; the three datastore queries run concurrently.

    :param params: api call parameters (unused)
    :param user_detail: UserDetailsTO of the calling user
    :returns: list of SignedDocumentTO
    :raises ApiCallException: when any lookup fails
    """
    try:
        username = get_username(user_detail)
        # Kick off all three queries in parallel before consuming any result
        orders = NodeOrder.list_by_user(username).fetch_async()
        agreements = InvestmentAgreement.list_by_user(username).fetch_async()
        documents = Document.list_by_username(username).fetch_async()
        results = []
        for order in orders.get_result():  # type: NodeOrder
            results.append(SignedDocumentTO(description=u'Terms and conditions for ordering a Zero-Node',
                                            signature=order.signature,
                                            name=u'Zero-Node order %s' % order.id,
                                            link=order.document_url))
        for agreement in agreements.get_result():  # type: InvestmentAgreement
            results.append(SignedDocumentTO(description=u'Internal token offering - Investment Agreement',
                                            signature=agreement.signature,
                                            name=u'Investment agreement %s' % agreement.id,
                                            link=agreement.document_url))
        for document in documents.get_result():  # type: Document
            if document.type == DocumentType.TOKEN_VALUE_ADDENDUM:
                description = u"""After much feedback from the blockchain and cryptocurrency community, we have adjusted the price of the iTFT from USD $5.00 to
USD $0.05. This means for the Purchase Amount previously outlined in your Purchase Agreement(s), you will receive more tokens."""
                results.append(SignedDocumentTO(description=description,
                                                signature=document.signature,
                                                name=u'ITFT Price Adjustment %s' % document.id,
                                                link=document.url))
        return results
    except Exception:
        # Fix: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception only, log with traceback, surface a user-facing error.
        logging.error('Failed to list documents', exc_info=True)
        raise ApiCallException(u'Could not load ThreeFold documents. Please try again later.')
|
import logging
import sys
import xmlrpc.client as xmlrpcclient
import pytest
from cobbler.utils import local_get_cobbler_api_url, get_shared_secret
# "import xmlrpc.client" does currently not work. No explanation found anywhere.
def pytest_addoption(parser):
    """Register the ``-E`` option to restrict tests to a named environment."""
    parser.addoption(
        "-E",
        action="store",
        metavar="NAME",
        help="only run tests matching the environment NAME.",
    )
def pytest_configure(config):
    """Declare the custom ``env(name)`` marker so pytest does not warn about it."""
    # register an additional marker
    config.addinivalue_line(
        "markers", "env(name): mark test to run only on named environment"
    )
@pytest.fixture(scope="session")
def remote(cobbler_xmlrpc_base):
    """
    Provide the XML-RPC proxy from the session-wide connection.

    :param cobbler_xmlrpc_base: tuple of (remote proxy, auth token)
    :return: the remote proxy object
    """
    xmlrpc_proxy, _ = cobbler_xmlrpc_base
    return xmlrpc_proxy
@pytest.fixture(scope="session")
def token(cobbler_xmlrpc_base):
    """
    Provide the login token from the session-wide connection.

    :param cobbler_xmlrpc_base: tuple of (remote proxy, auth token)
    :return: the authentication token
    """
    _, auth_token = cobbler_xmlrpc_base
    return auth_token
@pytest.fixture(scope="session")
def cobbler_xmlrpc_base():
    """
    Initialises the api object and makes it available to the test.

    Yields a (remote proxy, auth token) tuple consumed by the ``remote``
    and ``token`` fixtures for the whole session.
    """
    # create logger
    logging.basicConfig(stream=sys.stderr)
    # Fix: logger was misnamed "xobbler_xmlrpc_base"
    logger = logging.getLogger("cobbler_xmlrpc_base")
    logger.setLevel(logging.DEBUG)

    # create XML-RPC client and connect to server
    api_url = local_get_cobbler_api_url()
    remote = xmlrpcclient.Server(api_url, allow_none=True)
    shared_secret = get_shared_secret()
    token = remote.login("", shared_secret)
    if not token:
        # Without a valid token no test can talk to the server
        sys.exit(1)
    yield (remote, token)
|
. See ``canvas.context.config`` for possible
options.
shared : Canvas | GLContext | None
An existing canvas or context to share OpenGL objects with.
keys : str | dict | None
Default key mapping to use. If 'interactive', escape and F11 will
close the canvas and toggle full-screen mode, respectively.
If dict, maps keys to functions. If dict values are strings,
they are assumed to be ``Canvas`` methods, otherwise they should
be callable.
parent : widget-object
The parent widget if this makes sense for the used backend.
dpi : float | None
Resolution in dots-per-inch to use for the canvas. If dpi is None,
then the value will be determined by querying the global config first,
and then the operating system.
always_on_top : bool
If True, try to create the window in always-on-top mode.
px_scale : int > 0
A scale factor to apply between logical and physical pixels in addition
to the actual scale factor determined by the backend. This option
allows the scale factor to be adjusted for testing.
Notes
-----
The `Canvas` receives the following events:
* initialize
* resize
* draw
* mouse_press
* mouse_release
* mouse_double_click
* mouse_move
* mouse_wheel
* key_press
* key_release
* stylus
* touch
* close
The ordering of the mouse_double_click, mouse_press, and mouse_release
events are not guaranteed to be consistent between backends. Only certain
backends natively support double-clicking (currently Qt and WX); on other
backends, they are detected manually with a fixed time delay.
This can cause problems with accessibility, as increasing the OS detection
time or using a dedicated double-click button will not be respected.
"""
    def __init__(self, title='VisPy canvas', size=(800, 600), position=None,
                 show=False, autoswap=True, app=None, create_native=True,
                 vsync=False, resizable=True, decorate=True, fullscreen=False,
                 config=None, shared=None, keys=None, parent=None, dpi=None,
                 always_on_top=False, px_scale=1):
        """Validate arguments, set up events/context, and (optionally)
        create the native backend widget. See the class docstring for
        parameter semantics."""
        # Scale logical size to physical pixels before validating it
        size = tuple(int(s) * px_scale for s in size)
        if len(size) != 2:
            raise ValueError('size must be a 2-element list')
        title = str(title)
        if not isinstance(fullscreen, (bool, int)):
            raise TypeError('fullscreen must be bool or int')
        # Initialize some values
        self._autoswap = autoswap
        self._title = title
        self._frame_count = 0
        self._fps = 0
        self._basetime = time()
        self._fps_callback = None
        self._backend = None
        self._closed = False
        self._fps_window = 0.
        self._px_scale = int(px_scale)
        # Resolve DPI: explicit argument > global config > operating system
        if dpi is None:
            dpi = util_config['dpi']
        if dpi is None:
            dpi = get_dpi(raise_error=False)
        self.dpi = dpi
        # Create events
        self.events = EmitterGroup(source=self,
                                   initialize=Event,
                                   resize=ResizeEvent,
                                   draw=DrawEvent,
                                   mouse_press=MouseEvent,
                                   mouse_release=MouseEvent,
                                   mouse_double_click=MouseEvent,
                                   mouse_move=MouseEvent,
                                   mouse_wheel=MouseEvent,
                                   key_press=KeyEvent,
                                   key_release=KeyEvent,
                                   stylus=Event,
                                   touch=Event,
                                   close=Event)
        # Deprecated paint emitter (forwards draw events under the old name)
        emitter = WarningEmitter('Canvas.events.paint and Canvas.on_paint are '
                                 'deprecated; use Canvas.events.draw and '
                                 'Canvas.on_draw instead.',
                                 source=self, type='draw',
                                 event_class=DrawEvent)
        self.events.add(paint=emitter)
        self.events.draw.connect(self.events.paint)
        # Get app instance
        if app is None:
            self._app = use_app(call_reuse=False)
        elif isinstance(app, Application):
            self._app = app
        elif isinstance(app, string_types):
            self._app = Application(app)
        else:
            raise ValueError('Invalid value for app %r' % app)
        # Check shared and context: normalize to the underlying shared object
        if shared is None:
            pass
        elif isinstance(shared, Canvas):
            shared = shared.context.shared
        elif isinstance(shared, GLContext):
            shared = shared.shared
        else:
            raise TypeError('shared must be a Canvas, not %s' % type(shared))
        config = config or {}
        if not isinstance(config, dict):
            raise TypeError('config must be a dict, not %s' % type(config))
        # Create new context
        self._context = GLContext(config, shared)
        # Deal with special keys
        self._set_keys(keys)
        # store arguments that get set on Canvas init
        kwargs = dict(title=title, size=size, position=position, show=show,
                      vsync=vsync, resizable=resizable, decorate=decorate,
                      fullscreen=fullscreen, context=self._context,
                      parent=parent, always_on_top=always_on_top)
        self._backend_kwargs = kwargs
        # Create widget now (always do this *last*, after all err checks)
        if create_native:
            self.create_native()
            # Now we're ready to become current
            self.set_current()
        if '--vispy-fps' in sys.argv:
            self.measure_fps()
    def create_native(self):
        """ Create the native widget if not already done so. If the widget
        is already created, this function does nothing.
        """
        if self._backend is not None:
            return
        # Make sure that the app is active
        assert self._app.native
        # Instantiate the backend with the right class
        self._app.backend_module.CanvasBackend(self, **self._backend_kwargs)
        # self._backend = set by BaseCanvasBackend
        self._backend_kwargs = None  # Clean up
        # Connect to draw event (append to the end)
        # Process GLIR commands at each paint event
        self.events.draw.connect(self.context.flush_commands, position='last')
        if self._autoswap:
            # Swap buffers automatically after each draw, keeping this
            # connection last so it runs after all other draw handlers
            self.events.draw.connect((self, 'swap_buffers'),
                                     ref=True, position='last')
def _set_keys(self, keys):
if keys is not None:
if isinstance(keys, string_types):
if keys != 'interactive':
raise ValueError('keys, if string, must be "interactive", '
'not %s' % (keys,))
def toggle_fs():
self.fullscreen = not self.fullscreen
keys = dict(escape='close', F11=toggle_fs)
else:
keys = {}
if not isinstance(keys, dict):
raise TypeError('keys must be a dict, str, or None')
if len(keys) > 0:
# ensure all are callable
for key, val in keys.items():
| if isinstance(val, string_types):
new_val = getattr(self, val, None)
if new_val is None:
raise ValueError('value %s is not an attribute of '
'Canvas' % val)
val = new_val
if not hasattr(val, '__call__'):
ra | ise TypeError('Entry for key %s is not callable' % key)
# convert to lower-case representation
keys.pop(key)
keys[key.lower()] = val
self._keys_check = keys
def keys_check(event):
if event.key is not None:
|
# System imports
import itertools
import concurrent.futures
# Third party imports
import requests
class Client:
    """
    Used for sending commands to one or more IDA containers over HTTP.

    Commands are distributed over the configured containers round-robin.
    """
    def __init__(self, urls):
        """
        >>> client = Client(['http://host-1:4001', 'http://host-2:4001'])
        :param urls: List of addresses of IDA containers including the published port
        :raises ValueError: if urls is None or contains no non-empty entry
        """
        if urls is None or not any(urls):
            # Fix: corrected typo in the error message ("Invalide" -> "Invalid")
            raise ValueError('Invalid "urls" value')
        # Endless round-robin iterator over the container addresses
        self._urls = itertools.cycle(urls)
    def send_command(self, command, timeout=None):
        """
        Send a command to an IDA container via HTTP
        :param command: The command to send, should start with idal or idal64
        :param timeout: A timeout given for the command (optional)
        :returns: True if the command ran successfully, else False
        """
        data_to_send = dict(command=command)
        if timeout is not None:
            data_to_send['timeout'] = timeout
        response = requests.post('%s/ida/command' % next(self._urls), data=data_to_send)
        return response.status_code == 200
    def send_multiple_commands(self, commands, timeout=None, num_of_threads=4):
        """
        Send a batch of commands asynchronously to an IDA container via HTTP
        :param commands: An iterable of commands to send to the container
        :param timeout: A timeout given for the command (optional)
        :param num_of_threads: Number of worker threads used to send the batch
        :returns: A dictionary where the key is the command and the value is True if succeeded, else False
        """
        results = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=num_of_threads) as executor:
            # Map each future back to its originating command
            future_responses = {executor.submit(self.send_command, command, timeout): command for command in commands}
            for response in concurrent.futures.as_completed(future_responses):
                command = future_responses[response]
                try:
                    results[command] = response.result()
                except Exception as ex:
                    print('An exception occurred in command %s, The exception was %s' % (command, str(ex)))
        return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='sfark-extractor',
    version='0.1',
    description='sfArk decompressor to sf2',
    long_description=long_description,

    # The project's main homepage.
    # Fix: URL scheme was missing the colon ("https//...")
    url='https://github.com/maltouzes/sfark-extractor',

    # Author details
    author='Tony Maillefaud',
    author_email='maltouzes@gmail.com',

    # Choose your license
    license='GPL',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',

        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python",
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        "Operating System :: POSIX :: Linux",
        "Natural Language :: English",
        "Topic :: Sound",
    ],

    # What does your project relate to?
    keywords='sfArk to sf2 soundfont',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['peppercorn'],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },

    # NOTE(review): the 'sample' names below look like leftovers from the
    # PyPA sample project template -- confirm they match this package.
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        'sample': ['package_data.dat'],
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import io
import requests
import subprocess
import sys
DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim'
DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.com/captcha'
GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53'
# braille unicode block
BASE = 0x2800
# Dot bit mask for pixel (row, column) within one 2x4 braille cell
BIT_PER_PIXEL = [
    [0x01, 0x08],
    [0x02, 0x10],
    [0x04, 0x20],
    [0x40, 0x80],
]
# Braille cell dimensions in pixels: width (BW) x height (BH)
BW = 2
BH = 4

# imagemagick or compatible fork (used for converting SVG)
CONVERT = 'convert'
class PPMImage:
    '''
    Load a PPM image (Pillow-ish API).
    '''
    def __init__(self, f):
        # Binary PPM ("P6") header: magic line, dimensions, max color value
        if f.readline() != b'P6\n':
            raise ValueError('Invalid ppm format: header')
        dimensions = f.readline().rstrip().split(b' ')
        width, height = (int(v) for v in dimensions)
        if f.readline() != b'255\n':
            raise ValueError('Invalid ppm format: color depth')
        raw = f.read(width * height * 3)
        row_bytes = width * 3
        self.size = (width, height)
        # One (r, g, b) tuple per pixel, stored row by row
        self._grid = []
        for y in range(height):
            row = []
            for x in range(width):
                offset = row_bytes * y + 3 * x
                row.append(tuple(raw[offset:offset + 3]))
            self._grid.append(row)

    def getpixel(self, pos):
        x, y = pos
        return self._grid[y][x]
def print_image(img, threshold=128):
    '''Print black-and-white image to terminal in braille unicode characters.'''
    # Number of braille cells needed to cover the image, rounding up
    cols = (img.size[0] + BW - 1) // BW
    rows = (img.size[1] + BH - 1) // BH
    for row in range(rows):
        chars = []
        for col in range(cols):
            code = BASE
            for dy in range(BH):
                for dx in range(BW):
                    try:
                        pixel = img.getpixel((col * BW + dx, row * BH + dy))
                    except IndexError:
                        # Outside the image (ragged right/bottom edge):
                        # leave this dot unset
                        continue
                    # Dark pixel (red channel below threshold) sets the dot
                    if pixel[0] < threshold:
                        code |= BIT_PER_PIXEL[dy][dx]
            chars.append(chr(code))
        print(''.join(chars))
# Command-line interface for the faucet script
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use')
parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet')
parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)')
parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)')

args = parser.parse_args()

# Default to the signet network when no bitcoin-cli arguments are given
if args.bitcoin_cli_args == []:
    args.bitcoin_cli_args = ['-signet']
def bitcoin_cli(rpc_command_and_params):
    """Run bitcoin-cli with the configured global args; return stripped stdout.

    Exits the script when the binary is missing or the call fails.
    """
    argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params
    try:
        output = subprocess.check_output(argv)
    except FileNotFoundError:
        print('The binary', args.cmd, 'could not be found.')
        exit(1)
    except subprocess.CalledProcessError:
        cmdline = ' '.join(argv)
        print(f'-----\nError while calling "{cmdline}" (see output above).')
        exit(1)
    return output.strip().decode()
# --- Top-level faucet interaction flow ---
if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET:
    # Get the hash of the block at height 1 of the currently active signet chain
    curr_signet_hash = bitcoin_cli(['getblockhash', '1'])
    if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH:
        print('The global faucet cannot be used with a custom Signet network. Please use the global signet or setup your custom faucet to use this functionality.\n')
        exit(1)
else:
    # For custom faucets, don't request captcha by default.
    if args.captcha == DEFAULT_GLOBAL_CAPTCHA:
        args.captcha = ''

if args.addr == '':
    # get address for receiving coins
    args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32'])

data = {'address': args.addr, 'password': args.password, 'amount': args.amount}

# Store cookies
# for debugging: print(session.cookies.get_dict())
session = requests.Session()

if args.captcha != '':  # Retrieve a captcha
    try:
        res = session.get(args.captcha)
    except Exception:
        # Fix: was a bare "except:", which also swallowed KeyboardInterrupt/SystemExit
        print('Unexpected error when contacting faucet:', sys.exc_info()[0])
        exit(1)

    # Convert SVG image to PPM, and load it
    try:
        rv = subprocess.run([args.imagemagick, '-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True)
    except FileNotFoundError:
        print('The binary', args.imagemagick, 'could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.')
        exit(1)
    img = PPMImage(io.BytesIO(rv.stdout))

    # Terminal interaction
    print_image(img)
    print('Enter captcha: ', end='')
    data['captcha'] = input()

try:
    res = session.post(args.faucet, data=data)
except Exception:
    # Fix: was a bare "except:", which also swallowed KeyboardInterrupt/SystemExit
    print('Unexpected error when contacting faucet:', sys.exc_info()[0])
    exit(1)

# Display the output as per the returned status code
if res:
    # When the return code is in between 200 and 400 i.e. successful
    print(res.text)
elif res.status_code == 404:
    print('The specified faucet URL does not exist. Please check for any server issues/typo.')
elif res.status_code == 429:
    # Fix: corrected typo "rate-limitied" -> "rate-limited" in the user-facing message
    print('The script does not allow for repeated transactions as the global faucet is rate-limited to 1 request/IP/day. You can access the faucet website to get more coins manually')
else:
    print(f'Returned Error Code {res.status_code}\n{res.text}\n')
    print('Please check the provided arguments for their validity and/or any possible typo.')
|
#!/usr/bin/python2.4
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for grit.format.policy_templates.writers.admx_writer."""
import os
import sys
import unittest
from xml.dom import minidom
class XmlWriterBaseTest(unittest.TestCase):
  '''Base class for XML writer unit-tests.
  '''

  def GetXMLOfChildren(self, parent):
    '''Returns the XML of all child nodes of the given parent node.
    Args:
      parent: The XML of the children of this node will be returned.

    Return: XML of the children of the parent node.
    '''
    return ''.join(
        child.toprettyxml(indent='  ') for child in parent.childNodes)

  def AssertXMLEquals(self, output, expected_output):
    '''Asserts that the passed XML arguments are equal.
    Args:
      output: Actual XML text.
      expected_output: Expected XML text.
    '''
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12;
    # use the canonical assertEqual.
    self.assertEqual(output.strip(), expected_output.strip())
|
__author__ = 'olga.ostapenko'
| import pytest
from fixture.application import Application
@pytest.fixture(scope="session")
def app(request):
    """Session-wide Application fixture, destroyed when the session ends."""
    application = Application()
    # Tear the application down at the end of the test session
    request.addfinalizer(application.destroy)
    return application
import os
import threading
from cachetools import LRUCache
from customUtilities.logger import logger
class Cache():
    """Thread-safe LRU cache with optional persistence to a TSV file.

    All operations are guarded by an RLock so the cache can be shared
    across threads. Keys and values are expected to be strings when
    persistence (write_to_disk/load_from_disk) is used.
    """
    def __init__(self,cachefilename,CACHE_SIZE,logger=logger('detoursCache.log')):
        # NOTE(review): the default logger is created once at class-definition
        # time and shared by all instances using the default -- confirm intended.
        self.lock = threading.RLock()
        self.cachefilename = cachefilename  # path used by write_to_disk/load_from_disk
        self.entry = LRUCache(maxsize=CACHE_SIZE)
        self.logger = logger
        self.hitcount = 0  # number of hits recorded via hit()

    def hit(self):
        """Record one cache hit."""
        with self.lock:
            self.hitcount += 1

    def reset(self):
        """Reset the hit counter to zero."""
        with self.lock:
            self.hitcount = 0

    def push(self, key, val):
        """Insert key -> val; silently ignore values the LRU refuses to store."""
        with self.lock:
            try:
                self.entry[key] = val
            except Exception:
                # Best-effort insert (e.g. value larger than the cache); keep going.
                return

    def get(self, key):
        """Return the cached value for key, or False when absent."""
        with self.lock:
            try:
                return self.entry[key]
            except Exception:
                return False

    def write_to_disk(self):
        """Persist all entries as tab-separated key/value lines."""
        with self.lock:
            # Fix: use a context manager so the file is closed even if a write fails
            with open(self.cachefilename, 'w') as cachefile:
                for key, val in self.entry.items():
                    print(key + '\t' + val, file=cachefile)

    def load_from_disk(self):
        """Load previously persisted entries; malformed lines are skipped."""
        with self.lock:
            try:
                if os.path.exists(self.cachefilename):
                    with open(self.cachefilename, 'r') as f:
                        for line in f:
                            if line == "":
                                continue
                            stripped = line.strip()
                            splitvals = stripped.split('\t')
                            if len(splitvals) == 2:
                                self.entry[splitvals[0]] = splitvals[1]
                            else:
                                # Not a key<TAB>value line; skip it
                                continue
            except Exception:
                self.logger.error("Failed to read existing cache file")
                # Fix: the original raised a plain string, which itself raises
                # TypeError; raise a proper exception instead.
                raise RuntimeError("Error in loading previous cache file")
class intersperse:
    '''Iterator that inserts a value before each element of an iterable.

    Example:
        >>> list(intersperse(0, [1, 2]))
        [0, 1, 0, 2]
    '''
    def __init__(self, value, iterable):
        self.value = value
        # Fix: normalise with iter() so any iterable (e.g. a list) works,
        # not only objects that are already iterators.
        self.iterable = iter(iterable)
        self.return_value = True

    def __iter__(self):
        return self

    def __next__(self):
        if self.return_value:
            # Pull the next element first so StopIteration ends the stream
            # before a trailing separator would be emitted.
            self.next_value = next(self.iterable)
            r = self.value
        else:
            r = self.next_value
        self.return_value = not self.return_value
        return r
|
#MenuTitle: Freeze Placeholders
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Turn placeholders in current tab into current glyphs.
"""
# NOTE(review): runs inside the GlyphsApp macro environment, which provides
# the Glyphs and Message globals -- not runnable standalone.
try:
    thisFont = Glyphs.font # frontmost font
    currentTab = thisFont.currentTab # current edit tab, if any
    selectedGlyph = thisFont.selectedLayers[0].parent # active layers of selected glyphs
    if currentTab:
        # Replace every placeholder token with the selected glyph's name
        currentTab.text = currentTab.text.replace(
            "/Placeholder",
            "/%s"%selectedGlyph.name
        )
    else:
        Message(
            title="Cannot Freeze Placeholders",
            message="You must have an edit tab open, and a glyph selected. Otherwise, the script cannot work.",
            OKButton="Got it"
        )
except Exception as e:
    # brings macro window to front and clears its log:
    Glyphs.clearLog()
    import traceback
    print(traceback.format_exc())
    Message(
        title="Freezing Placeholders Failed",
        message="An error occurred during the execution of the script. Is a font open, a glyph selected? Check the Macro Window for a detailed error message.",
        OKButton=None
    )
|
from typing import Any, Callable, Tuple, Union
from packed import pack, unpack
import jj
from jj import default_app, default_handler
from jj.apps import BaseApp, create_app
from jj.http.codes import BAD_REQUEST, OK
from jj.http.methods import ANY, DELETE, GET, POST
from jj.matchers import LogicalMatcher, RequestMatcher, ResolvableMatcher, exists
from jj.requests import Request
from jj.resolvers import Registry, Resolver
from jj.responses import RelayResponse, Response, StreamResponse
from ._history import HistoryRepository
from ._remote_response import RemoteResponseType
__all__ = ("Mock",)
MatcherType = Union[RequestMatcher, LogicalMatcher]
class Mock(jj.App):
    """HTTP-controllable mock app.

    Remote clients carrying the "x-jj-remote-mock" header can register
    handlers (POST), deregister them (DELETE) and fetch the recorded
    request history (GET); all other requests are resolved against the
    registered handlers.
    """
    def __init__(self,
                 app_factory: Callable[..., BaseApp] = create_app,
                 resolver_factory: Callable[..., Resolver] = Resolver) -> None:
        self._resolver = resolver_factory(Registry(), default_app, default_handler)
        self._app = app_factory(resolver=self._resolver)
        self._repo = HistoryRepository()

    def _decode(self, payload: bytes) -> Tuple[str, MatcherType, RemoteResponseType]:
        """Unpack a payload into (handler_id, matcher, response), asserting types."""
        def resolver(cls: Any, **kwargs: Any) -> Any:
            # Matchers need the shared resolver injected on reconstruction
            return cls.__unpacked__(**kwargs, resolver=self._resolver)
        decoded = unpack(payload, {ResolvableMatcher: resolver})
        handler_id = decoded.get("id")
        assert isinstance(handler_id, str)
        matcher = decoded.get("request")
        assert isinstance(matcher, (RequestMatcher, LogicalMatcher))
        response = decoded.get("response")
        assert isinstance(response, (Response, RelayResponse))
        return handler_id, matcher, response

    @jj.match(POST, headers={"x-jj-remote-mock": exists})
    async def register(self, request: Request) -> Response:
        """Register a new handler from the packed request payload."""
        payload = await request.read()
        try:
            handler_id, matcher, response = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})
        async def handler(request: Request) -> RemoteResponseType:
            # Copy so each matched request gets a fresh response object
            return response.copy()
        self._resolver.register_attribute("handler_id", handler_id, handler)
        # Expose the handler on the app class under its id so it can be matched
        setattr(self._app.__class__, handler_id, matcher(handler))
        return Response(status=OK, json={"status": OK})

    @jj.match(DELETE, headers={"x-jj-remote-mock": exists})
    async def deregister(self, request: Request) -> Response:
        """Remove a previously registered handler and its history."""
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})
        try:
            delattr(self._app.__class__, handler_id)
        except AttributeError:
            # Already gone: deregistering twice is not an error
            pass
        await self._repo.delete_by_tag(handler_id)
        return Response(status=OK, json={"status": OK})

    @jj.match(GET, headers={"x-jj-remote-mock": exists})
    async def history(self, request: Request) -> Response:
        """Return the packed request/response history for a handler."""
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})
        history = await self._repo.get_by_tag(handler_id)
        packed = pack(history)
        return Response(status=OK, body=packed)

    @jj.match(ANY)
    async def resolve(self, request: Request) -> StreamResponse:
        """Resolve an ordinary request and record it when a handler matched."""
        handler = await self._resolver.resolve(request, self._app)
        response = await handler(request)
        handler_id = self._resolver.get_attribute("handler_id", handler, default=None)
        if handler_id:
            await self._repo.add(request, response, tags=[handler_id])
        return response
|
""" Users API URI specification """
from django.conf import settings
from django.conf.urls import url
from django.db import transaction
from edx_solutions_api_integration.users import views as users_views
from rest_framework.urlpatterns import format_suffix_patterns
COURSE_ID_PATTERN = settings.COURSE_ID_PATTERN
urlpatterns = [
url(r'^metrics/cities/$', users_views.UsersMetricsCitiesList.as_view(), name='apimgr-users-metrics-cities-list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/courses/grades$',
users_views.UsersCoursesGradesList.as_view(), name='users-courses-grades-list'),
url(
r'^(?P<user_id>[a-zA-Z0-9]+)/courses/{}/grades$'.format(COURSE_ID_PATTERN),
transaction.non_atomic_requests(users_views.UsersCoursesGradesDetail.as_view()),
name='users-courses-grades-detail'
),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/courses/{}/metrics/social/$'.format(COURSE_ID_PATTERN),
users_views.UsersSocialMetrics.as_view(), name='users-social-metrics'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/courses/{}$'.format(COURSE_ID_PATTERN),
users_views.UsersCoursesDetail.as_view(), name='users-courses-detail'),
url(
r'^(?P<user_id>[a-zA-Z0-9]+)/courses/*$',
transaction.non_atomic_requests(users_views.UsersCoursesList.as_view()),
name='users-courses-list'
),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/groups/*$', users_views.UsersGroupsList.as_view(), name='users-groups-list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/groups/(?P<group_id>[0-9]+)$',
users_views.UsersGroupsDetail.as_view(), name='users-groups-detail'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/preferences$',
users_views.UsersPreferences.as_view(), name='users-preferences-list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/preferences/(?P<preference_id>[a-zA-Z0-9_]+)$',
users_views.UsersPreferencesDetail.as_view(), name='users-preferences-detail'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/organizations/$',
users_views.UsersOrganizationsList.as_view(), name='users-organizations-list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/roles/(?P<role>[a-z_]+)/courses/{}$'.format(COURSE_ID_PATTERN),
users_views.UsersRolesCoursesDetail.as_view(), name='users-roles-courses-detail'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/roles/*$', users_views.UsersRolesList.as_view(), name='users-roles-list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/workgroups/$',
us | ers_views.UsersWorkgroupsList.as_view(), name='users-workgroups | -list'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/notifications/(?P<msg_id>[0-9]+)/$',
users_views.UsersNotificationsDetail.as_view(), name='users-notifications-detail'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)$', users_views.UsersDetail.as_view(), name='apimgr-users-detail'),
url(r'^$', users_views.UsersList.as_view(), name='apimgr-users-list'),
url(r'mass-details/$', users_views.MassUsersDetailsList.as_view(), name='apimgr-mass-users-detail'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/courses/progress',
users_views.UsersCourseProgressList.as_view(), name='users-courses-progress'),
url(r'^integration-test-users/$', users_views.UsersListWithEnrollment.as_view(), name='integration-test-users'),
url(r'^(?P<user_id>[a-zA-Z0-9]+)/attributes/',
users_views.ClientSpecificAttributesView.as_view(), name='users-attributes'),
url(r'validate-token/$', users_views.TokenBasedUserDetails.as_view(),
name='validate-bearer-token'),
url(r'anonymous_id/$', users_views.UsersAnonymousId.as_view(),
name='user-anonymous-id'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
"""The 1-Wire component."""
import asyncio
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, PLATFORMS
from .onewirehub import CannotConnect, OneWireHub
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """YAML-based setup is a no-op; configuration happens via config entries."""
    return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry):
    """Set up a 1-Wire proxy for a config entry.

    Raises ConfigEntryNotReady (triggering HA's retry logic) if the hub
    cannot connect; otherwise stores the hub under hass.data[DOMAIN] and
    schedules platform setup plus a stale-device registry cleanup.
    """
    hass.data.setdefault(DOMAIN, {})
    onewirehub = OneWireHub(hass)
    try:
        await onewirehub.initialize(config_entry)
    except CannotConnect as exc:
        # Let Home Assistant retry the setup later.
        raise ConfigEntryNotReady() from exc
    hass.data[DOMAIN][config_entry.unique_id] = onewirehub
    async def cleanup_registry() -> None:
        # Get registries
        device_registry, entity_registry = await asyncio.gather(
            hass.helpers.device_registry.async_get_registry(),
            hass.helpers.entity_registry.async_get_registry(),
        )
        # Generate list of all device entries
        registry_devices = [
            entry.id
            for entry in dr.async_entries_for_config_entry(
                device_registry, config_entry.entry_id
            )
        ]
        # Remove devices that don't belong to any entity
        for device_id in registry_devices:
            if not er.async_entries_for_device(
                entity_registry, device_id, include_disabled_entities=True
            ):
                _LOGGER.debug(
                    "Removing device `%s` because it does not have any entities",
                    device_id,
                )
                device_registry.async_remove_device(device_id)
    async def start_platforms() -> None:
        """Start platforms and cleanup devices."""
        # wait until all required platforms are ready
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_setup(config_entry, platform)
                for platform in PLATFORMS
            ]
        )
        # Cleanup only after all platforms registered their entities.
        await cleanup_registry()
    hass.async_create_task(start_platforms())
    return True
async | def async_unload_entry(hass: HomeAssistantType, config_entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.unique_id)
return unload_ok
|
from ajenti.api import *
from api | import PackageInfo, PackageManager
@plugin
@rootcontext
@persistent
class BSDPackageManager (PackageManager):
    """Stub package-manager backend for FreeBSD; operations are not implemented yet."""
    platforms = ['freebsd']

    def init(self):
        # No upgrade tracking is available yet.
        self.upgradeable = []

    def get_lists(self):
        """Not implemented for FreeBSD."""

    def refresh(self):
        """Not implemented for FreeBSD."""

    def search(self, query):
        """Search is unsupported; always returns an empty result list."""
        return []

    def do(self, actions):
        """Not implemented for FreeBSD."""
|
"""This is a really rough implementation but demonstrates the
core ideas."""
import os
import unittest
try:
import maya
ISMAYA = True
except ImportError:
maya, ISMAYA = None, False
from mayaserver.client import start_process, create_client, sendrecv
class MayaTestCase(unittest.TestCase):
    """TestCase that transparently proxies its tests to a remote Maya process.

    When running inside Maya (ISMAYA), tests run normally.  Otherwise each
    test method is re-executed, by name, inside a spawned mayaserver process.
    """
    def _setUp(self):
        # One-time class-level setup: spawn the Maya server process, connect
        # a client socket, and import this test module on the remote side.
        cls = self.__class__
        if hasattr(cls, '_setupRan'):
            return
        cls.reqport = start_process()
        cls.reqsock = create_client(cls.reqport)
        appendstr = 'import sys; sys.path.append(%r)' % (
            os.path.dirname(__file__))
        sendrecv(cls.reqsock, ('exec', appendstr))
        cls.testmodule = cls.__module__
        # Remote alias: dots are not valid in a plain identifier.
        cls.testalias = cls.testmodule.replace('.', '_')
        impstr = 'import %s as %s' % (cls.testmodule, cls.testalias)
        sendrecv(cls.reqsock, ('exec', impstr))
        # NOTE(review): the flag is set on MayaTestCase itself, so all
        # subclasses share a single setup — confirm this is intended when
        # several test classes run in one session.
        MayaTestCase._setupRan = True
    def run(self, result=None):
        # Inside Maya: behave exactly like a normal TestCase.
        if ISMAYA:
            unittest.TestCase.run(self, result)
            return
        # Outside Maya: replace the test method with a wrapper that forwards
        # the test, by name, to the remote process.  self.__testMethodName is
        # name-mangled to _MayaTestCase__testMethodName and therefore does
        # not collide with unittest's own _testMethodName attribute.
        def wrappedTest():
            self.__testMethodName = self._testMethodName
            try:
                self._wrappedTest()
            finally:
                self._testMethodName = self.__testMethodName
        # Local setUp/tearDown are neutered; the remote process runs the real
        # ones (see the code generated in _wrappedTest).
        self.setUp = lambda: None
        self.tearDown = lambda: None
        self._setUp()
        setattr(self, self._testMethodName, wrappedTest)
        unittest.TestCase.run(self, result)
    def _wrappedTest(self):
        # Build a snippet that instantiates this test case remotely and runs
        # setUp / the test / tearDown there.
        strargs = dict(testmodule=self.testalias,
                       testcase=self.__class__.__name__,
                       testfunc=self._testMethodName)
        teststr = """tc = {testmodule}.{testcase}("{testfunc}")
try:
    tc.setUp()
    tc.{testfunc}()
finally:
    tc.tearDown()""".format(**strargs)
        try:
            sendrecv(self.reqsock, ('exec', teststr))
        except RuntimeError as ex:
            # Surface remote assertion failures as local AssertionErrors so
            # the runner reports a failure rather than an error.
            if 'AssertionError' in str(ex):
                raise AssertionError(*ex.args)
            raise
|
#!/usr/b | in/env python
class CapOpen(object):
    """File-like wrapper that upper-cases everything written through it.

    All attributes other than those defined here are delegated to the
    underlying file object, so CapOpen can be used wherever a plain file
    object is expected.
    """
    def __init__(self, fn, mode='r', buf=-1):
        self.file = open(fn, mode, buf)
    def __str__(self):
        return str(self.file)
    def __repr__(self):
        # repr() instead of Python 2-only backticks, so this also runs on
        # Python 3 (backtick repr syntax was removed in Python 3).
        return repr(self.file)
    def write(self, line):
        """Write *line* converted to upper case; returns file.write's result."""
        return self.file.write(line.upper())
    def __getattr__(self, attr):
        # Delegate everything else (read, close, flush, ...) to the real file.
        return getattr(self.file, attr)
|
= df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
    def test_filter_and_transform_with_multiple_non_unique_int_index(self):
        # GH4620
        # Int index where several distinct labels repeat.
        index = [1, 1, 1, 2, 0, 0, 0, 1]
        df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
                        'tag' : [23,45,62,24,45,34,25,62]}, index=index)
        grouped_df = df.groupby('tag')
        ser = df['pid']
        grouped_ser = ser.groupby(df['tag'])
        # Positions of rows whose 'tag' group has more than one member.
        expected_indexes = [1, 2, 4, 7]
        # Filter DataFrame
        actual = grouped_df.filter(lambda x: len(x) > 1)
        expected = df.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
        expected = df.copy()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Series
        actual = grouped_ser.filter(lambda x: len(x) > 1)
        expected = ser.take(expected_indexes)
        assert_series_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
        NA = np.nan
        expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
        # ^ made manually because this can get confusing!
        assert_series_equal(actual, expected)
        # Transform Series
        actual = grouped_ser.transform(len)
        expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)
        assert_series_equal(actual, expected)
        # Transform (a column from) DataFrameGroupBy
        actual = grouped_df.pid.transform(len)
        assert_series_equal(actual, expected)
    def test_filter_and_transform_with_non_unique_float_index(self):
        # GH4620
        # NOTE(review): a second method with this exact name is defined later
        # in this class and silently shadows this one, so this test never
        # runs — one of the two should be renamed.
        index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
        df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
                        'tag' : [23,45,62,24,45,34,25,62]}, index=index)
        grouped_df = df.groupby('tag')
        ser = df['pid']
        grouped_ser = ser.groupby(df['tag'])
        # Positions of rows whose 'tag' group has more than one member.
        expected_indexes = [1, 2, 4, 7]
        # Filter DataFrame
        actual = grouped_df.filter(lambda x: len(x) > 1)
        expected = df.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
        expected = df.copy()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Series
        actual = grouped_ser.filter(lambda x: len(x) > 1)
        expected = ser.take(expected_indexes)
        assert_series_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
        NA = np.nan
        expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
        # ^ made manually because this can get confusing!
        assert_series_equal(actual, expected)
        # Transform Series
        actual = grouped_ser.transform(len)
        expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)
        assert_series_equal(actual, expected)
        # Transform (a column from) DataFrameGroupBy
        actual = grouped_df.pid.transform(len)
        assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 0, 0, 0, 1], dtype=float)
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actua | l, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transf | orm(len)
assert_series_equal(actual, expected)
    def test_filter_and_transform_with_non_unique_timestamp_index(self):
        # GH4620
        # Same scenario as the int/float variants, with Timestamp labels.
        t0 = Timestamp('2013-09-30 00:05:00')
        t1 = Timestamp('2013-10-30 00:05:00')
        t2 = Timestamp('2013-11-30 00:05:00')
        index = [t1, t1, t1, t2, t1, t1, t0, t1]
        df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
                        'tag' : [23,45,62,24,45,34,25,62]}, index=index)
        grouped_df = df.groupby('tag')
        ser = df['pid']
        grouped_ser = ser.groupby(df['tag'])
        # Positions of rows whose 'tag' group has more than one member.
        expected_indexes = [1, 2, 4, 7]
        # Filter DataFrame
        actual = grouped_df.filter(lambda x: len(x) > 1)
        expected = df.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
        expected = df.copy()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Series
        actual = grouped_ser.filter(lambda x: len(x) > 1)
        expected = ser.take(expected_indexes)
        assert_series_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
        NA = np.nan
        expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
        # ^ made manually because this can get confusing!
        assert_series_equal(actual, expected)
        # Transform Series
        actual = grouped_ser.transform(len)
        expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)
        assert_series_equal(actual, expected)
        # Transform (a column from) DataFrameGroupBy
        actual = grouped_df.pid.transform(len)
        assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dr |
import renderdoc as rd
import rdtest
class VK_Robustness2(rdtest.TestCase):
    """Checks VK_EXT_robustness2 behaviour: unbound vertex buffers and
    out-of-bounds buffer/image reads must return well-defined (zero, or
    clamped) values rather than garbage."""
    demos_test_name = 'VK_Robustness2'
    def check_capture(self):
        action: rd.ActionDescription = self.find_action('vkCmdDraw')
        self.controller.SetFrameEvent(action.eventId, True)
        self.check_triangle()
        rdtest.log.success('Triangle is rendered correctly')
        # Expected vertex inputs: Color/UV come from an unbound VB, so the
        # checker expects None for them.
        vsin_ref = {
            0: {
                'vtx': 0,
                'idx': 0,
                'Position': [-0.5, -0.5, 0.0],
                'Color': None,
                'UV': None,
            },
            1: {
                'vtx': 1,
                'idx': 1,
                'Position': [0.0, 0.5, 0.0],
                'Color': None,
                'UV': None,
            },
            2: {
                'vtx': 2,
                'idx': 2,
                'Position': [0.5, -0.5, 0.0],
                'Color': None,
                'UV': None,
            },
        }
        self.check_mesh_data(vsin_ref, self.get_vsin(action))
        rdtest.log.success('Mesh input data is correct, including unbound VB')
        postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
        # Post-VS expectations: unbound attributes read as zero (w defaults
        # to 1.0 for the UV output).
        postvs_ref = {
            0: {
                'vtx': 0,
                'idx': 0,
                'gl_PerVertex_var.gl_Position': [-0.5, 0.5, 0.0, 1.0],
                'vertOut.pos': [-0.5, 0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 0.0, 0.0, 0.0],
                'vertOut.uv': [0.0, 0.0, 0.0, 1.0],
            },
            1: {
                'vtx': 1,
                'idx': 1,
                'gl_PerVertex_var.gl_Position': [0.0, -0.5, 0.0, 1.0],
                'vertOut.pos': [0.0, -0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 0.0, 0.0, 0.0],
                'vertOut.uv': [0.0, 0.0, 0.0, 1.0],
            },
            2: {
                'vtx': 2,
                'idx': 2,
                'gl_PerVertex_var.gl_Position': [0.5, 0.5, 0.0, 1.0],
                'vertOut.pos': [0.5, 0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 0.0, 0.0, 0.0],
                'vertOut.uv': [0.0, 0.0, 0.0, 1.0],
            },
        }
        self.check_mesh_data(postvs_ref, postvs_data)
        rdtest.log.success('Mesh output data is correct, including unbound VB')
        pipe = self.controller.GetPipelineState()
        refl = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
        mapping = pipe.GetBindpointMapping(rd.ShaderStage.Fragment)
        # Validate every fragment-shader constant block's fetched contents.
        for i, cb in enumerate(refl.constantBlocks):
            cbuf = pipe.GetConstantBuffer(rd.ShaderStage.Fragment, i, 0)
            var_check = rdtest.ConstantBufferChecker(
                self.controller.GetCBufferVariableContents(pipe.GetGraphicsPipelineObject(),
                pipe.GetShader(rd.ShaderStage.Fragment), rd.ShaderStage.Fragment, refl.entryPoint, i,
                cbuf.resourceId, cbuf.byteOffset, cbuf.byteSize))
            if cb.bufferBacked:
                # Buffer-backed block: out-of-bounds reads must be zero.
                var_check.check('data').type(rd.VarType.Float).rows(1).cols(4).value([0.0, 0.0, 0.0, 0.0])
            else:
                # Push constants: expected values depend on which robustness2
                # features the capture enabled (markers in the capture).
                val = [0, 0, 0, 0]
                if self.find_action('robustBufferAccess2') is not None:
                    val[2] = 1000000
                if self.find_action('robustImageAccess2') is not None:
                    val[0] = val[1] = 1000000
                var_check.check('coord').type(rd.VarType.SInt).rows(1).cols(4).value(val)
            rdtest.log.success('CBuffer {} at bindpoint {}.{}[0] contains the correct contents'
                .format(cb.name, mapping.constantBlocks[i].bindset, mapping.constantBlocks[i].bind))
|
'''Simple utility functions that should really be in a C module'''
from math import *
from OpenGLContext.arrays import *
from OpenGLContext import vectorutilities
def rotMatrix( source ):
    """Given rotation as an (x,y,z,a) 4-sequence (a in radians), return rotation matrix

    Returns a 4x4 rotation matrix for the given rotation,
    the matrix is a Numeric Python array.

    x,y,z should be a unit vector.

    The argument is a single 4-item sequence; the previous Python 2
    tuple-unpacking signature is a syntax error on Python 3 (PEP 3113),
    but the call signature is unchanged.
    """
    (x, y, z, a) = source
    c = cos( a )
    s = sin( a )
    t = 1-c
    # Axis-angle rotation matrix, row-vector convention.
    R = array( [
        [ t*x*x+c, t*x*y+s*z, t*x*z-s*y, 0],
        [ t*x*y-s*z, t*y*y+c, t*y*z+s*x, 0],
        [ t*x*z+s*y, t*y*z-s*x, t*z*z+c, 0],
        [ 0, 0, 0, 1]
    ] )
    return R
def crossProduct( first, second ):
    """Given 2 4-item vectors, return the cross product as a 4-item vector"""
    cross = vectorutilities.crossProduct( first, second )[0]
    # Homogeneous result: direction vectors carry w == 0.
    return [cross[0], cross[1], cross[2], 0]
def magnitude( vector ):
    """Given a 3 or 4-item vector, return the vector's magnitude"""
    xyz = vector[:3]
    return vectorutilities.magnitude( xyz )[0]
def normalise( vector ):
    """Given a 3 or 4-item vector, return a 3-item unit vector"""
    xyz = vector[:3]
    unit = vectorutilities.normalise( xyz )
    return unit[0]
def pointNormal2Plane( point, normal ):
    """Create parametric equation of plane from point and normal

    Result is (a,b,c,d) with (a,b,c) the unit normal and
    d = -normal . point.
    """
    unit_normal = normalise( normal )
    plane = zeros( (4,), 'f' )
    plane[:3] = unit_normal
    plane[3] = - dot( unit_normal, asarray( point, 'f' ) )
    return plane
def plane2PointNormal( plane ):
    """Get a point and normal from a plane equation

    The argument is a single (a,b,c,d) 4-sequence; the previous Python 2
    tuple-unpacking signature is a syntax error on Python 3 (PEP 3113),
    but the call signature is unchanged.
    """
    (a, b, c, d) = plane
    return asarray((-d*a,-d*b,-d*c),'f'), asarray((a,b,c),'f')
def combineNormals( normals, weights=None ):
    """Given set of N normals, return (weighted) combination

    :param normals: sequence of N 3-item normals
    :param weights: optional sequence of N scalar weights
    """
    normals = asarray( normals,'d')
    if weights:
        # Column vector of weights so it broadcasts across each normal.
        weights = reshape(asarray( weights, 'f'),(len(weights),1))
        final = sum(normals*weights, 0)
    else:
        final = sum(normals,0)
    x,y,z = final
    if x == y == z == 0.0:
        # Degenerate case: the normals cancelled out exactly.  Fall back to
        # a sign-flipped copy of the first normal so a usable direction is
        # still returned.
        x,y,z = normals[0]
        if x or y:
            x,y,z = -x,-y,z
        else:
            x,y,z = -x,y,-z
    return normalise( (x,y,z) )
def coplanar( points ):
    """Determine if points are coplanar

    All sets of points < 4 are coplanar
    Otherwise, take the first two points and create vector
    for all other points, take vector to second point,
    calculate cross-product where the cross-product is
    non-zero (not colinear), if the normalised cross-product
    is all equal, the points are coplanar...
    """
    points = asarray( points, 'f' )
    if len(points) < 4:
        return True
    a,b = points[:2]
    # Reference direction from the first two points.
    vec1 = reshape(b-a,(1,3))
    rest = points[2:] - b
    vecs = vectorutilities.crossProduct(
        rest,
        vec1,
    )
    # Keep only rows with a non-zero cross product (colinear points give
    # zero and carry no plane information).
    vecsNonZero = sometrue(vecs,1)
    vecs = compress(vecsNonZero, vecs,0)
    if not len(vecs):
        return True
    # All remaining normals parallel (equal when normalised) => coplanar.
    vecs = vectorutilities.normalise(vecs)
    return allclose( vecs[0], vecs )
"""
sentry.client.celery.tasks
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more de | tails.
:license: BSD, see LICENSE for more details.
"""
from celery.decorators import task
from sentry.client.base import SentryClient
from sentry.client.celery import conf
@task(routing_key=conf.C | ELERY_ROUTING_KEY)
def send(data):
return SentryClient().send(**data)
|
# Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv

filename = "/tmp/QueryPipelineStatistics.csv"

EXPECTED_HEADER = ['query',
                   'rows',
                   'matches',
                   'quadwords',
                   'cachelines',
                   'parse',
                   'plan',
                   'match']

times = []
with open(filename) as f:
    reader = csv.reader(f)
    assert next(reader) == EXPECTED_HEADER
    for row in reader:
        # Total per-query time = parse + plan + match (the last three columns).
        times.append(float(row[-1]) + float(row[-2]) + float(row[-3]))

# Largest first, so index k is the k-th slowest query.
times.sort(reverse=True)
idx_max = len(times) - 1
# p50, p90, p99, p99.9 positions plus the single slowest query.
percentile_indices = [round(idx_max / divisor) for divisor in (2, 10, 100, 1000)] + [0]
tails = [times[i] for i in percentile_indices]
print(tails)
|
#!/usr/bin/env python
from agate.aggregations.has_nulls import HasNulls
from agate.computations.base import Computation
from agate.data_types import Date, DateTime, Number, TimeDelta
from agate.exceptions import DataTypeError
from agate.warns import warn_null_calculation
class Change(Computation):
    """
    Calculate the difference between two columns.

    This calculation can be applied to :class:`.Number` columns to calculate
    numbers. It can also be applied to :class:`.Date`, :class:`.DateTime`, and
    :class:`.TimeDelta` columns to calculate time deltas.

    :param before_column_name:
        The name of a column containing the "before" values.
    :param after_column_name:
        The name of a column containing the "after" values.
    """
    def __init__(self, before_column_name, after_column_name):
        self._before_column_name = before_column_name
        self._after_column_name = after_column_name

    def get_computed_data_type(self, table):
        # Temporal "before" columns produce TimeDelta; numeric ones Number.
        before_type = table.columns[self._before_column_name].data_type
        if isinstance(before_type, (Date, DateTime, TimeDelta)):
            return TimeDelta()
        if isinstance(before_type, Number):
            return Number()

    def validate(self, table):
        before_column = table.columns[self._before_column_name]
        after_column = table.columns[self._after_column_name]
        for data_type in (Number, Date, DateTime, TimeDelta):
            if not isinstance(before_column.data_type, data_type):
                continue
            # Both columns must share the supported type.
            if not isinstance(after_column.data_type, data_type):
                raise DataTypeError('Specified columns must be of the same type')
            # Nulls are tolerated but produce a warning.
            if HasNulls(self._before_column_name).run(table):
                warn_null_calculation(self, before_column)
            if HasNulls(self._after_column_name).run(table):
                warn_null_calculation(self, after_column)
            return
        raise DataTypeError('Change before and after columns must both contain data that is one of: Number, Date, DateTime or TimeDelta.')

    def run(self, table):
        """Return after - before for each row, or None when either is null."""
        values = []
        for row in table.rows:
            start = row[self._before_column_name]
            end = row[self._after_column_name]
            values.append(end - start if start is not None and end is not None else None)
        return values
|
# Copyright 2017 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
| # limitations under the | License.
"""Base application-platform build configuration.
If applications wish to customize or extend build configuration information by
platform, they should add an <application-name>/configuration.py file to the
port directory of the platform they wish to customize
(e.g. linux/x64x11/cobalt/configuration.py for the `cobalt` application and the
`linux-x64x11` platform). This module should contain an class that extends the
class defined here.
"""
import os
class ApplicationConfiguration(object):
  """Base build configuration class for all Starboard applications.

  Application-specific configurations should subclass this and override the
  hooks they need; the defaults below deliberately do nothing.
  """

  def __init__(self, platform_configuration, application_name,
               application_directory):
    """Initialize ApplicationConfiguration.

    Args:
      platform_configuration: An instance of StarboardBuildConfiguration for
        the platform being built.
      application_name: The name of the application loading this
        configuration.
      application_directory: The absolute path of the directory containing
        the application configuration being loaded.
    """
    self._platform_configuration = platform_configuration
    self._application_name = application_name
    self._application_directory = application_directory

  def GetName(self):
    """Returns the application name."""
    return self._application_name

  def GetDirectory(self):
    """Returns the directory of the application configuration."""
    return self._application_directory

  def GetPreIncludes(self):
    """Returns an ordered list of .gypi paths GYP should include first."""
    return []

  def GetPostIncludes(self):
    """Returns an ordered list of .gypi paths GYP should include last.

    By default this is the application's `configuration.gypi`, if present.
    """
    standard_gypi_path = os.path.join(self.GetDirectory(), 'configuration.gypi')
    if os.path.isfile(standard_gypi_path):
      return [standard_gypi_path]
    return []

  def GetEnvironmentVariables(self):
    """Returns environment variable overrides applied before GYP/GN runs."""
    return {}

  def GetVariables(self, config_name):
    """Returns GYP/GN variable overrides for the given build config name."""
    del config_name  # Unused in the base implementation.
    return {}

  def GetGeneratorVariables(self, config_name):
    """Returns generator variable overrides for the given build config name."""
    del config_name  # Unused in the base implementation.
    return {}

  def GetTestEnvVariables(self):
    """Returns environment variables needed by unit test binaries."""
    return {}

  def GetTestFilters(self):
    """Returns TestFilter objects for tests excluded from a unit test run."""
    return []

  def GetTestTargets(self):
    """Returns the list of test target names to run."""
    return []

  def GetDefaultTargetBuildFile(self):
    """Returns the build file to build by default, or None."""
    return None
|
class Dynamics:
    """
    a convenience class containing some dynamics

    Class attributes map conventional dynamic markings (pianissimo through
    fortissimo) to velocity values in the 0-127 range.
    """
    ppppp = 10
    pppp = 20
    ppp = 30
    pp = 40
    p = 60
    mp = 80
    mf = 90
    f = 100
    ff = 110
    fff = 120
    ffff = 127

    @classmethod
    def from_string(cls, thestring):
        """
        :param thestring: a string containing a symbolic volume indication
        :return: the string mapped to a number (0 for an unknown symbol)
        """
        # Read the values through cls (not Dynamics) so subclasses that
        # override individual levels are honoured.
        lut = {
            'ppppp': cls.ppppp,
            'pppp': cls.pppp,
            'ppp': cls.ppp,
            'pp': cls.pp,
            'p': cls.p,
            'mp': cls.mp,
            'mf': cls.mf,
            'f': cls.f,
            'ff': cls.ff,
            'fff': cls.fff,
            'ffff': cls.ffff,
        }
        # dict.get replaces the explicit membership test; unknown names -> 0.
        return lut.get(thestring, 0)
|
name='task3')
coll.add_task(self.task4, name='task4')
coll.add_task(self.contextualized, name='contextualized')
self.executor = Executor(collection=coll)
class init:
"__init__"
def allows_collection_and_config(self):
coll = Collection()
conf = Config()
e = Executor(collection=coll, config=conf)
assert e.collection is coll
assert e.config is conf
def uses_blank_config_by_default(self):
e = Executor(collection=Collection())
assert isinstance(e.config, Config)
def can_grant_access_to_core_arg_parse_result(self):
c = ParserContext()
ok_(Executor(collection=Collection(), core=c).core is c)
def core_arg_parse_result_defaults_to_None(self):
ok_(Executor(collection=Collection()).core is None)
    class execute:
        # These tests rely on self.executor/self.task1/etc. prepared by the
        # enclosing suite's setup (defined earlier in this file).
        def base_case(self):
            self.executor.execute('task1')
            assert self.task1.body.called
        def kwargs(self):
            # A (name, kwargs) tuple passes keyword arguments to the task.
            k = {'foo': 'bar'}
            self.executor.execute(('task1', k))
            args = self.task1.body.call_args[0]
            kwargs = self.task1.body.call_args[1]
            # The Context is always the sole positional argument.
            ok_(isinstance(args[0], Context))
            eq_(len(args), 1)
            eq_(kwargs['foo'], 'bar')
        def contextualized_tasks_are_given_parser_context_arg(self):
            self.executor.execute('contextualized')
            args = self.contextualized.body.call_args[0]
            eq_(len(args), 1)
            ok_(isinstance(args[0], Context))
        def default_tasks_called_when_no_tasks_specified(self):
            # NOTE: when no tasks AND no default, Program will print global
            # help. We just won't do anything at all, which is fine for now.
            task = Task(Mock('default-task'))
            coll = Collection()
            coll.add_task(task, name='mytask', default=True)
            executor = Executor(collection=coll)
            executor.execute()
            args = task.body.call_args[0]
            ok_(isinstance(args[0], Context))
            eq_(len(args), 1)
class basic_pre_post:
| "basic pre/post task functionality"
def pre_tasks(self):
self.executor.execute('task2')
eq_(self.task1.body.call_count, 1)
def post_tasks(self):
self.executor.execute('task4')
eq_(self.task1.body.call_count, 1)
def calls_default_to_empty_args_always(self):
pre_body, post_body = Mock(), Mock()
t1 = Task(pre_body)
t2 = Task(post_body)
t3 = Task(Mock(), pre=[t1], post=[t2])
e = Executor(collection=Collection(t1=t1, t2=t2, t3=t3))
e.execute(('t3', {'something': 'meh'}))
for body in (pre_body, post_body):
args = body.call_args[0]
eq_(len(args), 1)
ok_(isinstance(args[0], Context))
def _call_objs(self):
# Setup
pre_body, post_body = Mock(), Mock()
t1 = Task(pre_body)
t2 = Task(post_body)
t3 = Task(Mock(),
pre=[call(t1, 5, foo='bar')],
post=[call(t2, 7, biz='baz')],
)
c = Collection(t1=t1, t2=t2, t3=t3)
e = Executor(collection=c)
e.execute('t3')
# Pre-task asserts
args, kwargs = pre_body.call_args
eq_(kwargs, {'foo': 'bar'})
assert isinstance(args[0], Context)
eq_(args[1], 5)
# Post-task asserts
args, kwargs = post_body.call_args
eq_(kwargs, {'biz': 'baz'})
assert isinstance(args[0], Context)
eq_(args[1], 7)
def call_objs_play_well_with_context_args(self):
self._call_objs()
    class deduping_and_chaining:
        # Pre/post chains run depth-first; duplicate invocations are deduped
        # unless --no-dedupe is passed.  Expected console output is compared
        # verbatim, so the literal strings below must not be reformatted.
        def chaining_is_depth_first(self):
            expect('-c depth_first deploy', out="""
Cleaning HTML
Cleaning .tar.gz files
Cleaned everything
Making directories
Building
Deploying
Preparing for testing
Testing
            """.lstrip())
        def _expect(self, args, expected):
            # Helper: run against the 'integration' tasks file.
            expect('-c integration {0}'.format(args), out=expected.lstrip())
        class adjacent_hooks:
            def deduping(self):
                self._expect('biz', """
foo
bar
biz
post1
post2
            """)
            def no_deduping(self):
                self._expect('--no-dedupe biz', """
foo
foo
bar
biz
post1
post2
post2
            """)
        class non_adjacent_hooks:
            def deduping(self):
                self._expect('boz', """
foo
bar
boz
post2
post1
            """)
            def no_deduping(self):
                self._expect('--no-dedupe boz', """
foo
bar
foo
boz
post2
post1
post2
            """)
        # AKA, a (foo) (foo -> bar) scenario arising from foo + bar
        class adjacent_top_level_tasks:
            def deduping(self):
                self._expect('foo bar', """
foo
bar
            """)
            def no_deduping(self):
                self._expect('--no-dedupe foo bar', """
foo
foo
bar
            """)
        # AKA (foo -> bar) (foo)
        class non_adjacent_top_level_tasks:
            def deduping(self):
                self._expect('foo bar', """
foo
bar
            """)
            def no_deduping(self):
                self._expect('--no-dedupe foo bar', """
foo
foo
bar
            """)
        def deduping_treats_different_calls_to_same_task_differently(self):
            # t1(5) appears twice in pre; only distinct (task, args) pairs run.
            body = Mock()
            t1 = Task(body)
            pre = [call(t1, 5), call(t1, 7), call(t1, 5)]
            t2 = Task(Mock(), pre=pre)
            c = Collection(t1=t1, t2=t2)
            e = Executor(collection=c)
            e.execute('t2')
            # Does not call the second t1(5)
            param_list = []
            for body_call in body.call_args_list:
                ok_(isinstance(body_call[0][0], Context))
                param_list.append(body_call[0][1])
            ok_(set(param_list) == set((5, 7)))
class collection_driven_config:
    "Collection-driven config concerns"
    def hands_collection_configuration_to_context(self):
        # Values set via Collection.configure surface as Context attributes.
        @task
        def mytask(ctx):
            eq_(ctx.my_key, 'value')
        c = Collection(mytask)
        c.configure({'my_key': 'value'})
        Executor(collection=c).execute('mytask')
    def hands_task_specific_configuration_to_context(self):
        # Each subcollection's config applies only to its own tasks.
        @task
        def mytask(ctx):
            eq_(ctx.my_key, 'value')
        @task
        def othertask(ctx):
            eq_(ctx.my_key, 'othervalue')
        inner1 = Collection('inner1', mytask)
        inner1.configure({'my_key': 'value'})
        inner2 = Collection('inner2', othertask)
        inner2.configure({'my_key': 'othervalue'})
        c = Collection(inner1, inner2)
        e = Executor(collection=c)
        e.execute('inner1.mytask', 'inner2.othertask')
    def subcollection_config_works_with_default_tasks(self):
        @task(default=True)
        def mytask(ctx):
            eq_(ctx.my_key, 'value')
        # Sets up a task "known as" sub.mytask which may be called as
        # just 'sub' due to being default.
        sub = Collection('sub', mytask=mytask)
        sub.configure({'my_key': 'value'})
        main = Collection(sub=sub)
        # Execute via collection default 'task' name.
        Executor(collection=main).execute('sub')
class returns_return_value_of_specified_task:
    # NOTE(review): relies on self.executor and self.task1..task4 built in a
    # setup method defined elsewhere in this file; the expected dict values
    # (7, 10, 15) are those tasks' return values -- confirm against setup.
    def base_case(self):
        eq_(self.executor.execute('task1'), {self.task1: 7})
    def with_pre_tasks(self):
        # Result maps include pre-task results as well.
        eq_(
            self.executor.execute('task2'),
            {self.task1: 7, self.task2: 10}
        )
    def with_post_tasks(self):
        # ...and post-task results.
        eq_(
            self.executor.execute('task4'),
            {self.task1: 7, self.task4: 15}
        )
class autoprinting:
def defaults_to_off_and_no_output(self):
expect("-c autoprint nope", out="")
def prints_return_value_to_stdout_when_on(self):
expect("-c autoprint yup", out="It's alive!\ |
# Public API of this package; each name is re-exported from a submodule below.
__all__ = [
    'null', 'Invalid', 'FieldsetErrors',
    'Field', 'FieldFactory', 'Fieldset',
    'field', 'fieldpreview', 'get_field_factory', 'get_field_preview',
    'Term', 'Vocabulary',
    'All','Function','Regex','Email','Range', 'Length','OneOf',
    'CompositeField', 'CompositeError',
    'InputField', 'OptionsField',
    'VocabularyField', 'BaseChoiceField','BaseMultiChoiceField',
    'TextField','IntegerField','FloatField',
    'DecimalField','TextAreaField','FileField','LinesField','PasswordField',
    'DateField','DateTimeField','RadioField','BoolField','ChoiceField',
    'MultiChoiceField','MultiSelectField','TimezoneField',
    'Form','FormWidgets',
    'button','button2','Button','Buttons',
    'AC_DEFAULT','AC_PRIMARY','AC_DANGER','AC_SUCCESS','AC_INFO','AC_WARNING',
    'parse_date','includeme', 'reify',
]
from pyramid.decorator import reify
# validation
from .interfaces import null
from .interfaces import Invalid
# field
from .field import Field
from .field import FieldFactory
from .fieldset import Fieldset
from .fieldset import FieldsetErrors
# field registration
from .directives import field
from .directives import fieldpreview
from .directives import get_field_factory
from .directives import get_field_preview
# vocabulary
from .vocabulary import Term
from .vocabulary import Vocabulary
# validators
from .validator import All
from .validator import Function
from .validator import Regex
from .validator import Email
from .validator import Range
from .validator import Length
from .validator import OneOf
# helper class
from .field import InputField
# helper field classes
from .fields import VocabularyField
from .fields import BaseChoiceField
from .fields import BaseMultiChoiceField
# fields
from .fields import TextField
from .fields import IntegerField
from .fields import FloatField
from .fields import DecimalField
from .fields import TextAreaField
from .fields import FileField
from .fields import LinesField
from .fields import PasswordField
from .fields import DateField
from .fields import DateTimeField
from .fields import RadioField
from .fields import BoolField
from .fields import ChoiceField
from .fields import MultiChoiceField
from .fields import MultiSelectField
from .fields import TimezoneField
from .fields import OptionsField
# composite fields
from .composite import CompositeField
from .composite import CompositeError
# forms
from .form import Form
from .form import FormWidgets
# button
from .button import button
from .button import button2
from .button import Button
from .button import Buttons
from .button import AC_DEFAULT
from .button import AC_PRIMARY
from .button import AC_DANGER
from .button import AC_SUCCESS
from .button import AC_INFO
from .button import AC_WARNING
# iso date
from .iso8601 import parse_date
def includeme(config):
    """Pyramid inclusion hook: wire this form package into the application.

    Includes the renderer/messaging dependencies, registers the
    'provide_form_field' config directive, adds the template layer and
    scans this package for declarations.
    """
    config.include('pyramid_chameleon')
    config.include('djed.renderer')
    config.include('djed.message')
    # field registration directive (imported lazily to avoid import cycles
    # at module load -- TODO confirm that is the reason for the local import)
    from .directives import add_field
    config.add_directive('provide_form_field', add_field)
    # layers
    config.add_layer('form', path='djed.form:templates/')
    # scan
    config.scan('djed.form')
|
# -*- coding: UTF-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db | import models
from jsonfield import JSONField
# Static fallback avatar URL used when no Twitter profile image is available.
PLACEHOLDER_IMAGE = "%simages/placeholder.png"%settings.STATIC_URL
class UserProfile(models.Model):
    """Per-user profile caching Twitter-derived data and images."""
    user = models.OneToOneField(User, related_name='_profile_cache')
    twitter_image = models.CharField(max_length=255)
    profile_image = models.ImageField(upload_to='profile',blank=True,null=True)
    # BUGFIX: the default must be the callable `dict`, not a `{}` literal --
    # a literal is a single mutable instance shared by every model instance
    # (Django's system check fields.W019 flags exactly this pattern).
    twitter_data = JSONField(default=dict)
    def __unicode__(self):
        return u'%s' % self.user.username
    @property
    def location(self):
        # NOTE(review): when 'location' is absent this interpolates None and
        # therefore returns the string u'None', not None -- confirm callers
        # depend on that before changing it.
        return u'%s' % self.twitter_data.get('location', None)
    def image_or_placeholder(self):
        """Return the cached Twitter avatar URL, or the static placeholder."""
        return self.twitter_data.get('profile_image_url', PLACEHOLDER_IMAGE)
# import signals
from signals import create_profile
|
done': extra_move_quantity, 'product_uom_qty': 0})
extra_move_quantity -= extra_move_quantity
if extra_move_quantity == 0.0:
break
return extra_move | self
    def _unreserve_initial_demand(self, new_move):
        # Extension hook called from _action_done() after a move is split
        # into `new_move`; intentionally a no-op in the base implementation
        # (presumably overridden by other modules -- confirm downstream).
        pass
    def _action_done(self, cancel_backorder=False):
        """Finalize these moves: confirm drafts, cancel zero-done moves,
        create extra moves, split partially-done moves, process move lines
        and return the recordset of moves set to 'done'.

        :param cancel_backorder: when True, split-off (backorder) moves are
            cancelled instead of being left for a backorder picking.
        """
        self.filtered(lambda move: move.state == 'draft')._action_confirm()  # MRP allows scrapping draft moves
        moves = self.exists().filtered(lambda x: x.state not in ('done', 'cancel'))
        moves_todo = self.env['stock.move']
        # Cancel moves where necessary ; we should do it before creating the extra moves because
        # this operation could trigger a merge of moves.
        for move in moves:
            if move.quantity_done <= 0:
                if float_compare(move.product_uom_qty, 0.0, precision_rounding=move.product_uom.rounding) == 0 or cancel_backorder:
                    move._action_cancel()
        # Create extra moves where necessary
        for move in moves:
            if move.state == 'cancel' or move.quantity_done <= 0:
                continue
            moves_todo |= move._create_extra_move()
        moves_todo._check_company()
        # Split moves where necessary and move quants
        for move in moves_todo:
            # To know whether we need to create a backorder or not, round to the general product's
            # decimal precision and not the product's UOM.
            rounding = self.env['decimal.precision'].precision_get('Product Unit of Measure')
            if float_compare(move.quantity_done, move.product_uom_qty, precision_digits=rounding) < 0:
                # Need to do some kind of conversion here
                qty_split = move.product_uom._compute_quantity(move.product_uom_qty - move.quantity_done, move.product_id.uom_id, rounding_method='HALF-UP')
                new_move = move._split(qty_split)
                move._unreserve_initial_demand(new_move)
                if cancel_backorder:
                    self.env['stock.move'].browse(new_move)._action_cancel()
        moves_todo.mapped('move_line_ids').sorted()._action_done()
        # Check the consistency of the result packages; there should be an unique location across
        # the contained quants.
        for result_package in moves_todo\
                .mapped('move_line_ids.result_package_id')\
                .filtered(lambda p: p.quant_ids and len(p.quant_ids) > 1):
            if len(result_package.quant_ids.filtered(lambda q: not float_is_zero(abs(q.quantity) + abs(q.reserved_quantity), precision_rounding=q.product_uom_id.rounding)).mapped('location_id')) > 1:
                raise UserError(_('You cannot move the same package content more than once in the same transfer or split the same package into two location.'))
        picking = moves_todo.mapped('picking_id')
        moves_todo.write({'state': 'done', 'date': fields.Datetime.now(), 'delay_alert_date': False})
        # Re-assign destination moves grouped per company, with that
        # company's context, so multi-company rules apply correctly.
        move_dests_per_company = defaultdict(lambda: self.env['stock.move'])
        for move_dest in moves_todo.move_dest_ids:
            move_dests_per_company[move_dest.company_id.id] |= move_dest
        for company_id, move_dests in move_dests_per_company.items():
            move_dests.sudo().with_company(company_id)._action_assign()
        # We don't want to create back order for scrap moves
        # Replace by a kwarg in master
        if self.env.context.get('is_scrap'):
            return moves_todo
        if picking and not cancel_backorder:
            picking._create_backorder()
        return moves_todo
def unlink(self):
if any(move.state not in ('draft', 'cancel') for move in self):
raise UserError(_('You can only delete draft moves.'))
# With the non plannified picking, draft moves could have some move lines.
self.with_context(prefetch_fields=False).mapped('move_line_ids').unlink()
return super(StockMove, self).unlink()
def _prepare_move_split_vals(self, qty):
vals = {
'product_uom_qty': qty,
'procure_method': 'make_to_stock',
'move_dest_ids': [(4, x.id) for x in self.move_dest_ids if x.state not in ('done', 'cancel')],
'move_orig_ids': [(4, x.id) for x in self.move_orig_ids],
'origin_returned_move_id': self.origin_returned_move_id.id,
'price_unit': self.price_unit,
}
if self.env.context.get('force_split_uom_id'):
vals['product_uom'] = self.env.context['force_split_uom_id']
return vals
    def _split(self, qty, restrict_partner_id=False):
        """ Splits qty from this move into a new move
        :param qty: float. quantity to split (given in product UoM)
        :param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionary. can contain the special key 'source_location_id' in order to force the source location when copying the move
        :returns: id of the backorder move created """
        self = self.with_prefetch()  # This makes the ORM only look for one record and not 300 at a time, which improves performance
        if self.state in ('done', 'cancel'):
            raise UserError(_('You cannot split a stock move that has been set to \'Done\'.'))
        elif self.state == 'draft':
            # we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in
            # case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode.
            raise UserError(_('You cannot split a draft move. It needs to be confirmed first.'))
        # Nothing to split: qty is zero or covers the whole move.
        if float_is_zero(qty, precision_rounding=self.product_id.uom_id.rounding) or self.product_qty <= qty:
            return self.id
        decimal_precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        # `qty` passed as argument is the quantity to backorder and is always expressed in the
        # quants UOM. If we're able to convert back and forth this quantity in the move's and the
        # quants UOM, the backordered move can keep the UOM of the move. Else, we'll create it in
        # the UOM of the quants.
        uom_qty = self.product_id.uom_id._compute_quantity(qty, self.product_uom, rounding_method='HALF-UP')
        if float_compare(qty, self.product_uom._compute_quantity(uom_qty, self.product_id.uom_id, rounding_method='HALF-UP'), precision_digits=decimal_precision) == 0:
            defaults = self._prepare_move_split_vals(uom_qty)
        else:
            defaults = self.with_context(force_split_uom_id=self.product_id.uom_id.id)._prepare_move_split_vals(qty)
        if restrict_partner_id:
            defaults['restrict_partner_id'] = restrict_partner_id
        # TDE CLEANME: remove context key + add as parameter
        if self.env.context.get('source_location_id'):
            defaults['location_id'] = self.env.context['source_location_id']
        new_move = self.with_context(rounding_method='HALF-UP').copy(defaults)
        # FIXME: pim fix your crap
        # Update the original `product_qty` of the move. Use the general product's decimal
        # precision and not the move's UOM to handle case where the `quantity_done` is not
        # compatible with the move's UOM.
        new_product_qty = self.product_id.uom_id._compute_quantity(self.product_qty - qty, self.product_uom, round=False)
        new_product_qty = float_round(new_product_qty, precision_digits=self.env['decimal.precision'].precision_get('Product Unit of Measure'))
        self.with_context(do_not_unreserve=True, rounding_method='HALF-UP').write({'product_uom_qty': new_product_qty})
        new_move = new_move._action_confirm(merge=False)
        return new_move.id
def _recompute_state(self):
for move in self:
if move.state in ('cancel', 'done', 'draft'):
continue
elif move.reserved_availability == move.product_uom_qty:
|
#!/usr/bin/env python
# Copyright (C) 2014 Ioan Dragan
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys, os, time
import re
from os import path
# NOTE(review): appears unused in this file -- candidate for removal.
folderList = []
def getProblems( arg ):
    """Print every problem name in ProblemList.txt that starts with ``arg``,
    one per line, with a ".p" suffix appended.

    :param arg: prefix string to match against each line of the list file
    """
    # `with` guarantees the handle is closed even if printing raises (the
    # original leaked it on error).  print(...) with a single argument
    # behaves identically under Python 2 and 3.
    with open("ProblemList.txt", "r") as fin:
        for ln in fin:
            if ln.startswith(arg):
                print(ln.strip() + ".p")
if __name__ == '__main__':
    # Usage: script.py <problem-name-prefix>
    # Prints every matching problem from ProblemList.txt as "<name>.p".
    getProblems(sys.argv[1])
|
warning_cls = np.VisibleDeprecationWarning
class TestRankDeprecation(_DeprecationTestCase):
    """np.rank is deprecated and should eventually be removed outright;
    at that point the VisibleDeprecationWarning check goes with it.
    """
    def test(self):
        arr = np.arange(10)
        assert_warns(np.VisibleDeprecationWarning, np.rank, arr)
class TestComparisonDeprecations(_DeprecationTestCase):
    """This tests the deprecation, for non-element-wise comparison logic.
    This used to mean that when an error occurred during element-wise comparison
    (i.e. broadcasting) NotImplemented was returned, but also in the comparison
    itself, False was given instead of the error.
    Also test FutureWarning for the None comparison.
    """
    message = "elementwise.* comparison failed; .*"
    def test_normal_types(self):
        for op in (operator.eq, operator.ne):
            # Broadcasting errors:
            self.assert_deprecated(op, args=(np.zeros(3), []))
            a = np.zeros(3, dtype='i,i')
            # (warning is issued a couple of times here)
            self.assert_deprecated(op, args=(a, a[:-1]), num=None)
            # Element comparison error (numpy array can't be compared).
            a = np.array([1, np.array([1,2,3])], dtype=object)
            b = np.array([1, np.array([1,2,3])], dtype=object)
            self.assert_deprecated(op, args=(a, b), num=None)
    def test_string(self):
        # For two string arrays, strings always raised the broadcasting error:
        a = np.array(['a', 'b'])
        b = np.array(['a', 'b', 'c'])
        assert_raises(ValueError, lambda x, y: x == y, a, b)
        # The empty list is not cast to string, as this is only to document
        # that fact (it likely should be changed). This means that the
        # following works (and returns False) due to dtype mismatch:
        a == []
    def test_void_dtype_equality_failures(self):
        class NotArray(object):
            def __array__(self):
                raise TypeError
            # Needed so Python 3 does not raise DeprecationWarning twice.
            def __ne__(self, other):
                return NotImplemented
        self.assert_deprecated(lambda: np.arange(2) == NotArray())
        self.assert_deprecated(lambda: np.arange(2) != NotArray())
        # Comparisons between structured arrays of mismatched dtypes must
        # raise a FutureWarning rather than silently returning a scalar.
        struct1 = np.zeros(2, dtype="i4,i4")
        struct2 = np.zeros(2, dtype="i4,i4,i4")
        assert_warns(FutureWarning, lambda: struct1 == 1)
        assert_warns(FutureWarning, lambda: struct1 == struct2)
        assert_warns(FutureWarning, lambda: struct1 != 1)
        assert_warns(FutureWarning, lambda: struct1 != struct2)
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")
        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
class TestDatetime64Timezone(_DeprecationTestCase):
    """Parsing of datetime64 with timezones deprecated in 1.11.0, because
    datetime64 is now timezone naive rather than UTC only.
    It will be quite a while before we can remove this, because, at the very
    least, a lot of existing code uses the 'Z' modifier to avoid conversion
    from local time to UTC, even if otherwise it handles time in a timezone
    naive fashion.
    """
    def test_string(self):
        # Both an explicit offset ('+01') and the Zulu suffix ('Z') warn.
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
    @dec.skipif(not _has_pytz, "The pytz module is not available.")
    def test_datetime(self):
        # A tz-aware datetime object must warn as well.
        tz = pytz.timezone('US/Eastern')
        dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
        self.assert_deprecated(np.datetime64, args=(dt,))
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
    """View of non-C-contiguous arrays deprecated in 1.11.0.
    The deprecation will not be raised for arrays that are both C and F
    contiguous, as C contiguous is dominant. There are more such arrays
    with relaxed stride checking than without so the deprecation is not
    as visible with relaxed stride checking in force.
    """
    def test_fortran_contiguous(self):
        # .T of a 2-D array is F- but not C-contiguous, so .view must warn.
        self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
        self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
    """Invalid arguments to the ORDER parameter in array.flatten() should not be
    allowed and should raise an error. However, in the interests of not breaking
    code that may inadvertently pass invalid arguments to this parameter, a
    DeprecationWarning will be issued instead for the time being to give developers
    time to refactor relevant code.
    """
    def test_flatten_array_non_string_arg(self):
        x = np.zeros((3, 5))
        # `self.message` is the expected warning text checked by the base class.
        self.message = ("Non-string object detected for "
                        "the array ordering. Please pass "
                        "in 'C', 'F', 'A', or 'K' instead")
        self.assert_deprecated(x.flatten, args=(np.pi,))
    def test_flatten_array_invalid_string_arg(self):
        # Tests that a DeprecationWarning is raised
        # when a string of length greater than one
        # starting with "C", "F", "A", or "K" (case-
        # and unicode-insensitive) is passed in for
        # the ORDER parameter. Otherwise, a TypeError
        # will be raised!
        x = np.zeros((3, 5))
        self.message = ("Non length-one string passed "
                        "in for the array ordering. Please "
                        "pass in 'C', 'F', 'A', or 'K' instead")
        self.assert_deprecated(x.flatten, args=("FACK",))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
    """Setting ndarray.data is inherently unsafe (gh-7093) and should one day
    be forbidden outright; for backwards compatibility it currently only
    emits a DeprecationWarning, giving downstream code time to migrate.
    """
    def test_data_attr_assignment(self):
        target = np.arange(10)
        donor = np.linspace(0, 1, 10)
        self.message = ("Assigning the 'data' attribute is an "
                        "inherently unsafe operation and will "
                        "be removed in the future.")
        self.assert_deprecated(target.__setattr__, args=('data', donor.data))
class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
"""Argument to the num parameter in linspace that cannot be
safely interpreted as an integer is deprecated in 1.12.0.
Argument to the num parameter in linspace that cannot be
safely interpreted as an integer should n |
# -*- coding: utf-8 -*-
'''
Usage:
setuserpass.py [-d] username password
Set a user's username/password, creating it
if it did not already exist.
Specifying -d on the commandline removes the user and in that
case a password is not necessary
'''
import sys
from hashlib import sha1
from werkzeug.security import generate_password_hash
from feeder import db
from feeder.models import get_user
from feeder import gauth
# ---- Command-line handling (this script runs its logic at import time) ----
# Use sys.exit rather than the exit() builtin: exit/quit are injected by the
# `site` module for interactive use and are absent under `python -S` or in
# frozen interpreters, while sys.exit is always available.  Both raise
# SystemExit with the given message, so behavior is unchanged.
args = sys.argv[1:]
if len(args) == 0 or '-h' in args:
    sys.exit(__doc__)
# Check delete flag
should_delete = False
if '-d' in args:
    should_delete = True
    args.remove('-d')
# Make sure enough arguments were specified
if not should_delete and len(args) < 2:
    sys.exit("Not enough arguments specified. Print help with -h")
elif should_delete and len(args) < 1:
    sys.exit("No username specified. Print help with -h")
if should_delete:
    username = args[0]
else:
    username, password = args
# Get User (get_user presumably creates the user when missing -- TODO confirm)
user = get_user(username)
if should_delete:
    db.session.delete(user)
    db.session.commit()
    sys.exit("Removed user {}".format(username))
# Generate a password hash
# Make sure to use a byte string
try:
    bpassword = password.encode('utf-8')
except AttributeError:
    # Already bytestring
    bpassword = password
# Then add the salt used by the android client
androidpassword = sha1(gauth.__ANDROID_SALT__ + bpassword)\
    .hexdigest().lower()
# And finally salt it for real
user.passwordhash = generate_password_hash(androidpassword)
db.session.add(user)
db.session.commit()
sys.exit("User updated")
|
import os
import tempfile
import numpy as np
from numpy.testing import assert_equal, raises, assert_allclose
from skimage import novice
from skimage.novice._novice import (array_to_xy_origin, xy_to_array_origin,
rgb_transpose)
from skimage import data_dir
from skimage._shared.utils import all_warnings
IMAGE_PATH = os.path.join(data_dir, "chelsea.png")
SMALL_IMAGE_PATH = os.path.join(data_dir, "block.png")
def _array_2d_to_RGBA(array):
return np.tile(array[:, :, np.newaxis], (1, 1, 4))
def test_xy_to_array_origin():
    # Round-tripping through both origin conversions must be the identity.
    h, w = 3, 5
    array = np.arange(h * w).reshape(h, w, 1)
    out = xy_to_array_origin(array_to_xy_origin(array.copy()))
    assert np.allclose(out, array)
def test_pic_info():
    # Metadata of a freshly opened, unmodified picture.
    pic = novice.open(IMAGE_PATH)
    assert_equal(pic.format, "png")
    assert_equal(pic.path, os.path.abspath(IMAGE_PATH))
    assert_equal(pic.size, (451, 300))
    assert_equal(pic.width, 451)
    assert_equal(pic.height, 300)
    assert not pic.modified
    assert_equal(pic.scale, 1)
def test_pixel_iteration():
    # Iterating a picture yields exactly one item per pixel.
    pic = novice.open(SMALL_IMAGE_PATH)
    num_pixels = sum(1 for p in pic)
    assert_equal(num_pixels, pic.width * pic.height)
def test_modify():
    # In-place pixel edits and resizing flag the picture as modified
    # and clear its backing path.
    pic = novice.open(SMALL_IMAGE_PATH)
    assert_equal(pic.modified, False)
    for p in pic:
        if p.x < (pic.width / 2):
            p.red /= 2
            p.green /= 2
            p.blue /= 2
    for p in pic:
        if p.x < (pic.width / 2):
            assert p.red <= 128
            assert p.green <= 128
            assert p.blue <= 128
    s = pic.size
    with all_warnings():  # precision loss
        pic.size = (pic.width / 2, pic.height / 2)
    assert_equal(pic.size, (int(s[0] / 2), int(s[1] / 2)))
    assert pic.modified
    assert pic.path is None
def test_pixel_rgb():
    # Channel access via .rgb and via .red/.green/.blue must agree.
    pic = novice.Picture.from_size((3, 3), color=(10, 10, 10))
    pixel = pic[0, 0]
    pixel.rgb = np.arange(3)
    assert_equal(pixel.rgb, np.arange(3))
    for i, channel in enumerate((pixel.red, pixel.green, pixel.blue)):
        assert_equal(channel, i)
    pixel.red = 3
    pixel.green = 4
    pixel.blue = 5
    assert_equal(pixel.rgb, np.arange(3) + 3)
    for i, channel in enumerate((pixel.red, pixel.green, pixel.blue)):
        assert_equal(channel, i + 3)
    # NOTE(review): per the assertion below, assigning four values to .rgb
    # keeps only the first three -- confirm against the Pixel.rgb setter.
    pixel.rgb = np.arange(4)
    assert_equal(pixel.rgb, np.arange(3))
    assert pic.array.dtype == np.uint8
def test_pixel_rgba():
    # Same as above, but including the alpha channel through .rgba.
    pic = novice.Picture.from_size((3, 3), color=(10, 10, 10))
    pixel = pic[0, 0]
    pixel.rgba = np.arange(4)
    assert_equal(pixel.rgba, np.arange(4))
    for i, channel in enumerate((pixel.red, pixel.green, pixel.blue, pixel.alpha)):
        assert_equal(channel, i)
    pixel.red = 3
    pixel.green = 4
    pixel.blue = 5
    pixel.alpha = 6
    assert_equal(pixel.rgba, np.arange(4) + 3)
    for i, channel in enumerate((pixel.red, pixel.green, pixel.blue, pixel.alpha)):
        assert_equal(channel, i + 3)
def test_pixel_rgb_float():
    # Float channel values are truncated to integers.
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.rgb = (1.1, 1.1, 1.1)
    assert_equal(pixel.rgb, (1, 1, 1))
def test_pixel_rgba_float():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.rgba = (1.1, 1.1, 1.1, 1.1)
    assert_equal(pixel.rgba, (1, 1, 1, 1))
def test_modified_on_set():
    # Assigning a pixel tuple marks the picture modified, clears the path.
    pic = novice.Picture(SMALL_IMAGE_PATH)
    pic[0, 0] = (1, 1, 1)
    assert pic.modified
    assert pic.path is None
def test_modified_on_set_pixel():
    # Mutating a single channel through a Pixel object also flags it.
    data = np.zeros(shape=(10, 5, 3), dtype=np.uint8)
    pic = novice.Picture(array=data)
    pixel = pic[0, 0]
    pixel.green = 1
    assert pic.modified
def test_update_on_save():
    # Saving resets `modified` and records the new path/format.
    pic = novice.Picture(array=np.zeros((3, 3, 3)))
    with all_warnings():  # precision loss
        pic.size = (6, 6)
    assert pic.modified
    assert pic.path is None
    fd, filename = tempfile.mkstemp(suffix=".jpg")
    os.close(fd)
    try:
        pic.save(filename)
        assert not pic.modified
        assert_equal(pic.path, os.path.abspath(filename))
        assert_equal(pic.format, "jpeg")
    finally:
        os.unlink(filename)
def test_indexing():
    # Rectangular slice assignment, with explicit, open-ended and
    # full-range slices, reaches exactly the expected pixels.
    array = 128 * np.ones((10, 10, 3), dtype=np.uint8)
    pic = novice.Picture(array=array)
    pic[0:5, 0:5] = (0, 0, 0)
    for p in pic:
        if (p.x < 5) and (p.y < 5):
            assert_equal(p.rgb, (0, 0, 0))
            assert_equal(p.red, 0)
            assert_equal(p.green, 0)
            assert_equal(p.blue, 0)
    pic[:5, :5] = (255, 255, 255)
    for p in pic:
        if (p.x < 5) and (p.y < 5):
            assert_equal(p.rgb, (255, 255, 255))
            assert_equal(p.red, 255)
            assert_equal(p.green, 255)
            assert_equal(p.blue, 255)
    pic[5:pic.width, 5:pic.height] = (255, 0, 255)
    for p in pic:
        if (p.x >= 5) and (p.y >= 5):
            assert_equal(p.rgb, (255, 0, 255))
            assert_equal(p.red, 255)
            assert_equal(p.green, 0)
            assert_equal(p.blue, 255)
    pic[5:, 5:] = (0, 0, 255)
    for p in pic:
        if (p.x >= 5) and (p.y >= 5):
            assert_equal(p.rgb, (0, 0, 255))
            assert_equal(p.red, 0)
            assert_equal(p.green, 0)
            assert_equal(p.blue, 255)
def test_picture_slice():
    # NOTE(review): pic is indexed (x, y) while the raw array is compared
    # row-first; this relies on novice's xy/array origin mapping -- see
    # array_to_xy_origin/xy_to_array_origin imported above.
    array = _array_2d_to_RGBA(np.arange(0, 10)[np.newaxis, :])
    pic = novice.Picture(array=array)
    x_slice = slice(3, 8)
    subpic = pic[:, x_slice]
    assert_allclose(subpic.array, array[x_slice, :])
def test_move_slice():
    # Rotating the image columns via slice assignment preserves content.
    h, w = 3, 12
    array = _array_2d_to_RGBA(np.linspace(0, 255, h * w).reshape(h, w))
    array = array.astype(np.uint8)
    pic = novice.Picture(array=array)
    pic_orig = novice.Picture(array=array.copy())
    # Move left cut of image to the right side.
    cut = 5
    rest = pic.width - cut
    temp = pic[:cut, :]
    temp.array = temp.array.copy()
    pic[:rest, :] = pic[cut:, :]
    pic[rest:, :] = temp
    assert pic[rest:, :] == pic_orig[:cut, :]
    assert pic[:rest, :] == pic_orig[cut:, :]
def test_negative_index():
    n = 10
    array = _array_2d_to_RGBA(np.arange(0, n)[np.newaxis, :])
    # Test both x and y indices.
    pic = novice.Picture(array=array)
    assert pic[-1, 0] == pic[n - 1, 0]
    pic = novice.Picture(array=rgb_transpose(array))
    assert pic[0, -1] == pic[0, n - 1]
def test_negative_slice():
    n = 10
    array = _array_2d_to_RGBA(np.arange(0, n)[np.newaxis, :])
    # Test both x and y slices.
    pic = novice.Picture(array=array)
    assert pic[-3:, 0] == pic[n - 3:, 0]
    pic = novice.Picture(array=rgb_transpose(array))
    assert pic[0, -3:] == pic[0, n - 3:]
def test_getitem_with_step():
    # Strided slicing must match numpy's strided indexing of the array.
    h, w = 5, 5
    array = _array_2d_to_RGBA(np.linspace(0, 255, h * w).reshape(h, w))
    pic = novice.Picture(array=array)
    sliced_pic = pic[::2, ::2]
    assert sliced_pic == novice.Picture(array=array[::2, ::2])
# Error cases: wrong index arity, out-of-bounds access and out-of-range
# channel values must raise IndexError / ValueError respectively.
@raises(IndexError)
def test_1d_getitem_raises():
    pic = novice.Picture.from_size((1, 1))
    pic[1]
@raises(IndexError)
def test_3d_getitem_raises():
    pic = novice.Picture.from_size((1, 1))
    pic[1, 2, 3]
@raises(IndexError)
def test_1d_setitem_raises():
    pic = novice.Picture.from_size((1, 1))
    pic[1] = 0
@raises(IndexError)
def test_3d_setitem_raises():
    pic = novice.Picture.from_size((1, 1))
    pic[1, 2, 3] = 0
@raises(IndexError)
def test_out_of_bounds_indexing():
    pic = novice.open(SMALL_IMAGE_PATH)
    pic[pic.width, pic.height]
@raises(ValueError)
def test_pixel_rgb_raises():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.rgb = (-1, -1, -1)
@raises(ValueError)
def test_pixel_red_raises():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.red = 256
@raises(ValueError)
def test_pixel_green_raises():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.green = 256
@raises(ValueError)
def test_pixel_blue_raises():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.blue = 256
@raises(ValueError)
def test_pixel_alpha_raises():
    pixel = novice.Picture.from_size((1, 1))[0, 0]
    pixel.alpha = 256
if __name__ == '__main__':
    # Delegate to numpy's test runner for this module.
    np.testing.run_module_suite()
|
ruct
import time
import pandas as pd
from datetime import datetime
"""
This interface library is designed to be used from very different contexts.
The general idea is that the Mindwave modules in the headset (and other devices)
talk a common binary protocol, which is entirely one-sided from headset to device/
computer, with one exception (explained later). The means of transport however
does vary. The original MindWave headset had 2.4Ghz wireless connection, using a
proprietary USB dongle/receiver. This receiver is mounted as a serial console in
Linux. It also requires extra commands to connect and disconnect.
The MindWave mobile uses bluetooth, which I would recommend over the 2.4Ghz version.
There have been hacks with arduinos hooked up to the Thinkgear AM modules directly.
Not only are the technical means of data transport different, your application needs
one of several possible means of regularly reading the data.
In the EuroPython 2014 talk "Brainwaves for Hackers" I demonstrated a way to do this
in the IPython Notebook, and that only involved a blocking read from a bluetooth socket at
certain intervals. Pygame works the same way.
There are more sophisticated event loops out there, like in Kivy, Gevent or Tornado.
Those are the reasons why there is a parser module that can be fed a stream of bytes.
You can add recorders to the parser, which take care of analyzing the parsed data.
There is for example one recorder which converts the parsed data into Pandas
Timeseries. But doing that dozens of times per second is too much work for weak
processors, like in the Raspberry Pi, so there you would probably derive your own
parser.
"""
def queue_to_series(a, freq="s"):
    """Turn a sequence of samples into a pandas Series indexed by time.

    The index is a date_range of ``len(a)`` periods at ``freq`` ending now,
    so the last sample is stamped with the current time.
    """
    index = pd.date_range(end=datetime.now(), periods=len(a), freq=freq)
    return pd.Series(a, index=index)
class ThinkGearParser(object):
    """Incremental parser for the ThinkGear headset byte protocol.

    Feed raw bytes with feed(); parsed values are pushed to the attached
    recorder objects via dispatch_data().  Written for Python 2: it drives
    the generator with .next() and iterates a *str* of bytes using ord().
    """
    def __init__(self, recorders=None):
        # recorders: objects exposing dispatch_data(key, value) and
        # finish_chunk(); more can be appended later.
        self.recorders = []
        if recorders is not None:
            self.recorders += recorders
        self.input_data = ""
        self.parser = self.parse()
        # Prime the generator so it is parked on its first `yield`.
        self.parser.next()
    def feed(self, data):
        """Push a chunk of raw bytes through the parser, then let each
        recorder post-process the chunk."""
        for c in data:
            self.parser.send(ord(c))
        for recorder in self.recorders:
            recorder.finish_chunk()
        # NOTE(review): every byte ever fed is retained here, growing without
        # bound over a long session -- confirm whether anything reads it.
        self.input_data += data
    def dispatch_data(self, key, value):
        # Fan a parsed (key, value) pair out to every recorder.
        for recorder in self.recorders:
            recorder.dispatch_data(key, value)
    def parse(self):
        """
        This generator parses one byte at a time.
        """
        # NOTE(review): `i` and `times` appear unused below.
        i = 1
        times = []
        while 1:
            byte = yield
            if byte == 0xaa:
                byte = yield  # second sync byte, should also be 0xaa
                if byte == 0xaa:
                    # packet synced by 0xaa 0xaa
                    packet_length = yield
                    packet_code = yield
                    if packet_code == 0xd4:
                        # standing by
                        self.state = "standby"
                    elif packet_code == 0xd0:
                        self.state = "connected"
                    elif packet_code == 0xd2:
                        # headset-disconnected notification: consume the
                        # two-byte headset id and record the dongle state.
                        data_len = yield
                        headset_id = yield
                        headset_id += yield
                        self.dongle_state = "disconnected"
                    else:
                        # Data payload: a sequence of [code][value...] rows.
                        self.sending_data = True
                        left = packet_length - 2
                        while left > 0:
                            if packet_code == 0x80: # raw value
                                row_length = yield
                                a = yield
                                b = yield
                                value = struct.unpack("<h", chr(b)+chr(a))[0]
                                self.dispatch_data("raw", value)
                                left -= 2
                            elif packet_code == 0x02: # Poor signal
                                a = yield
                                left -= 1
                            elif packet_code == 0x04: # Attention (eSense)
                                a = yield
                                if a > 0:
                                    v = struct.unpack("b", chr(a))[0]
                                    # eSense values are only valid in 1..100.
                                    if 0 < v <= 100:
                                        self.dispatch_data("attention", v)
                                left -= 1
                            elif packet_code == 0x05: # Meditation (eSense)
                                a = yield
                                if a > 0:
                                    v = struct.unpack("b", chr(a))[0]
                                    if 0 < v <= 100:
                                        self.dispatch_data("meditation", v)
                                left -= 1
                            elif packet_code == 0x16: # Blink Strength
                                self.current_blink_strength = yield
                                left -= 1
                            elif packet_code == 0x83:
                                # EEG band powers: 8 rows of 3 bytes each.
                                vlength = yield
                                self.current_vector = []
                                for row in range(8):
                                    a = yield
                                    b = yield
                                    c = yield
                                    # NOTE(review): `value` is computed but
                                    # never appended to current_vector, so
                                    # "bands" is always dispatched with an
                                    # empty list (and 255 looks like it
                                    # should be 256) -- flagged, not changed.
                                    value = a*255*255+b*255+c
                                left -= vlength
                                self.dispatch_data(
                                    "bands", self.current_vector)
                            # Next row of the payload starts with its code.
                            packet_code = yield
                    else:
                        pass # sync failed
            else:
                pass # sync failed
class TimeSeriesRecorder:
    def __init__(self, file_name=None):
        """Create a recorder; optionally persist to an HDF5 file.

        :param file_name: path for a pandas HDFStore, or None to keep
            everything in memory only.
        """
        # Completed time series, grown from the queues in finish_chunk().
        self.meditation = pd.Series()
        self.attention = pd.Series()
        self.raw = pd.Series()
        self.blink = pd.Series()
        self.poor_signal = pd.Series()
        # Per-chunk sample buffers, flushed by finish_chunk().
        self.attention_queue = []
        self.meditation_queue = []
        self.poor_signal_queue = []
        self.blink_queue = []
        self.raw_queue = []
        if file_name is not None:
            self.store = pd.HDFStore(file_name)
        else:
            self.store = None
def dispatch_data(self, key, value):
if key == "attention":
self.attention_queue.append(value)
# Blink and "poor signal" is only sent when a blink or poor signal is detected
# So fake continuous signal as zeros.
self.blink_queue.append(0)
self.poor_signal_queue.append(0)
elif key == "meditation":
self.meditation_queue.append(value)
elif key == "raw":
self.raw_queue.append(value)
elif key == "blink":
self.blink_queue.append(value)
if len(self.blink_queue) > 0:
self.blink_queue[-1] = self.current_blink_strength
elif key == "poor_signal":
if len(self.poor_signal_queue) > 0:
self.poor_signal_queue[-1] = a
def record_meditation(self, attention):
self.meditation_queue.append()
def record_blink(self, attention):
self.blink_queue.append()
def finish_chunk(self):
""" called periodically to update the timeseries """
self.meditation = pd.concat(
[self.meditation, queue_to_series(self.meditation_queue, freq="s")])
self.attention = pd.concat(
[self.attention, queue_to_series(self.attention_queue, freq="s")])
self.blink = pd.concat(
[self.blink, queue_to_series(self.blink_queue, freq="s")])
self.raw = pd.concat(
[self.raw, queue_to_series(self.raw_queue, freq="1953U")])
self.poor_signal = pd.concat(
[self.poor_signal, queue_to_series(self.poor_signal_queue)])
self.attention_queue = []
self.meditation_queue = []
self.poor_signal_queue = []
self.blink_queue = []
self.raw_queue = []
if self.store is not None:
|
from epic.utils.helper_functions import lru_cache
from numpy import log
from scipy.stats import poisson
@lru_cache()
def compute_window_score(i, poisson_parameter):
    # type: (int, float) -> float
    """Score a window by the negative log of its Poisson probability.

    Counts below the Poisson parameter (which is also the average) mean
    no enrichment and score zero; a p-value that underflows to zero is
    capped at a score of 1000, since log(0) is undefined.
    """
    if i < poisson_parameter:
        # No enrichment; poisson param also average
        return 0
    p_value = poisson.pmf(i, poisson_parameter)
    if not p_value > 0:
        # log of zero not defined — cap with a large sentinel score.
        return 1000
    return -log(p_value)
|
from django.db import models
from django.contrib.auth.models import User
# Core models contains models used across multiple apps
class NewsFeed(models.Model):
    """
    Contains information about an RSS feed. If user is None, the feed is
    global.
    """
    # Human-readable title/description; optional.
    name = models.CharField(max_length=255, null=True, blank=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    # Location of the feed document; the only required field.
    url = models.CharField(max_length=255)
    # Owner of the feed; None marks the feed as visible to everyone.
    user = models.ForeignKey(User, related_name='feeds', null=True)
    class Meta:
        ordering = ['name']
class Alliance(models.Model):
    """Represents an alliance, data pulled from api"""
    # Primary key is the EVE API's own alliance id, not an autoincrement.
    id = models.BigIntegerField(primary_key=True)
    name = models.CharField(max_length=100)
    shortname = models.CharField(max_length=100)
    # The executor corporation; no reverse relation is needed ('+').
    executor = models.ForeignKey('Corporation', blank=True, null=True, related_name='+')
    def __unicode__(self):
        return self.name
class Corporation(models.Model):
    """Represents a corporation, data pulled from api"""
    # Primary key is the EVE API's own corporation id.
    id = models.BigIntegerField(primary_key=True)
    name = models.CharField(max_length=100)
    ticker = models.CharField(max_length=100)
    # Null for corporations that are not in any alliance.
    alliance = models.ForeignKey(Alliance, null=True, blank=True, related_name='member_corps')
    member_count = models.IntegerField()
    def __unicode__(self):
        return self.name
class ConfigEntry(models.Model):
    """A configuration setting that may be changed at runtime."""
    # Unique key identifying the setting.
    name = models.CharField(max_length=32, unique=True)
    value = models.CharField(max_length=255, null=True, blank=True)
    # Owner of the setting; None makes it global.
    user = models.ForeignKey(User, related_name='settings', null=True, blank=True)
class MarketGroup(models.Model):
    """A market group from the Eve SDD."""
    id = models.IntegerField(primary_key=True, db_column='marketGroupID')
    name = models.CharField(max_length = 100, null=True, blank=True,
                            db_column='marketGroupName')
    # Groups form a tree; the root groups have no parent.
    parentgroup = models.ForeignKey("self", related_name="childgroups",
                                    blank=True, null=True, db_column='parentGroupID')
    description = models.CharField(max_length = 200, null=True, blank=True)
    hasTypes = models.IntegerField()
    def __unicode__(self):
        return self.name
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'invMarketGroups'
        managed = False
class Type(models.Model):
    """A type from the Eve SDD invTypes table."""
    id = models.IntegerField(primary_key=True, db_column='typeID')
    name = models.CharField(max_length = 100, db_column='typeName')
    description = models.TextField(blank=True, null=True)
    volume = models.FloatField(blank=True, null=True)
    # Null for types that are not sold on the market.
    marketgroup = models.ForeignKey(MarketGroup, null=True, blank=True, related_name="types",
                                    db_column='marketGroupID')
    published = models.IntegerField()
    def __unicode__(self):
        return self.name
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'invTypes'
        managed = False
class Region(models.Model):
    """Core model for static region data"""
    id = models.IntegerField(primary_key=True, db_column='regionID')
    name = models.CharField(max_length=100, db_column='regionName')
    # Galaxy-map coordinates of the region.
    x = models.FloatField()
    y = models.FloatField()
    z = models.FloatField()
    def __unicode__(self):
        return self.name
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'mapRegions'
        managed = False
class Constellation(models.Model):
    """Core model for static constellation data, references Region"""
    id = models.IntegerField(primary_key=True, db_column='constellationID')
    name = models.CharField(max_length=100, db_column='constellationName')
    region = models.ForeignKey(Region, related_name='constellations',
                               db_column='regionID')
    # Galaxy-map coordinates of the constellation.
    x = models.FloatField()
    y = models.FloatField()
    z = models.FloatField()
    def __unicode__(self):
        return self.name
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'mapConstellations'
        managed = False
class SystemData(models.Model):
    """Core model for static system data from the SDD, references Region and Constellation"""
    id = models.IntegerField(primary_key=True, db_column='solarSystemID')
    name = models.CharField(max_length=100, db_column='solarSystemName')
    constellation = models.ForeignKey(Constellation, related_name='systems',
                                      db_column='constellationID')
    region = models.ForeignKey(Region, related_name='systems', db_column='regionID')
    # Galaxy-map coordinates of the solar system.
    x = models.FloatField()
    y = models.FloatField()
    z = models.FloatField()
    # Security status of the system.
    security = models.FloatField()
    def __unicode__(self):
        return self.name
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'mapSolarSystems'
        managed = False
class StarbaseResourcePurpose(models.Model):
    """Core model for SDD invControlTowerResourcePurpose table."""
    purpose = models.IntegerField(primary_key=True)
    purposeText = models.CharField(max_length=100, blank=True, null=True)
    def __unicode__(self):
        # NOTE(review): purposeText is nullable, so this may return None —
        # TODO confirm callers tolerate that.
        return self.purposeText
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'invControlTowerResourcePurposes'
        managed = False
class StarbaseResource(models.Model):
    """Core model for SDD invStarbaseResources table. Maps tower types
    to their fuel"""
    towerType = models.ForeignKey(Type, related_name='posesfueled',
                                  db_column='controlTowerTypeID', primary_key=True)
    resourceType = models.ForeignKey(Type, related_name='posfuel',
                                     db_column='resourceTypeID')
    purpose = models.ForeignKey(StarbaseResourcePurpose, related_name='usedby',
                                db_column='purpose', blank=True, null=True)
    quantity = models.IntegerField(blank=True, null=True, db_column='quantity')
    # Some fuels are only consumed above a minimum security level.
    minSecurityLevel = models.FloatField(blank=True, null=True, db_column='minSecurityLevel')
    def __unicode__(self):
        return '%s %s' % (self.towerType.name, self.resourceType.name)
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table = 'invControlTowerResources'
        managed = False
class Location(models.Model):
    """Core model for SDD mapDenormalize table that generic locations map to."""
    itemid = models.IntegerField(primary_key=True, db_column='itemID')
    typeid = models.ForeignKey(Type, null=True, blank=True, related_name='mapentries',
                               db_column='typeID')
    system = models.ForeignKey(SystemData, null=True, blank=True, related_name='mapentries',
                               db_column='solarSystemID')
    constellation = models.ForeignKey(Constellation, null=True, blank=True,
                                      related_name='mapentries', db_column='constellationID')
    region = models.ForeignKey(Region, null=True, blank=True, related_name='mapentries',
                               db_column='regionID')
    # Celestials orbit other map entries (e.g. moons orbit planets).
    orbitparent = models.ForeignKey('Location', null=True, blank=True,
                                    related_name='satellites', db_column='orbitID')
    name = models.CharField(max_length=100, null=True, blank=True, db_column='itemName')
    x = models.FloatField(null=True, blank=True, db_column='x')
    y = models.FloatField(null=True, blank=True, db_column='y')
    z = models.FloatField(null=True, blank=True, db_column='z')
    security = models.FloatField(null=True, blank=True, db_column='security')
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table='mapDenormalize'
        managed = False
class LocationWormholeClass(models.Model):
    """Core model for SDD mapLocationWormholeClasses used to generate system tables."""
    location = models.ForeignKey(Location, primary_key=True, related_name="whclass",
                                 db_column='locationID')
    sysclass = models.IntegerField(null=True, blank=True, db_column='wormholeClassID')
    # Read-only mapping onto the static data dump; Django never migrates it.
    class Meta:
        db_table='mapLocationWormholeClasses'
        managed = False
class SystemJump(models.Model):
"""Core model for SDD mapSolarSystemJumps used in A* calcs."""
fromregion = models.IntegerField(db_column="fromRegionID")
fromconstellation = models.IntegerField(db_column="fromConstellationID")
fromsystem = models.IntegerField(db_column="fromSolarSystemID", primary_key=True)
tosystem = models.Integer |
def suma_n_numeros(n):
    """Return 1 + 2 + ... + n (the n-th triangular number).

    Bug fix: the previous (pow(n, 2) + n) / 2 used floating point, which
    silently loses precision once n**2 exceeds 2**53.  Integer arithmetic
    with // is exact for any n and the product n*(n+1) is always even.
    """
    return n * (n + 1) // 2
if __name__ == '__main__':
    # Read stone counts from stdin until a non-positive value stops the run.
    piedras = int(input())
    while piedras > 0:
        # Find the largest step count whose triangular sum stays strictly
        # below the number of stones available.
        pasos = 1
        while suma_n_numeros(pasos) < piedras:
            pasos += 1
        pasos -= 1
        # Stones left over after taking 1, 2, ..., pasos stones per step.
        sobran = piedras - suma_n_numeros(pasos)
        print(str(pasos) + " " + str(int(sobran)))
        piedras = int(input())
|
from model.group import Group
class GroupHelper:
    """Page-object helper that drives group CRUD operations through the
    web UI via a shared Selenium webdriver (``self.app.wd``)."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()

    def create(self, group):
        """Create a new group and invalidate the cached group list."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
        self.group_cache = None

    def fill_group_form(self, group):
        """Fill the group form fields from a Group model object."""
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def change_field_value(self, field_name, text):
        """Replace the content of a text field; None leaves it unchanged."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def delete_first_group(self):
        self.delete_group_by_index(0)

    def delete_group_by_index(self, index):
        """Delete the group at the given position in the list."""
        wd = self.app.wd
        self.open_groups_page()
        # select the group to remove
        self.select_group_by_index(index)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None

    def select_group_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_first_group(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def modify_first_group(self, new_group_data):
        # Bug fix: this previously delegated to modify_group_by_index(0)
        # without the required new_group_data argument, so every call
        # raised TypeError.  Accept and forward the new data.
        self.modify_group_by_index(0, new_group_data)

    def modify_group_by_index(self, index, new_group_data):
        """Edit the group at the given position with the new data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def return_to_groups_page(self):
        wd = self.app.wd
        wd.find_element_by_link_text("group page").click()

    def count(self):
        """Return the number of groups currently listed."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached result of get_group_list(); None means "stale, reload".
    group_cache = None

    def get_group_list(self):
        """Return the groups as Group objects, re-reading the page only
        when the cache was invalidated by a modifying operation."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_groups_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector("span.group"):
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cache.append(Group(name=text, id=id))
        return list(self.group_cache)
|
#;+
#; NAME:
#; x_getsdssimg
#; Version 1.1
#;
#; PURPOSE:
#; Returns an Image by querying the SDSS website
#; Will use DSS2-red as a backup
#;
#; CALLING SEQUENCE:
#;
#; INPUTS:
#;
#; RETURNS:
#;
#; OUTPUTS:
#;
#; OPTIONAL KEYWORDS:
#;
#; OPTIONAL OUTPUTS:
#;
#; COMMENTS:
#;
#; EXAMPLES:
#;
#; PROCEDURES/FUNCTIONS CALLED:
#;
#; REVISION HISTORY:
#; 23-Apr-2014 Written by JXP
#;-
#;------------------------------------------------------------------------------
# Import libraries
from __future__ import print_function, absolute_import, division#, unicode_literals
import requests
import PIL
from PIL import Image
from cStringIO import StringIO
from astroquery.sdss import SDSS
from astropy.coordinates import SkyCoord
from astropy import units as u
from xastropy.xutils import xdebug as xdb
# Generate the SDSS URL (default is 202" on a side)
def sdsshttp(ra, dec, imsize, scale=0.39612, grid=None, label=None, invert=None):#, xs, ys):
    """Build the SDSS DR12 ImgCutout URL for a JPEG finder chart.

    :param ra: right ascension in decimal degrees
    :param dec: declination in decimal degrees
    :param imsize: image side length in arcminutes
    :param scale: plate scale in arcsec/pixel (default gives ~202" a side)
    :param grid: when not None, overlay a coordinate grid ('G' option)
    :param label: when not None, overlay labels ('L' option)
    :param invert: when not None, invert the image colors ('I' option)
    :returns: the complete cutout URL as a string
    """
    # Convert the requested field of view (arcmin) into pixels.
    npix = round(imsize*60./scale)
    xs = npix
    ys = npix
    # Generate the http call
    base = 'http://skyservice.pha.jhu.edu/DR12/ImgCutout/'
    query = ('getjpeg.aspx?ra=' + str(ra) +
             '&dec=' + str(dec) +
             '&scale=' + str(scale) +
             '&width=' + str(int(xs)) +
             '&height=' + str(int(ys)))
    #------ Options (fixed the "!= None" comparisons; PEP 8 says "is not None")
    options = ''
    if grid is not None:
        options += 'G'
    if label is not None:
        options += 'L'
    if invert is not None:
        options += 'I'
    if len(options) > 0:
        query += '&opt=' + options
    query += '&query='
    return base + query
# Generate the SDSS URL (default is 202" on a side)
def dsshttp(ra, dec, imsize):
    """Build the STScI DSS (poss2ukstu red plate) GIF cutout URL.

    Example of the target URL shape:
    https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&r=00:42:44.35&d=+41:16:08.6&e=J2000&h=15.0&w=15.0&f=gif&c=none&fov=NONE&v3=
    """
    equinox = 'J2000'
    survey = 'poss2ukstu_red'
    return ('http://archive.stsci.edu/cgi-bin/dss_search?'
            + 'v=' + survey
            + '&r=' + str(ra) + '&d=' + str(dec)
            + '&e=' + equinox
            + '&h=' + str(imsize) + '&w=' + str(imsize)
            + '&f=gif' + '&c=none' + '&fov=NONE' + '&v3=')
# ##########################################
def getimg(ira, idec, imsize, BW=False, DSS=None):
    ''' Grab an SDSS image from the given URL, if possible

    Parameters:
    ----------
    ira: (float or Quantity) RA in decimal degrees
    idec: (float or Quantity) DEC in decimal degrees
    imsize: image side length in arcminutes
    BW: when truthy, return an inverted black & white image
    DSS: when not None, query DSS instead of SDSS

    Returns:
    -------
    (PIL image, BW flag actually used)
    '''
    # Strip units as need be.
    # Bug fix: accessing ``.value`` on a plain float raises AttributeError,
    # not KeyError, so the documented float input path could never be taken.
    try:
        ra = ira.value
    except AttributeError:
        ra = ira
        dec = idec
    else:
        dec = idec.value
    # Get URL
    if DSS is None: # Default
        url = sdsshttp(ra,dec,imsize)
    else:
        url = dsshttp(ra,dec,imsize) # DSS
    # Request
    rtv = requests.get(url)
    # Check against outside footprint [KLUDGY!!]
    # Also had to turn off unicode!!
    bad_900_1000 = '\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0\x02\x8a(\xa0'
    if rtv.content[900:1000] == bad_900_1000:
        # Position falls outside the SDSS footprint; retry against DSS.
        print('getimg: Pulling from DSS instead of SDSS')
        BW = 1
        url = dsshttp(ra,dec,imsize) # DSS
        rtv = requests.get(url)
    img = Image.open(StringIO(rtv.content))
    # B&W ?
    if BW:
        import PIL.ImageOps
        img2 = img.convert("L")
        img2 = PIL.ImageOps.invert(img2)
        img = img2
    return img, BW
# ##########################################
def get_spec_img(ra, dec):
    """Fetch the SDSS DR12 spectrum plot for the object at (ra, dec).

    Queries the spectroscopic catalog within 1 arcsec of the position and
    downloads the SpecById plot for the matched object as a PIL image.
    """
    from PIL import Image
    from cStringIO import StringIO
    # Target coordinate
    target = SkyCoord(ra=ra*u.degree, dec=dec*u.degree)
    # Query the spectroscopic database around the target position
    search_radius = 1*u.arcsec
    spec_catalog = SDSS.query_region(target, spectro=True, radius=search_radius.to('degree'))
    # Download the spectrum plot for the matched spectroscopic object
    spec_url = ('http://skyserver.sdss.org/dr12/en/get/SpecById.ashx?id='
                + str(int(spec_catalog['specobjid'])))
    response = requests.get(spec_url)
    return Image.open(StringIO(response.content))
# #############
# Call with RA/DEC (decimal degrees)
def radecd | (ra, dec):
import x_getsdssimg as x_gsdss
img = x_gsdss.getimg(ra,dec)
return img
|
from unittest import TestCase
from mock import patch
from railgun.engines.storage_engine import DummyEngine
class StorageEngineTestCase(TestCase):
    """Unit tests for DummyEngine configuration handling."""

    def setUp(self):
        # Baseline configuration shared by all tests.
        self.config = {
            'field1': 'value1',
            'field2': 'value2'
        }

    def test_init(self):
        # The constructor must forward its dict to config_from_dict().
        target = 'railgun.engines.storage_engine.DummyEngine.config_from_dict'
        with patch(target) as config_mock:
            DummyEngine(self.config)
            self.assertIsNone(config_mock.assert_called_with(self.config))

    def test_config_from_dict(self):
        engine = DummyEngine(self.config)
        new_config = {
            'field1': 'value3',
            'field2': 'value4'
        }
        engine.config_from_dict(new_config)
        # Every configured key must be exposed as an engine attribute.
        for key, value in new_config.iteritems():
            self.assertEqual(engine.__getattribute__(key), value)

    def test_config_from_dict_missing_field(self):
        engine = DummyEngine(self.config)
        # Omitting a previously-required field must be rejected.
        partial_config = {
            'field1': 'value3'
        }
        self.assertRaises(ValueError, engine.config_from_dict, partial_config)

    def test_extra_params_in_config_are_added_to_engine(self):
        self.config['newfield'] = "test"
        engine = DummyEngine(self.config)
        self.assertEqual("test", engine.newfield)
|
from selenium import webdriver
from time import sleep

# Launch Firefox and load the Google home page.
driver = webdriver.Firefox()
driver.get('http://www.google.com')
sleep(2)

# Locate the query box and the search button.
search_box = driver.find_element_by_css_selector('.gLFyf.gsfi')
search_button = driver.find_element_by_xpath('//input[@name="btnK"]')

# Type the query and run the search.
search_box.clear()
search_box.send_keys("python")
sleep(2)
search_button.click()

driver.quit()
--------------------------------------
# Support for search
# -----------------------------------------------------------------------
    def itemsVisibleCount(self):
        """
        Return the number of items visible.
        :rtype: int
        """
        # Plain accessor; the counter is presumably maintained by the
        # search/filter logic elsewhere in this class — TODO confirm.
        return self._itemsVisibleCount
    def itemsHiddenCount(self):
        """
        Return the number of items hidden.
        :rtype: int
        """
        # Plain accessor; the counter is presumably maintained by the
        # search/filter logic elsewhere in this class — TODO confirm.
        return self._itemsHiddenCount
    def setSearchText(self, text):
        """
        Set the search widget text.
        :type text: str
        :rtype: None
        """
        # Delegates directly to the search widget.
        self.searchWidget().setText(text)
# -----------------------------------------------------------------------
# Support for custom preview widgets
# -----------------------------------------------------------------------
def setCreateWidget(self, widget):
"""
:type widget: QtWidgets.QWidget
:rtype: None
"""
self.setPreviewWidgetVisible(True)
self.itemsWidget().clearSelection()
# Force the preview pane to expand when creating a new item.
fsize, rsize, psize = self._splitter.sizes()
if psize < 150:
self.setSizes((fsize, rsize, 180))
self.setPreviewWidget(widget)
def clearPreviewWidget(self):
"""
Set the default preview widget.
"""
self._previewWidget = None
widget = studiolibrary.widgets.PlaceholderWidget()
self.setPreviewWidget(widget)
    def updatePreviewWidget(self):
        """Update the current preview widget."""
        # force=True rebuilds the widget even when the current item has
        # not changed (setPreviewWidgetFromItem would otherwise early-out).
        self.setPreviewWidgetFromItem(self._currentItem, force=True)
    def setPreviewWidgetFromItem(self, item, force=False):
        """
        Show the preview widget for the given item.

        :type item: studiolibrary.LibraryItem
        :type force: bool  # rebuild even if the item is already current
        :rtype: None
        """
        if not force and self._currentItem == item:
            logger.debug("The current item preview widget is already set.")
            return
        self._currentItem = item
        if item:
            self.closePreviewWidget()
            try:
                # The item installs its own preview widget on this window.
                item.showPreviewWidget(self)
            except Exception as error:
                # Surface the failure, fall back to the placeholder, and
                # re-raise so callers still see the original error.
                self.showErrorMessage(error)
                self.clearPreviewWidget()
                raise
        else:
            # No item selected: show the default placeholder.
            self.clearPreviewWidget()
    def previewWidget(self):
        """
        Return the current preview widget.
        :rtype: QtWidgets.QWidget
        """
        # May be None after closePreviewWidget()/clearPreviewWidget().
        return self._previewWidget
|
def setPreviewWidget(self, widget):
"""
Set the preview widget.
:type widget: QtWidgets.QWidget
:rtype: None
"""
if self._previewWidget == widget:
msg = 'Preview widget already contains widget "{0}"'
msg.format(widget)
logger.debug(msg)
else:
self.closePreviewWidget()
self._previewWidget = widget
if self._previewWidget:
self._previewFrame.layout().addWidget(self._previewWidget)
self._previewWidget.show()
    def closePreviewWidget(self):
        """
        Close and delete the preview widget.
        :rtype: None
        """
        # Detach and destroy every widget parked in the preview frame.
        layout = self._previewFrame.layout()
        while layout.count():
            item = layout.takeAt(0)
            # NOTE(review): assumes every layout item wraps a widget; a
            # spacer item would make widget() return None and crash here —
            # TODO confirm only widgets are ever added to this layout.
            item.widget().hide()
            item.widget().close()
            item.widget().deleteLater()
        self._previewWidget = None
# -----------------------------------------------------------------------
# Support for saving and loading the widget state
# -----------------------------------------------------------------------
    def resetSettings(self):
        """
        Reset the settings to the default settings.
        :rtype: None
        """
        self.setSettings(self.DEFAULT_SETTINGS)
def geometrySettings(self):
"""
Return the geometry values as a list.
:rtype: list[int]
"""
settings = (
self.window().geometry().x(),
self.window().geometry().y(),
self.window().geometry().width(),
self.window().geometry().height()
)
return settings
def setGeometrySettings(self, settings):
"""
Set the geometry of the widget with the given values.
:type settings: list[int]
:rtype: None
"""
x, y, width, height = settings
screenGeometry = QtWidgets.QApplication.desktop().screenGeometry()
screenWidth = screenGeometry.width()
screenHeight = screenGeometry.height()
if x <= 0 or y <= 0 or x >= screenWidth or y >= screenHeight:
self.centerWindow(width, height)
else:
self.window().setGeometry(x, y, width, height)
def settings(self):
"""
Return a dictionary with the widget settings.
:rtype: dict
"""
settings = {}
settings['dpi'] = self.dpi()
settings['kwargs'] = self._kwargs
settings['geometry'] = self.geometrySettings()
settings['paneSizes'] = self._splitter.sizes()
if self.theme():
settings['theme'] = self.theme().settings()
settings["library"] = self.library().settings()
settings["trashFolderVisible"] = self.isTrashFolderVisible()
settings["sidebarWidgetVisible"] = self.isFoldersWidgetVisible()
settings["previewWidgetVisible"] = self.isPreviewWidgetVisible()
settings["menuBarWidgetVisible"] = self.isMenuBarWidgetVisible()
settings["statusBarWidgetVisible"] = self.isStatusBarWidgetVisible()
settings['itemsWidget'] = self.itemsWidget().settings()
settings['searchWidget'] = self.searchWidget().settings()
settings['sidebarWidget'] = self.sidebarWidget().settings()
settings["recursiveSearchEnabled"] = self.isRecursiveSearchEnabled()
settings['filterByMenu'] = self._filterByMenu.settings()
settings["path"] = self.path()
return settings
def setSettings(self, settings):
"""
Set the widget settings from the given dictionary.
:type settings: dict
"""
defaults = copy.deepcopy(self.DEFAULT_SETTINGS)
settings = studiolibrary.update(defaults, settings)
isRefreshEnabled = self.isRefreshEnabled()
try:
self.setRefreshEnabled(False)
self.itemsWidget().setToastEnabled(False)
geometry = settings.get("geometry")
if geometry:
self.setGeometrySettings(geometry)
themeSettings = settings.get("theme")
if themeSettings:
self.setThemeSettings(themeSettings)
if not self.path():
path = settings.get("path")
if path and os.path.exists(path):
self.setPath(path)
dpi = settings.get("dpi", 1.0)
self.setDpi(dpi)
sizes = settings.get('paneSizes')
if sizes and len(sizes) == 3:
self.setSizes(sizes)
value = settings.get("sidebarWidgetVisible")
if value is not None:
self.setFoldersWidgetVisible(value)
value = settings.get("menuBarWidgetVisible")
if value is not None:
self.setMenuBarWidgetVisible(value)
value = settings.get("previewWidgetVisible")
if value is not None:
self.setPreviewWidgetVisible(value)
value = settings.get("statusBarWidgetVisible")
if value is not None:
self.setStatusBarWidgetVisible(value)
value = settings.get('searchWidget')
if value is not None:
self.searchWidget().setSettings(value)
value = settings.get("recursiveSearchEnabled")
if value is not None:
self.setRecursiveSearchEnabled(value)
value = settings.get('filterByMenu')
if value is not None:
self._filterByMenu.setSettings(value)
finally:
self.reloadStyleSheet()
self.setRefreshEnabled(isRe |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use th | is file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mocks for classes defined in webapp module.
|
Use these classes to test functionality that depends on the webapp framework.
"""
import StringIO
import urlparse
class MockHeaders(dict):
  """Mocks out headers in webapp.Request and webapp.Response."""

  def add_header(self, key, value):
    # For tests a plain last-write-wins mapping is sufficient.
    self.update({key: value})
class MockRequest(object):
  """Mocks out webapp.Request.
  Use get()/set() to configure the query parameters for the request.
  Public Members:
    method: A string representing the request type. Defaults to 'GET'.
    uri: A string representing the requested URI. Defaults to '/start'.
  """
  # Read-only alias: uri always mirrors the current url.
  uri = property(lambda self: self.url)
  def __init__(self):
    """Initializer."""
    self.method = 'GET'
    self.scheme = 'http'
    self.host = 'foo.com'
    self._path = '/start'
    # params maps name -> value (str or list); params_list keeps ordered
    # (name, value) pairs.  set() keeps the two in sync.
    self.params = {}
    self.params_list = []
    self.headers = MockHeaders()
    self.body = ''
    self.url = ''
    self.path_qs = ''
    self.update_properties()
    self.environ = {}
  def get_path(self):
    return self._path
  def set_path(self, value):
    # Setting the path rebuilds the derived url/path_qs properties.
    self._path = value
    self.update_properties()
  path = property(get_path, set_path)
  def set_url(self, url):
    """Set full URL for the request.
    Parses the URL and sets path, scheme, host and parameters correctly.
    """
    o = urlparse.urlparse(url)
    self.path = o.path
    self.scheme = o.scheme or self.scheme
    self.host = o.netloc or self.host
    # Only single-valued query parameters are supported here.
    for (name, value) in urlparse.parse_qs(o.query).items():
      assert len(value) == 1
      self.set(name, value[0])
  def get(self, argument_name, default_value='', allow_multiple=False):
    """Looks up the value of a query parameter.
    Args:
      argument_name: The query parameter key as a string.
      default_value: The default query parameter value as a string if it was
        not supplied.
      allow_multiple: return a list of values with the given name
    Returns:
      If allow_multiple is False (which it is by default), we return the first
      value with the given name given in the request. If it is True, we always
      return an list.
    """
    if argument_name not in self.params:
      if allow_multiple:
        return []
      return default_value
    if allow_multiple:
      # NOTE(review): for a str value, list() splits it into characters —
      # presumably values are always stored as lists when multiple is
      # expected; verify against callers.
      return list(self.params[argument_name])
    if isinstance(self.params[argument_name], list):
      return self.params[argument_name][0]
    return self.params[argument_name]
  def get_all(self, argument_name):
    """Returns a list of query parameters with the given name.
    Args:
      argument_name: the name of the query argument.
    Returns:
      A (possibly empty) list of values.
    """
    if argument_name in self.params:
      if isinstance(self.params[argument_name], list):
        return self.params[argument_name]
      else:
        return [self.params[argument_name]]
    return []
  def get_range(self, name, min_value=None, max_value=None, default=0):
    """Parses the given int argument, limiting it to the given range.
    Args:
      name: the name of the argument
      min_value: the minimum int value of the argument (if any)
      max_value: the maximum int value of the argument (if any)
      default: the default value of the argument if it is not given
    Returns:
      An int within the given range for the argument
    """
    value = self.get(name, default)
    if value is None:
      return value
    try:
      value = int(value)
    except ValueError:
      value = default
    if value is not None:
      # Clamp into [min_value, max_value] when bounds are given.
      if max_value is not None:
        value = min(value, max_value)
      if min_value is not None:
        value = max(value, min_value)
    return value
  def set(self, argument_name, value):
    """Sets the value of a query parameter.
    Args:
      argument_name: The string name of the query parameter.
      value: The string value of the query parameter. Pass None to remove
        query parameter.
    """
    # NOTE(review): relies on Python 2 filter() returning a list; under
    # Python 3 params_list would become an iterator and the appends below
    # would fail.
    self.params_list = filter(lambda p: p[0] != argument_name, self.params_list)
    if value is not None:
      self.params[argument_name] = value
      if type(value) == list:
        for v in value:
          self.params_list.append((argument_name, v))
      else:
        self.params_list.append((argument_name, value))
    else:
      del self.params[argument_name]
    self.update_properties()
  def relative_url(self, other_url, to_application=False):
    """Return an absolute (!) URL by combining self.path with other_url."""
    # to_application is accepted for API compatibility but ignored here.
    url = '%s://%s/' % (self.scheme, self.host)
    return urlparse.urljoin(url, other_url)
  def update_properties(self):
    """Update url, path_qs property to be in sync with path and params."""
    self.path_qs = self._path
    params_qs = ''
    for param_value_pair in self.params_list:
      if params_qs:
        params_qs += '&'
      # NOTE(review): assumes every stored value is a string; non-str
      # values raise TypeError when concatenated here.
      params_qs += param_value_pair[0] + "=" + param_value_pair[1]
    if params_qs:
      self.path_qs += '?' + params_qs
    self.url = self.scheme + '://' + self.host + self.path_qs
  def arguments(self):
    """Gets the set of argument names used in this request."""
    return list(set(p[0] for p in self.params_list))
class MockResponse(object):
  """Mocks out webapp.Response.
  Public Members:
    out: A StringIO instance.
    status: HTTP status code.
    message: HTTP status message.
    headers: A dict of HTTP response headers.
  """
  def __init__(self):
    # Start as a successful, empty response.
    self.status = 200
    self.status_message = 'OK'
    self.headers = MockHeaders()
    self.out = StringIO.StringIO()
  def set_status(self, status, message=None):
    """Sets the value of status.
    Args:
      status: HTTP status code.
      message: HTTP status message.
    """
    self.status = status
    if message:
      self.status_message = message
  def has_error(self):
    """Indicates whether the response was an error response."""
    # Any 4xx/5xx status counts as an error.
    return self.status >= 400
  def clear(self):
    """Clears all data written to self.out."""
    self.out.seek(0)
    self.out.truncate(0)
|
data = {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}
with contextlib.nested(
self.port(arg_list=arg_list, **data),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_plugin_per_device'),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_nexus_for_net_create')
) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create):
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
req = self.new_update_request('ports', data, port['port']['id'])
# Note, due to mocking out the two model routines, response won't
# contain any useful data
req.get_response(self.api)
# Note that call_args_list is used instead of
# assert_called_once_with which requires exact match of arguments.
# This is because the mocked routines contain variable number of
# arguments and/or dynamic objects.
self.assertEqual(invoke_plugin_per_device.call_count, 2)
self.assertEqual(
invoke_plugin_per_device.call_args_list[0][0][0:2],
(const.VSWITCH_PLUGIN, 'update_port'))
self.assertEqual(
invoke_plugin_per_device.call_args_list[1][0][0:2],
(const.NEXUS_PLUGIN, 'delete_port'))
self.assertEqual(invoke_nexus_for_net_create.call_count, 1)
self.assertEqual(
invoke_nexus_for_net_create.call_args_list[0][0][1:],
(port['port']['tenant_id'], port['port']['network_id'],
port['port']['device_id'],
data['port'][portbindings.HOST_ID],))
def test_model_update_port_net_create_not_needed(self):
    """Test the model for update_port when no action is needed.

    Mock the routines that call into the plugin code, and make sure that
    VSWITCH plugin is called with correct arguments, while NEXUS plugin is
    not called at all.
    """
    arg_list = (portbindings.HOST_ID,)
    data = {portbindings.HOST_ID: COMP_HOST_NAME,
            'device_id': DEVICE_ID_1,
            'device_owner': DEVICE_OWNER}
    with contextlib.nested(
        self.port(arg_list=arg_list, **data),
        mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
                          '_invoke_plugin_per_device'),
        mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
                          '_invoke_nexus_for_net_create')
    ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create):
        # Update with the SAME host/device info as the created port, so no
        # migration (and hence no Nexus-side action) should be triggered.
        data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME,
                         'device_id': DEVICE_ID_1,
                         'device_owner': DEVICE_OWNER}}
        req = self.new_update_request('ports', data, port['port']['id'])
        # Note, due to mocking out the two model routines, response won't
        # contain any useful data
        req.get_response(self.api)
        # Note that call_args_list is used instead of
        # assert_called_once_with which requires exact match of arguments.
        # This is because the mocked routines contain variable number of
        # arguments and/or dynamic objects.
        self.assertEqual(invoke_plugin_per_device.call_count, 1)
        self.assertEqual(
            invoke_plugin_per_device.call_args_list[0][0][0:2],
            (const.VSWITCH_PLUGIN, 'update_port'))
        # The Nexus plugin must not have been invoked at all.
        self.assertFalse(invoke_nexus_for_net_create.called)
def verify_portbinding(self, host_id1, host_id2,
                       vlan, device_id, binding_port):
    """Verify a port binding entry in the DB is correct."""
    # The two host ids (from the response and the request) must agree.
    self.assertEqual(host_id1, host_id2)
    # Exactly one binding should exist for this (vlan, device) pair.
    bindings = nexus_db_v2.get_nexusvm_bindings(vlan, device_id)
    self.assertEqual(1, len(bindings))
    binding = bindings[0]
    self.assertEqual(binding.port_id, binding_port)
    self.assertEqual(binding.switch_ip, NEXUS_IP_ADDR)
def test_db_update_port_attach(self):
    """Test DB for update_port in attaching to an instance.

    Query DB for the port binding entry corresponding to the search key
    (vlan, device_id), and make sure that it's bound to correct switch port
    """
    with self.port() as port:
        # Simulate the port being attached to an instance on COMP_HOST_NAME.
        data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME,
                         'device_id': DEVICE_ID_1,
                         'device_owner': DEVICE_OWNER}}
        req = self.new_update_request('ports', data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        ctx = context.get_admin_context()
        net = self._show('networks', res['port']['network_id'],
                         neutron_context=ctx)['network']
        # A provider segmentation (VLAN) id must have been allocated.
        self.assertTrue(attributes.is_attr_set(
            net.get(provider.SEGMENTATION_ID)))
        vlan = net[provider.SEGMENTATION_ID]
        # First network created, so it gets the first VLAN in the range.
        self.assertEqual(vlan, VLAN_START)
        # The (vlan, device) binding should point at the first Nexus port.
        self.verify_portbinding(res['port'][portbindings.HOST_ID],
                                data['port'][portbindings.HOST_ID],
                                vlan,
                                data['port']['device_id'],
                                NEXUS_PORT_1)
def test_db_update_port_migrate(self):
    """Test DB for update_port in migrating an instance.

    Query DB for the port binding entry corresponding to the search key
    (vlan, device_id), and make sure that it's bound to correct switch port
    before and after the migration.
    """
    arg_list = (portbindings.HOST_ID,)
    # Create the port already bound to the first compute host.
    data = {portbindings.HOST_ID: COMP_HOST_NAME,
            'device_id': DEVICE_ID_1,
            'device_owner': DEVICE_OWNER}
    with self.port(arg_list=arg_list, **data) as port:
        ctx = context.get_admin_context()
        net = self._show('networks', port['port']['network_id'],
                         neutron_context=ctx)['network']
        # A provider segmentation (VLAN) id must have been allocated.
        self.assertTrue(attributes.is_attr_set(
            net.get(provider.SEGMENTATION_ID)))
        vlan = net[provider.SEGMENTATION_ID]
        self.assertEqual(vlan, VLAN_START)
        # Before migration: binding points at the first Nexus switch port.
        self.verify_portbinding(port['port'][portbindings.HOST_ID],
                                data[portbindings.HOST_ID],
                                vlan,
                                data['device_id'],
                                NEXUS_PORT_1)
        # Migrate the instance to the second compute host.
        new_data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
        req = self.new_update_request('ports',
                                      new_data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # After migration: binding moved to the second Nexus switch port.
        self.verify_portbinding(res['port'][portbindings.HOST_ID],
                                new_data['port'][portbindings.HOST_ID],
                                vlan,
                                data['device_id'],
                                NEXUS_PORT_2)
def test_delete_ports_by_device_id_second_call_failure(self):
    """Run the shared second-call-failure check against this plugin."""
    self._test_delete_ports_by_device_id_second_call_failure(
        self._get_plugin_ref())
def test_delete_ports_ignores_port_not_found(self):
    """Run the shared port-not-found check against this plugin."""
    self._test_delete_ports_ignores_port_not_found(self._get_plugin_ref())
class TestCiscoNetworksV2(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestNetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_network
#ensures the API choose the e |
import numpy as np
import os,sys
from scipy import optimize
import matplotlib.pyplot as plt
import matplotlib.dates as pltdates
from astropy.io import ascii
from datetime import datetime
# This script fits a power law to the moving source synchrotron spectrum
# and plots, versus time: the fitted spectral index, the inferred electron
# distribution power-law index, and the inferred degree of polarization.

# The new data location (platform-dependent).
if sys.platform == 'darwin': BASEDIR = '/Users/kkozarev/Desktop/MWA_CME_project/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR = '/mnt/MWA_DATA/'
avgperiod = '10sec'
datadir = BASEDIR + 'max_spectra/normalized/' + avgperiod + '/'
polarization = 'XX'
sourcetype = {'1': 'Moving'}  # Do not modify!

# Read in the data.
spectrafile = 'moving_source_normalized_spectra_' + polarization + '_' + avgperiod + '.txt'
# Channel 6 (132.28 MHz) is deliberately excluded from the fit below
# (columns s6/e6 are unpacked but never used).
frequencies = np.array([79.8, 88.76, 97.72, 107.96, 119.48, 145.08])
logx = np.log10(frequencies)
spectradata = ascii.read(datadir + spectrafile, data_start=1)
fluxdata = np.array(spectradata)

amps = []         # fitted power-law amplitudes, 10**p[0]
indices = []      # fitted spectral (power-law) indices, p[1]
times = []        # observation timestamps
indexerrors = []  # 1-sigma uncertainty of each fitted index

# Fit log10(flux) = p[0] + p[1]*log10(freq) at every time step.
fitfunc = lambda p, x: p[0] + p[1] * x  # target function (line in log-log)
# Error-weighted distance to the target function.
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
for ii, rval in enumerate(fluxdata):
    # NOTE(review): data_start=1 already skips the header, so this also
    # drops the first data row — confirm that is intended.
    if ii > 0:
        date, time, s1, e1, s2, e2, s3, e3, s4, e4, s5, e5, s6, e6, s7, e7 = rval
        dt = date + ' ' + time
        times.append(datetime.strptime(dt, "%Y/%m/%d %H:%M:%S"))
        spectrum = np.array([s1, s2, s3, s4, s5, s7])
        yerr = np.array([e1, e2, e3, e4, e5, e7])
        logy = np.log10(spectrum)
        logyerr = yerr / spectrum  # error propagation: d(log y) ~ dy/y
        p0 = [logy[0], 1]  # initial guess: amplitude from first channel, index 1
        out = optimize.leastsq(errfunc, p0[:], args=(logx, logy, logyerr),
                               full_output=1)
        p1 = out[0]
        covar = out[1]
        index = p1[1]
        indices.append(index)
        amp = 10.0**p1[0]
        amps.append(amp)
        if covar is None:
            # Singular Jacobian: no covariance available, use a tiny stub.
            indexErr = 1.e-20
        else:
            # BUG FIX: the spectral index is p1[1], so its variance is
            # covar[1][1]; covar[0][0] is the variance of the amplitude.
            indexErr = np.sqrt(covar[1][1])
        indexerrors.append(indexErr)

# Plot the fitted spectral index versus time.
fig, ax = plt.subplots()
datefmt = pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times, y=np.abs(indices), fmt='r-', drawstyle='steps-mid', linewidth=1)
plt.ylabel("Power law index")
# 'date' is the last date read in the loop above (single-day data assumed).
plt.xlabel("Time of " + date)
plt.title("Moving Source Normalized Power Law Index")
plt.errorbar(times, np.abs(indices), yerr=indexerrors, fmt='o', markersize=0.1, linewidth=1)
plt.savefig(datadir + 'normalized_moving_source_spectral_indices' + '_' + polarization + "_synchrotron_" + avgperiod + ".png")
plt.close()

# Plot the inferred electron power law (delta = 2*|alpha| + 1).
elecplawindices = list(2 * np.array(np.abs(indices)) + 1)
elecplawindiceserr = list(2 * np.array(indexerrors))
fig, ax = plt.subplots()
datefmt = pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times, y=elecplawindices, fmt='r-', drawstyle='steps-mid', linewidth=1)
plt.ylabel("Inferred Electron Distribution Power Law Index")
plt.xlabel("Time of " + date)
plt.title("Moving Source Electron Distribution Power Law Index")
plt.errorbar(times, elecplawindices, yerr=elecplawindiceserr, fmt='o', markersize=0.1, linewidth=1)
plt.savefig(datadir + 'normalized_moving_source_electron_distribution_indices' + '_' + polarization + "_synchrotron_" + avgperiod + ".png")
plt.close()

# Plot the inferred degree of polarization, (delta+1)/(delta+7/3) in percent.
plawind = np.array(elecplawindices) * (-1.)
degpol = list(((plawind + 1.) / (plawind + (7. / 3.))) * 100.)
fig, ax = plt.subplots()
datefmt = pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times, y=degpol, fmt='r-', drawstyle='steps-mid', linewidth=1)
plt.ylabel("Polarization Degree, %")
plt.xlabel("Time of " + date)
plt.title("Moving Source Inferred Polarization Degree")
plt.savefig(datadir + 'normalized_moving_source_electron_polarization_degree' + '_' + polarization + "_synchrotron_" + avgperiod + ".png")
plt.close()
|
from django import template
register = template.Library()
@register | .inclusion_tag(file_name='bootstrap_messages.html')
def bootstrap_messages(messages, icon_remove_class=None):
"""
Render django.contrib.messages messages as bootstrap alert blocks.
Display only info, success, error and warning level messages.
messages - django.contrib.messages.storage.fallback.FallbackStorage instance.
icon_remove_class - if 'None' alert will not have close button. Use bootstrap glyphs class. Example: 'icon-remove'
Exa | mple of use:
{% bootstrap_messages messages 'icon-remove' %}
"""
return {'messages': messages, 'icon_remove_class': icon_remove_class}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.