commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
7bccd20523f96728db7a6b5fd23cb339787ecd3a | Bump to 1.1.4 | jnius/__init__.py | jnius/__init__.py | '''
Pyjnius
=======
Accessing Java classes from Python.
All the documentation is available at: http://pyjnius.readthedocs.org
'''
__version__ = '1.1.4'
from .jnius import * # noqa
from .reflect import * # noqa
from six import with_metaclass
# XXX monkey patch methods that cannot be in cython.
# Cython doesn't allow to set new attribute on methods it compiled
HASHCODE_MAX = 2 ** 31 - 1
class PythonJavaClass_(with_metaclass(MetaJavaBase, PythonJavaClass)):
@java_method('()I', name='hashCode')
def hashCode(self):
return id(self) % HASHCODE_MAX
@java_method('()Ljava/lang/String;', name='hashCode')
def hashCode_(self):
return '{}'.format(self.hashCode())
@java_method('()Ljava/lang/String;', name='toString')
def toString(self):
return repr(self)
@java_method('(Ljava/lang/Object;)Z', name='equals')
def equals(self, other):
return self.hashCode() == other.hashCode()
PythonJavaClass = PythonJavaClass_
# from https://gist.github.com/tito/09c42fb4767721dc323d
import os
if "ANDROID_ARGUMENT" in os.environ:
# on android, catch all exception to ensure about a jnius.detach
import threading
import jnius
orig_thread_run = threading.Thread.run
def jnius_thread_hook(*args, **kwargs):
try:
return orig_thread_run(*args, **kwargs)
finally:
jnius.detach()
threading.Thread.run = jnius_thread_hook
| '''
Pyjnius
=======
Accessing Java classes from Python.
All the documentation is available at: http://pyjnius.readthedocs.org
'''
__version__ = '1.1.4.dev0'
from .jnius import * # noqa
from .reflect import * # noqa
from six import with_metaclass
# XXX monkey patch methods that cannot be in cython.
# Cython doesn't allow to set new attribute on methods it compiled
HASHCODE_MAX = 2 ** 31 - 1
class PythonJavaClass_(with_metaclass(MetaJavaBase, PythonJavaClass)):
@java_method('()I', name='hashCode')
def hashCode(self):
return id(self) % HASHCODE_MAX
@java_method('()Ljava/lang/String;', name='hashCode')
def hashCode_(self):
return '{}'.format(self.hashCode())
@java_method('()Ljava/lang/String;', name='toString')
def toString(self):
return repr(self)
@java_method('(Ljava/lang/Object;)Z', name='equals')
def equals(self, other):
return self.hashCode() == other.hashCode()
PythonJavaClass = PythonJavaClass_
# from https://gist.github.com/tito/09c42fb4767721dc323d
import os
if "ANDROID_ARGUMENT" in os.environ:
# on android, catch all exception to ensure about a jnius.detach
import threading
import jnius
orig_thread_run = threading.Thread.run
def jnius_thread_hook(*args, **kwargs):
try:
return orig_thread_run(*args, **kwargs)
finally:
jnius.detach()
threading.Thread.run = jnius_thread_hook
| Python | 0.000084 |
6a27bd99352e4dc7f38c6f819a8a45b37c1a094c | Remove TODO to add requirements.txt | start-active-players.py | start-active-players.py | """
Start active players for the week
Ideas:
- Include the names of players who cannot be started
- And maybe the full roster on those dates
"""
import requests
from bs4 import BeautifulSoup
# TODO: Configure this somewhere better (as a direct argument to the script, probably
TEAM_URL = 'http://basketball.fantasysports.yahoo.com/nba/178276/6/'
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
}
response = requests.get(TEAM_URL, headers=headers)
soup = BeautifulSoup(response.text)
inputs = soup.find(id='hiddens').findAll('input')
fields = {input['name']: input['value'] for input in inputs}
print(fields)
| """
Start active players for the week
Ideas:
- Include the names of players who cannot be started
- And maybe the full roster on those dates
TODO:
- Add required packages in requirements.txt
"""
import requests
from bs4 import BeautifulSoup
# TODO: Configure this somewhere better (as a direct argument to the script, probably
TEAM_URL = 'http://basketball.fantasysports.yahoo.com/nba/178276/6/'
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
}
response = requests.get(TEAM_URL, headers=headers)
soup = BeautifulSoup(response.text)
inputs = soup.find(id='hiddens').findAll('input')
fields = {input['name']: input['value'] for input in inputs}
print(fields)
| Python | 0 |
a6390df0f4fb9c9402b1c795d4bf65765b793412 | Bump dallinger version | dallinger/version.py | dallinger/version.py | """Dallinger version number."""
__version__ = "5.0.6"
| """Dallinger version number."""
__version__ = "5.0.5"
| Python | 0 |
75d2f1aad9aa88926fce27d49c4e452eb571fc14 | Update the lexer | cycli/lexer.py | cycli/lexer.py | import re
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, Number, Token
__all__ = ["CypherLexer"]
class CypherLexer(RegexLexer):
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'\b(ABS|ACOS|ALLSHORTESTPATHS|ASIN|ATAN|ATAN2|AVG|CEIL|COALESCE|COLLECT'
r'|COS|COT|COUNT|DEGREES|E|ENDNODE|EXP|EXTRACT|FILTER|FLOOR'
r'|HAVERSIN|HEAD|ID|KEYS|LABELS|LAST|LEFT|LENGTH|LIKE|LOAD CSV|LOG|LOG10'
r'|LOWER|LTRIM|MAX|MIN|NODE|NODES|PERCENTILECONT|PERCENTILEDISC|PI|RADIANS'
r'|RAND|RANGE|REDUCE|REL|RELATIONSHIP|RELATIONSHIPS|REPLACE|RIGHT|ROUND|RTRIM'
r'|SHORTESTPATH|SIGN|SIN|SPLIT|SQRT|STARTNODE|STDEV|STDEVP|STR|SUBSTRING'
r'|SUM|TAIL|TAN|TIMESTAMP|TOFLOAT|TOINT|TRIM|TYPE|UPPER|ALL'
r'|AND|ANY|HAS|IN|NONE|NOT|OR|SINGLE|XOR|AS'
r'|ASC|ASCENDING|ASSERT|BY|CASE|COMMIT|CONSTRAINT|CREATE|CYPHER'
r'|DELETE|DESC|DESCENDING|DISTINCT|DROP|ELSE|END|EXPLAIN|FALSE|FIELDTERMINATOR'
r'|FOREACH|FROM|WITH HEADERS|IN|INDEX|IS|LIMIT|LOAD|MATCH|MERGE'
r'|NULL|ON|OPTIONAL|ORDER|PERIODIC|PROFILE|REMOVE|RETURN|SCAN|SET'
r'|SKIP|START|THEN|TRUE|UNION|UNIQUE|UNWIND|USING|WHEN|WHERE|WITH)\b', Keyword),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'[0-9]+', Name),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[-\)\]]-[>\(]|[<\)]-[-\(\[]|[\]\)]-|-[\(\[]|-->|<--|\]-|-\[', Token.Pattern),
(r'\.', Token.Pattern),
(r'\(|\)|\]|\[|{|}', Token.Pattern)
]
} | import re
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, Number, Token
__all__ = ["CypherLexer"]
class CypherLexer(RegexLexer):
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'\b(ABS|ACOS|ALLSHORTESTPATHS|ASIN|ATAN|ATAN2|AVG|CEIL|COALESCE|COLLECT'
r'|COS|COT|COUNT|DEGREES|E|ENDNODE|EXP|EXTRACT|FILTER|FLOOR'
r'|HAVERSIN|HEAD|ID|KEYS|LABELS|LAST|LEFT|LENGTH|LIKE|LOAD CSV|LOG|LOG10'
r'|LOWER|LTRIM|MAX|MIN|NODE|NODES|PERCENTILECONT|PERCENTILEDISC|PI|RADIANS'
r'|RAND|RANGE|REDUCE|REL|RELATIONSHIP|RELATIONSHIPS|REPLACE|RIGHT|ROUND|RTRIM'
r'|SHORTESTPATH|SIGN|SIN|SPLIT|SQRT|STARTNODE|STDEV|STDEVP|STR|SUBSTRING'
r'|SUM|TAIL|TAN|TIMESTAMP|TOFLOAT|TOINT|TRIM|TYPE|UPPER|ALL'
r'|AND|ANY|HAS|IN|NONE|NOT|OR|SINGLE|XOR|AS'
r'|ASC|ASCENDING|ASSERT|BY|CASE|COMMIT|CONSTRAINT|CREATE|CYPHER'
r'|DELETE|DESC|DESCENDING|DISTINCT|DROP|ELSE|END|EXPLAIN|FALSE|FIELDTERMINATOR'
r'|FOREACH|FROM|WITH HEADERS|IN|INDEX|IS|LIMIT|LOAD|MATCH|MERGE'
r'|NULL|ON|OPTIONAL|ORDER|PERIODIC|PROFILE|REMOVE|RETURN|SCAN|SET'
r'|SKIP|START|THEN|TRUE|UNION|UNIQUE|UNWIND|USING|WHEN|WHERE|WITH)\b', Keyword),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'[0-9]+', Name),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'-->|<--|\]->|<-\[|\)-\[|\]-\(|\[|\]-|\[|\]', Token.Pattern),
(r'\.', Token.Pattern),
(r'\(|\)', Token.Pattern)
]
} | Python | 0 |
a98096f129165be003294eaa5ad3596931c58ae7 | Use per-request database connections. | nmhive.py | nmhive.py | #!/usr/bin/env python
import json
import mailbox
import os
import tempfile
import urllib.request
import flask
import flask_cors
import notmuch
app = flask.Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
flask_cors.CORS(app)
TAG_PREFIX = os.getenv('NMBPREFIX', 'notmuch::')
NOTMUCH_PATH = None
@app.route('/tags', methods=['GET'])
def tags():
tags = set()
database = notmuch.Database(path=NOTMUCH_PATH)
try:
for t in database.get_all_tags():
if t.startswith(TAG_PREFIX):
tags.add(t[len(TAG_PREFIX):])
finally:
database.close()
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/mid/<message_id>', methods=['GET', 'POST'])
def message_id_tags(message_id):
if flask.request.method == 'POST':
tags = _TAGS.get(message_id, set())
new_tags = tags.copy()
for change in flask.request.get_json():
if change.startswith('+'):
new_tags.add(change[1:])
elif change.startswith('-'):
try:
new_tags.remove(change[1:])
except KeyError:
return flask.Response(status=400)
else:
return flask.Response(status=400)
_TAGS[message_id] = new_tags
return flask.Response(
response=json.dumps(sorted(new_tags)),
mimetype='application/json')
elif flask.request.method == 'GET':
try:
tags = _TAGS[message_id]
except KeyError:
return flask.Response(status=404)
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/gmane/<group>/<int:article>', methods=['GET'])
def gmane_message_id(group, article):
url = 'http://download.gmane.org/{}/{}/{}'.format(
group, article, article + 1)
response = urllib.request.urlopen(url=url, timeout=3)
mbox_bytes = response.read()
with tempfile.NamedTemporaryFile(prefix='nmbug-', suffix='.mbox') as f:
f.write(mbox_bytes)
mbox = mailbox.mbox(path=f.name)
_, message = mbox.popitem()
message_id = message['message-id']
return flask.Response(
response=message_id.lstrip('<').rstrip('>'),
mimetype='text/plain')
if __name__ == '__main__':
app.run(host='0.0.0.0')
| #!/usr/bin/env python
import json
import mailbox
import os
import tempfile
import urllib.request
import flask
import flask_cors
import notmuch
app = flask.Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
flask_cors.CORS(app)
TAG_PREFIX = os.getenv('NMBPREFIX', 'notmuch::')
NOTMUCH = None
_TAGS = {}
@app.route('/tags', methods=['GET'])
def tags():
tags = set()
for t in NOTMUCH.get_all_tags():
if t.startswith(TAG_PREFIX):
tags.add(t[len(TAG_PREFIX):])
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/mid/<message_id>', methods=['GET', 'POST'])
def message_id_tags(message_id):
if flask.request.method == 'POST':
tags = _TAGS.get(message_id, set())
new_tags = tags.copy()
for change in flask.request.get_json():
if change.startswith('+'):
new_tags.add(change[1:])
elif change.startswith('-'):
try:
new_tags.remove(change[1:])
except KeyError:
return flask.Response(status=400)
else:
return flask.Response(status=400)
_TAGS[message_id] = new_tags
return flask.Response(
response=json.dumps(sorted(new_tags)),
mimetype='application/json')
elif flask.request.method == 'GET':
try:
tags = _TAGS[message_id]
except KeyError:
return flask.Response(status=404)
return flask.Response(
response=json.dumps(sorted(tags)),
mimetype='application/json')
@app.route('/gmane/<group>/<int:article>', methods=['GET'])
def gmane_message_id(group, article):
url = 'http://download.gmane.org/{}/{}/{}'.format(
group, article, article + 1)
response = urllib.request.urlopen(url=url, timeout=3)
mbox_bytes = response.read()
with tempfile.NamedTemporaryFile(prefix='nmbug-', suffix='.mbox') as f:
f.write(mbox_bytes)
mbox = mailbox.mbox(path=f.name)
_, message = mbox.popitem()
message_id = message['message-id']
return flask.Response(
response=message_id.lstrip('<').rstrip('>'),
mimetype='text/plain')
if __name__ == '__main__':
NOTMUCH = notmuch.Database(
path=None,
mode=notmuch.Database.MODE.READ_WRITE)
app.run(host='0.0.0.0')
| Python | 0 |
0983331773982b2bae6b92a0350a91aefbe6481e | Use the `note` style box. Refs #11725. | contrib/help_guide_version_notice.py | contrib/help_guide_version_notice.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from pkg_resources import resource_listdir
from trac.config import ListOption, Option
from trac.core import Component, implements
from trac.web.api import IRequestFilter
NOTICE_TEMPLATE = """\
{{{#!box note
This page documents the %(release)s (%(desc)s) release.
See [[%(alt_page)s]] if you need the %(alt_desc)s version.
}}}
"""
class HelpGuideVersionNotice(Component):
"""Adds a version notice to pages in the Help/Guide with a link to
the previous or current version of the page in the guide. The
WikiExtraPlugin needs to be installed for pretty rendering of the
notice using the `box` WikiProcessor.
"""
implements(IRequestFilter)
lts_release = Option('teo', 'lts_release', '0.12',
doc="Version of the LTS release of Trac.")
stable_release = Option('teo', 'stable_release', '1.0',
doc="Version of the stable release of Trac.")
dev_release = Option('teo', 'dev_release', '1.1',
doc="Version of the dev release of Trac.")
ignored_pages = ListOption('teo', 'ignored_pages',
'WikiStart, TitleIndex',
doc="List of pages to ignore.")
def __init__(self):
self.default_pages = resource_listdir('trac.wiki', 'default-pages')
for page in self.ignored_pages:
self.default_pages.remove(page)
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if data and 'page' in data and 'text' in data:
name = data['page'].name
notice = ""
if name in self.default_pages:
alt_page = self.lts_release + '/' + name
notice = NOTICE_TEMPLATE % {'release': self.stable_release,
'desc': 'latest stable',
'alt_page': alt_page,
'alt_desc': 'previous'}
elif name.startswith(self.lts_release) and \
name[len(self.lts_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.lts_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.lts_release,
'desc': 'maintenance',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
elif name.startswith(self.dev_release) and \
name[len(self.dev_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.dev_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.dev_release,
'desc': 'development',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
data['text'] = notice + data['text']
return template, data, content_type
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from pkg_resources import resource_listdir
from trac.config import ListOption, Option
from trac.core import Component, implements
from trac.web.api import IRequestFilter
NOTICE_TEMPLATE = """\
{{{#!box
**Note:** this page documents the %(release)s (%(desc)s) release.
See [[%(alt_page)s]] if you need the %(alt_desc)s version.
}}}
"""
class HelpGuideVersionNotice(Component):
"""Adds a version notice to pages in the Help/Guide with a link to
the previous or current version of the page in the guide. The
WikiExtraPlugin needs to be installed for pretty rendering of the
notice using the `box` WikiProcessor.
"""
implements(IRequestFilter)
lts_release = Option('teo', 'lts_release', '0.12',
doc="Version of the LTS release of Trac.")
stable_release = Option('teo', 'stable_release', '1.0',
doc="Version of the stable release of Trac.")
dev_release = Option('teo', 'dev_release', '1.1',
doc="Version of the dev release of Trac.")
ignored_pages = ListOption('teo', 'ignored_pages',
'WikiStart, TitleIndex',
doc="List of pages to ignore.")
def __init__(self):
self.default_pages = resource_listdir('trac.wiki', 'default-pages')
for page in self.ignored_pages:
self.default_pages.remove(page)
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if data and 'page' in data and 'text' in data:
name = data['page'].name
notice = ""
if name in self.default_pages:
alt_page = self.lts_release + '/' + name
notice = NOTICE_TEMPLATE % {'release': self.stable_release,
'desc': 'latest stable',
'alt_page': alt_page,
'alt_desc': 'previous'}
elif name.startswith(self.lts_release) and \
name[len(self.lts_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.lts_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.lts_release,
'desc': 'maintenance',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
elif name.startswith(self.dev_release) and \
name[len(self.dev_release)+1:] in self.default_pages:
alt_page = '../../' + name[len(self.dev_release)+1:]
notice = NOTICE_TEMPLATE % {'release': self.dev_release,
'desc': 'development',
'alt_page': alt_page,
'alt_desc': 'latest stable'}
data['text'] = notice + data['text']
return template, data, content_type
| Python | 0.000001 |
43b0201573a7bef18347c47f0434444a30edc5b1 | Use global parameters for port & fork_server | rosserial_python/nodes/serial_node.py | rosserial_python/nodes/serial_node.py | #!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import roslib; roslib.load_manifest("rosserial_python")
import rospy
from rosserial_python import SerialClient, RosSerialServer
import multiprocessing
import sys
if __name__=="__main__":
port_name = rospy.get_param('~port','/dev/ttyUSB0')
baud = int(rospy.get_param('~baud','57600'))
tcp_portnum = int(rospy.get_param('/rosserial_embeddedlinux/tcp_port', '11411'))
fork_server = rospy.get_param('/rosserial_embeddedlinux/fork_server', True)
sys.argv = rospy.myargv(argv=sys.argv)
#import pdb; pdb.set_trace()
if len(sys.argv) == 2 :
port_name = sys.argv[1]
if len(sys.argv) == 3 :
tcp_portnum = int(sys.argv[2])
if port_name == "tcp" :
server = RosSerialServer(tcp_portnum, fork_server)
rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
try:
server.listen()
except KeyboardInterrupt:
rospy.loginfo("got keyboard interrupt")
finally:
rospy.loginfo("Shutting down")
for process in multiprocessing.active_children():
rospy.loginfo("Shutting down process %r", process)
process.terminate()
process.join()
rospy.loginfo("All done")
else : # Use serial port
rospy.init_node("serial_node")
rospy.loginfo("ROS Serial Python Node")
rospy.loginfo("Connected on %s at %d baud" % (port_name,baud) )
client = SerialClient(port_name, baud)
try:
client.run()
except KeyboardInterrupt:
pass
| #!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import roslib; roslib.load_manifest("rosserial_python")
import rospy
from rosserial_python import SerialClient, RosSerialServer
import multiprocessing
import sys
if __name__=="__main__":
port_name = rospy.get_param('~port','/dev/ttyUSB0')
baud = int(rospy.get_param('~baud','57600'))
tcp_portnum = int(rospy.get_param('~tcp_port', '11411'))
fork_server = rospy.get_param('~fork_server', True)
sys.argv = rospy.myargv(argv=sys.argv)
#import pdb; pdb.set_trace()
if len(sys.argv) == 2 :
port_name = sys.argv[1]
if len(sys.argv) == 3 :
tcp_portnum = int(sys.argv[2])
if port_name == "tcp" :
server = RosSerialServer(tcp_portnum, fork_server)
rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
try:
server.listen()
except KeyboardInterrupt:
rospy.loginfo("got keyboard interrupt")
finally:
rospy.loginfo("Shutting down")
for process in multiprocessing.active_children():
rospy.loginfo("Shutting down process %r", process)
process.terminate()
process.join()
rospy.loginfo("All done")
else : # Use serial port
rospy.init_node("serial_node")
rospy.loginfo("ROS Serial Python Node")
rospy.loginfo("Connected on %s at %d baud" % (port_name,baud) )
client = SerialClient(port_name, baud)
try:
client.run()
except KeyboardInterrupt:
pass
| Python | 0 |
33d1e3f1d94c7af29742619df42983507e067568 | Add VotableModelMixin | board/models.py | board/models.py | import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
nick = models.CharField(max_length=16)
def __str__(self):
return str(self.user)
class Board(models.Model):
name = models.CharField(max_length=16)
slug = models.SlugField()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('board_post_list', kwargs={'board': self.slug})
class Category(models.Model):
board = models.ForeignKey('Board', related_name='categories')
name = models.CharField(max_length=8)
slug = models.SlugField()
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class VotableModelMixin:
@property
def votes(self):
vd = dict()
vd['upvote'] = self._votes.filter(vote=Vote.UPVOTE).count()
vd['downvote'] = self._votes.filter(vote=Vote.DOWNVOTE).count()
vd['total'] = vd['upvote'] - vd['downvote']
return vd
class Post(VotableModelMixin, models.Model):
user = models.ForeignKey(User, blank=True, null=True, related_name='posts')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
board = models.ForeignKey('Board', related_name='posts')
category = models.ForeignKey('Category', blank=True, null=True, related_name='posts')
title = models.CharField(max_length=32)
contents = models.TextField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
viewcount = models.PositiveIntegerField(default=0)
created_time = models.DateTimeField(auto_now_add=True)
modified_time = models.DateTimeField()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.id})
def save(self, *args, **kwargs):
if not kwargs.pop('auto_now', False):
self.modified_time = datetime.datetime.now()
super(Post, self).save(*args, **kwargs)
class Comment(VotableModelMixin, models.Model):
post = models.ForeignKey('Post', related_name='comments')
comment = models.ForeignKey('self', related_name='subcomments', blank=True, null=True)
user = models.ForeignKey(User, blank=True, null=True, related_name='comments')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
contents = models.TextField()
created_time = models.DateTimeField(auto_now_add=True)
class Vote(models.Model):
DOWNVOTE = 0
UPVOTE = 1
VOTE_CHOICES = (
(DOWNVOTE, 'Not recommend'),
(UPVOTE, 'Recommend'),
)
post = models.ForeignKey('Post', blank=True, null=True, related_name='_votes')
comment = models.ForeignKey('Comment', blank=True, null=True, related_name='_votes')
user = models.ForeignKey(User, blank=True, null=True, related_name='_votes')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
vote = models.PositiveSmallIntegerField(choices=VOTE_CHOICES)
class Announcement(models.Model):
post = models.OneToOneField('Post', related_name='announcement')
boards = models.ManyToManyField('Board', related_name='announcements')
| import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
nick = models.CharField(max_length=16)
def __str__(self):
return str(self.user)
class Board(models.Model):
name = models.CharField(max_length=16)
slug = models.SlugField()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('board_post_list', kwargs={'board': self.slug})
class Category(models.Model):
board = models.ForeignKey('Board', related_name='categories')
name = models.CharField(max_length=8)
slug = models.SlugField()
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class Post(models.Model):
user = models.ForeignKey(User, blank=True, null=True, related_name='posts')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
board = models.ForeignKey('Board', related_name='posts')
category = models.ForeignKey('Category', blank=True, null=True, related_name='posts')
title = models.CharField(max_length=32)
contents = models.TextField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
viewcount = models.PositiveIntegerField(default=0)
created_time = models.DateTimeField(auto_now_add=True)
modified_time = models.DateTimeField()
@property
def votes(self):
vd = dict()
vd['upvote'] = self._votes.filter(vote=Vote.UPVOTE).count()
vd['downvote'] = self._votes.filter(vote=Vote.DOWNVOTE).count()
vd['total'] = vd['upvote'] - vd['downvote']
return vd
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.id})
def save(self, *args, **kwargs):
if not kwargs.pop('auto_now', False):
self.modified_time = datetime.datetime.now()
super(Post, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey('Post', related_name='comments')
comment = models.ForeignKey('self', related_name='subcomments', blank=True, null=True)
user = models.ForeignKey(User, blank=True, null=True, related_name='comments')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
contents = models.TextField()
created_time = models.DateTimeField(auto_now_add=True)
class Vote(models.Model):
DOWNVOTE = 0
UPVOTE = 1
VOTE_CHOICES = (
(DOWNVOTE, 'Not recommend'),
(UPVOTE, 'Recommend'),
)
post = models.ForeignKey('Post', blank=True, null=True, related_name='_votes')
comment = models.ForeignKey('Comment', blank=True, null=True, related_name='_votes')
user = models.ForeignKey(User, blank=True, null=True, related_name='_votes')
ipaddress = models.GenericIPAddressField(protocol='IPv4')
vote = models.PositiveSmallIntegerField(choices=VOTE_CHOICES)
class Announcement(models.Model):
post = models.OneToOneField('Post', related_name='announcement')
boards = models.ManyToManyField('Board', related_name='announcements')
| Python | 0 |
56fee8518b9022b854f0bb300e8e44ec84539a29 | Fix callback to send a bogus message which will close the connection | bokeh/client.py | bokeh/client.py | '''
'''
from __future__ import absolute_import, print_function
import logging
import random
log = logging.getLogger(__name__)
from tornado import gen
from tornado.httpclient import HTTPRequest
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.websocket import websocket_connect
from bokeh.server.exceptions import MessageError, ProtocolError, ValidationError
from bokeh.server.protocol.receiver import Receiver
from bokeh.server.protocol import Protocol
class ClientSession(object):
def __init__(self, url="ws://localhost:8888/ws", callbacks=None):
self._request = HTTPRequest(url, headers={"bokeh-protocol-version": "1.0"})
self._callbacks = callbacks
self._session_id = None
self._protocol = Protocol("1.0")
self._receiver = Receiver(self._protocol)
self._client = None
def connect(self):
loop = IOLoop.instance()
loop.add_callback(self._run)
try:
loop.start()
except KeyboardInterrupt:
if self._client is not None:
self._client.close(1000, "user interruption")
def send_message(self, message):
sent = message.send(self._client)
log.debug("Sent %r [%d bytes]", message, sent)
@gen.coroutine
def _run(self):
yield self._connect_async()
yield self._worker()
@gen.coroutine
def _connect_async(self):
self._client = yield websocket_connect(self._request)
@gen.coroutine
def _worker(self):
while True:
fragment = yield self._client.read_message()
if fragment is None:
# XXX Tornado doesn't give us the code and reason
log.info("Connection closed by server")
break
try:
message = yield self._receiver.consume(fragment)
except (MessageError, ProtocolError, ValidationError) as e:
log.error("%r", e)
raise e
if message:
log.debug("Received %r", message)
if message.msgtype is 'ACK':
self._session_id = message.header['sessid']
self._start_callbacks()
IOLoop.instance().stop()
def _callback_wrapper(self, func):
def wrapper():
func(self)
return wrapper
def _start_callbacks(self):
for cb, period in self._callbacks:
if period:
PeriodicCallback(self._callback_wrapper(cb),
period * 1000, # ms
).start()
else:
IOLoop.instance().add_callback(self._callback_wrapper(cb))
def foo(cli):
msg = Protocol("1.0").create('SERVER-INFO-REQ', cli._session_id)
cli.send_message(msg)
def bar(cli):
msg = Protocol("1.0").create('PULL-DOC-REQ', cli._session_id, "some_doc")
cli.send_message(msg)
def quux(cli):
log.info("Sending deliberately bogus message")
cli._client.write_message(b"xx", binary=True)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
session = ClientSession(callbacks=[(foo, 0.8), (bar, 3.0), (quux, 30.0)])
session.connect()
| '''
'''
from __future__ import absolute_import, print_function
import logging
import random
log = logging.getLogger(__name__)
from tornado import gen
from tornado.httpclient import HTTPRequest
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.websocket import websocket_connect
from bokeh.server.exceptions import MessageError, ProtocolError, ValidationError
from bokeh.server.protocol.receiver import Receiver
from bokeh.server.protocol import Protocol
class ClientSession(object):
def __init__(self, url="ws://localhost:8888/ws", callbacks=None):
self._request = HTTPRequest(url, headers={"bokeh-protocol-version": "1.0"})
self._callbacks = callbacks
self._session_id = None
self._protocol = Protocol("1.0")
self._receiver = Receiver(self._protocol)
self._client = None
def connect(self):
loop = IOLoop.instance()
loop.add_callback(self._run)
try:
loop.start()
except KeyboardInterrupt:
if self._client is not None:
self._client.close(1000, "user interruption")
def send_message(self, message):
sent = message.send(self._client)
log.debug("Sent %r [%d bytes]", message, sent)
@gen.coroutine
def _run(self):
yield self._connect_async()
yield self._worker()
@gen.coroutine
def _connect_async(self):
self._client = yield websocket_connect(self._request)
@gen.coroutine
def _worker(self):
while True:
fragment = yield self._client.read_message()
if fragment is None:
# XXX Tornado doesn't give us the code and reason
log.info("Connection closed by server")
break
try:
message = yield self._receiver.consume(fragment)
except (MessageError, ProtocolError, ValidationError) as e:
log.error("%r", e)
raise e
if message:
log.debug("Received %r", message)
if message.msgtype is 'ACK':
self._session_id = message.header['sessid']
self._start_callbacks()
IOLoop.instance().stop()
def _callback_wrapper(self, func):
def wrapper():
func(self)
return wrapper
def _start_callbacks(self):
for cb, period in self._callbacks:
if period:
PeriodicCallback(self._callback_wrapper(cb),
period * 1000, # ms
).start()
else:
IOLoop.instance().add_callback(self._callback_wrapper(cb))
def foo(cli):
msg = Protocol("1.0").create('SERVER-INFO-REQ', cli._session_id)
cli.send_message(msg)
def bar(cli):
msg = Protocol("1.0").create('PULL-DOC-REQ', cli._session_id, "some_doc")
cli.send_message(msg)
def quux(cli):
log.warn("Deliberately wrong session ID")
msg = Protocol("1.0").create('SERVER-INFO-REQ', 'wrongsessid')
cli.send_message(msg)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
session = ClientSession(callbacks=[(foo, 0.8), (bar, 3.0), (quux, 30.0)])
session.connect()
| Python | 0 |
7ed8de3d15941c683ae70c15a6ce50bbe29a6580 | remove unused field from books | books/models.py | books/models.py | from django.db import models
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import (FieldPanel,
InlinePanel)
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
# Create your models here.
class Book(Page):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
revision = models.CharField(max_length=255, blank=True, null=True)
description = RichTextField(blank=True)
cover_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
publish_date = models.DateField(blank=True, null=True)
isbn_10 = models.IntegerField(blank=True, null=True)
isbn_13 = models.CharField(max_length=255, blank=True, null=True)
content_panels = Page.content_panels + [
FieldPanel('revision'),
FieldPanel('description', classname="full"),
ImageChooserPanel('cover_image'),
FieldPanel('publish_date'),
FieldPanel('isbn_10'),
FieldPanel('isbn_13'),
]
api_fields = ('created',
'updated',
'revision',
'description',
'cover_image',
'publish_date',
'isbn_10',
'isbn_13') | from django.db import models
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import (FieldPanel,
InlinePanel)
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
# Create your models here.
class Book(Page):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
revision = models.CharField(max_length=255, blank=True, null=True)
description = RichTextField(blank=True)
cover_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
publish_date = models.DateField(blank=True, null=True)
isbn_10 = models.IntegerField(blank=True, null=True)
isbn_13 = models.CharField(max_length=255, blank=True, null=True)
content_panels = Page.content_panels + [
FieldPanel('name'),
FieldPanel('revision'),
FieldPanel('description', classname="full"),
ImageChooserPanel('cover_image'),
FieldPanel('publish_date'),
FieldPanel('isbn_10'),
FieldPanel('isbn_13'),
]
api_fields = ('created',
'updated',
'revision',
'description',
'cover_image',
'publish_date',
'isbn_10',
'isbn_13') | Python | 0.000001 |
1a40dd2724a4a6364f0786fc5ac5f93d37daeaa0 | add NoTestDataError | judgesite/task.py | judgesite/task.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import io
import json
import shutil
import subprocess
import os
from config import conf
from models import save_result
class NoTestDataException(Exception):
def __init__(self, value=None):
self.value = value
def __str__(self):
return repr(self.value)
class JudgeTask(object):
def __init__(self, message):
task = json.loads(message)
self.submit_type = task["submit_type"]
self.status_id = str(task["status_id"])
self.code = task["code"]
self.language = task["language"]
self.testdata_id = str(task["testdata_id"])
self.time_limit = str(task["time_limit"])
self.memory_limit = str(task["memory_limit"])
self.result = ""
self.run_time = 0
self.run_memory = 0
self.others = ""
def go(self):
self._clean_files()
try:
self._prepare_temp_dir()
self._dump_code_to_file()
self._prepare_testdata_file()
except NoTestDataException, e:
self.result = 'NoTestDataError'
except Exception, e:
raise e
else:
self._run()
self._read_result()
self._save_result()
self._clean_files()
def _prepare_temp_dir(self):
logging.info("Prepare temp dir")
os.mkdir(conf.tmp_path)
def _dump_code_to_file(self):
logging.info("Dump code to file")
filename = "Main." + self.language
self.code_file = os.path.join(conf.tmp_path, filename)
code_file = io.open(self.code_file, 'w', encoding='utf8')
code_file.write(self.code)
code_file.close()
def _prepare_testdata_file(self):
logging.info("Prepare testdata")
input_file = os.path.join(
conf.testdata_path, self.testdata_id, "in.in")
output_file = os.path.join(
conf.testdata_path, self.testdata_id, "out.out")
testdata_exists = (
os.path.exists(input_file), os.path.exists(output_file))
if not testdata_exists[0] or not testdata_exists[1]:
raise NoTestDataException(testdata_exists)
shutil.copy(input_file, conf.tmp_path)
shutil.copy(output_file, conf.tmp_path)
def _run(self):
logging.info("GO!GO!GO!")
commands = ["sudo", "./Core", "-c", self.code_file, "-t",
self.time_limit, "-m", self.memory_limit, "-d",
conf.tmp_path]
subprocess.call(commands)
def _read_result(self):
logging.info("Read result")
result_file = open(os.path.join(conf.tmp_path, "result.txt"), 'r')
self.result = result_file.readline().strip()
self.run_time = result_file.readline().strip()
self.run_memory = result_file.readline().strip()
self.others = result_file.read()
def _save_result(self):
logging.info("Save result")
save_result(status_id=self.status_id,
type=self.submit_type,
run_time=self.run_time,
run_memory=self.run_memory,
compiler_output=self.others,
status=self.result)
def _clean_files(self):
logging.info("Clean files")
if os.path.exists(conf.tmp_path):
shutil.rmtree(conf.tmp_path)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import io
import json
import shutil
import subprocess
import os
from config import conf
from models import save_result
class JudgeTask(object):
def __init__(self, message):
task = json.loads(message)
self.submit_type = task["submit_type"]
self.status_id = str(task["status_id"])
self.code = task["code"]
self.language = task["language"]
self.testdata_id = str(task["testdata_id"])
self.time_limit = str(task["time_limit"])
self.memory_limit = str(task["memory_limit"])
def go(self):
self._clean_files()
self._prepare_temp_dir()
self._dump_code_to_file()
self._prepare_testdata_file()
self._run()
self._read_result()
self._save_result()
self._clean_files()
def _prepare_temp_dir(self):
logging.info("Prepare temp dir")
os.mkdir(conf.tmp_path)
def _dump_code_to_file(self):
logging.info("Dump code to file")
filename = "Main." + self.language
self.code_file = os.path.join(conf.tmp_path, filename)
code_file = io.open(self.code_file, 'w', encoding='utf8')
code_file.write(self.code)
code_file.close()
def _prepare_testdata_file(self):
logging.info("Prepare testdata")
input_file = os.path.join(conf.testdata_path, self.testdata_id, "in.in")
output_file = os.path.join(conf.testdata_path, self.testdata_id, "out.out")
shutil.copy(input_file, conf.tmp_path)
shutil.copy(output_file, conf.tmp_path)
def _run(self):
logging.info("GO!GO!GO!")
commands = ["sudo", "./Core", "-c", self.code_file, "-t",
self.time_limit, "-m", self.memory_limit, "-d",
conf.tmp_path]
subprocess.call(commands)
def _read_result(self):
logging.info("Read result")
result_file = open(os.path.join(conf.tmp_path, "result.txt"), 'r')
self.result = result_file.readline().strip()
self.run_time = result_file.readline().strip()
self.run_memory = result_file.readline().strip()
self.others = result_file.read()
def _save_result(self):
logging.info("Save result")
save_result(status_id=self.status_id,
type=self.submit_type,
run_time=self.run_time,
run_memory=self.run_memory,
compiler_output=self.others,
status=self.result)
def _clean_files(self):
logging.info("Clean files")
if os.path.exists(conf.tmp_path):
shutil.rmtree(conf.tmp_path)
| Python | 0.000001 |
e0b3e23d4890a10f8bca4c699e5a9cd6294fee29 | add xpub | keepkey-for-mn.py | keepkey-for-mn.py | #!/usr/bin/env python3
import sys, os
sys.path.append( os.path.join( os.path.dirname(__file__), '.' ) )
sys.path.append( os.path.join( os.path.dirname(__file__), '.', 'dashlib' ) )
from config import *
from keepkeylib.client import KeepKeyClient
from keepkeylib.transport_hid import HidTransport
import keepkeylib.ckd_public as bip32
def main():
# List all connected KeepKeys on USB
devices = HidTransport.enumerate()
# Check whether we found any
if len(devices) == 0:
print('No KeepKey found')
return
# Use first connected device
transport = HidTransport(devices[0])
# Creates object for manipulating KeepKey
client = KeepKeyClient(transport)
# Print out KeepKey's features and settings
# print(client.features)
keypath = mpath
bip32_path = client.expand_path(keypath)
# xpub to use
#print('xpub/tpub --> ' + bip32.serialize(client.get_public_node(bip32_path).node, 0x043587CF))
print('xpub/tpub --> ' + bip32.serialize(client.get_public_node(bip32_path).node, ( 0x0488B21E if MAINNET else 0x043587CF )))
for i in range(max_gab):
child_path = '%s%s' % (keypath + '/', str(i))
address = client.get_address(coin_name, client.expand_path(child_path))
print (coin_name +' address:', child_path, address)
client.close()
if __name__ == '__main__':
main()
# end | #!/usr/bin/env python3
import sys, os
sys.path.append( os.path.join( os.path.dirname(__file__), '.' ) )
sys.path.append( os.path.join( os.path.dirname(__file__), '.', 'dashlib' ) )
from config import *
from keepkeylib.client import KeepKeyClient
from keepkeylib.transport_hid import HidTransport
import keepkeylib.ckd_public as bip32
def main():
# List all connected KeepKeys on USB
devices = HidTransport.enumerate()
# Check whether we found any
if len(devices) == 0:
print('No KeepKey found')
return
# Use first connected device
transport = HidTransport(devices[0])
# Creates object for manipulating KeepKey
client = KeepKeyClient(transport)
# Print out KeepKey's features and settings
# print(client.features)
keypath = mpath
bip32_path = client.expand_path(keypath)
# xpub to use
print('xpub --> ' + bip32.serialize(client.get_public_node(bip32_path).node, 0x043587CF))
for i in range(max_gab):
child_path = '%s%s' % (keypath + '/', str(i))
address = client.get_address(coin_name, client.expand_path(child_path))
print (coin_name +' address:', child_path, address)
client.close()
if __name__ == '__main__':
main()
# end | Python | 0 |
658de49626b6ef8b199bee6502bc62abebaa0803 | Use an assert for an error that is concerning developers | sale_condition_template/sale_order.py | sale_condition_template/sale_order.py | # -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm, fields
class SaleOrder(orm.Model):
"""Add text condition"""
_inherit = "sale.order"
_columns = {
'condition_template1_id': fields.many2one(
'base.condition.template',
'Template Top conditions'),
'condition_template2_id': fields.many2one(
'base.condition.template',
'Template Bottom conditions'),
'note1': fields.html('Top conditions'),
'note2': fields.html('Bottom conditions'),
}
def set_condition(self, cr, uid, cond_id, field_name, partner_id):
if not cond_id:
return {'value': {field_name: ''}}
cond_obj = self.pool['base.condition.template']
text = cond_obj.get_value(cr, uid, cond_id, partner_id)
return {'value': {field_name: text}}
def set_note1(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note1', partner_id)
def set_note2(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note2', partner_id)
def action_invoice_create(self, cr, user, order_id,
grouped=False,
states=['confirmed', 'done', 'exception'],
date_inv=False, context=None):
# function is design to return only one id
invoice_obj = self.pool['account.invoice']
inv_id = super(SaleOrder, self).action_invoice_create(
cr, user, order_id, grouped, states, date_inv, context=context)
invoice = invoice_obj.browse(cr, user, inv_id, context=context)
if isinstance(order_id, (tuple, list)):
assert len(order_id) == 1, "1 ID expected, got: %s" % (order_id, )
order_id = order_id[0]
order = self.browse(cr, user, order_id, context=context)
inv_data = {'condition_template1_id': order.condition_template1_id.id,
'condition_template2_id': order.condition_template2_id.id,
'note1': order.note1,
'note2': order.note2}
invoice.write(inv_data, context=context)
return inv_id
| # -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm, fields
from openerp.tools.translate import _
class SaleOrder(orm.Model):
"""Add text condition"""
_inherit = "sale.order"
_columns = {
'condition_template1_id': fields.many2one(
'base.condition.template',
'Template Top conditions'),
'condition_template2_id': fields.many2one(
'base.condition.template',
'Template Bottom conditions'),
'note1': fields.html('Top conditions'),
'note2': fields.html('Bottom conditions'),
}
def set_condition(self, cr, uid, cond_id, field_name, partner_id):
if not cond_id:
return {'value': {field_name: ''}}
cond_obj = self.pool['base.condition.template']
text = cond_obj.get_value(cr, uid, cond_id, partner_id)
return {'value': {field_name: text}}
def set_note1(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note1', partner_id)
def set_note2(self, cr, uid, so_id, cond_id, partner_id):
return self.set_condition(cr, uid, cond_id, 'note2', partner_id)
def action_invoice_create(self, cr, user, order_id,
grouped=False,
states=['confirmed', 'done', 'exception'],
date_inv=False, context=None):
# function is design to return only one id
invoice_obj = self.pool['account.invoice']
inv_id = super(SaleOrder, self).action_invoice_create(
cr, user, order_id, grouped, states, date_inv, context=context)
invoice = invoice_obj.browse(cr, user, inv_id, context=context)
if isinstance(order_id, list):
if len(order_id) > 1:
raise orm.except_osv(
_('action_invoice_create can only receive one id'),
_('action_invoice_create can only receive one id'))
order_id = order_id[0]
order = self.browse(cr, user, order_id, context=context)
inv_data = {'condition_template1_id': order.condition_template1_id.id,
'condition_template2_id': order.condition_template2_id.id,
'note1': order.note1,
'note2': order.note2}
invoice.write(inv_data, context=context)
return inv_id
| Python | 0.000002 |
6fc68abdb48134f4e647f0a1d69becd374d1147f | add missing Python file encoding | brasilcomvc/accounts/admin.py | brasilcomvc/accounts/admin.py | # coding: utf8
from __future__ import unicode_literals
from django.contrib import admin
from .models import User, UserAddress
class UserAdmin(admin.ModelAdmin):
class UserAddressInline(admin.StackedInline):
model = UserAddress
list_display = ('email', 'full_name', 'username',)
fieldsets = (
('Informações Pessoais', {
'fields': ('full_name', 'username', 'email',),
}),
('Informações Profissionais', {
'fields': ('job_title', 'bio',),
}),
('Notificações', {
'fields': ('email_newsletter',),
}),
)
inlines = (UserAddressInline,)
admin.site.register(User, UserAdmin)
| from django.contrib import admin
from .models import User, UserAddress
class UserAdmin(admin.ModelAdmin):
class UserAddressInline(admin.StackedInline):
model = UserAddress
list_display = ('email', 'full_name', 'username',)
fieldsets = (
('Informações Pessoais', {
'fields': ('full_name', 'username', 'email',),
}),
('Informações Profissionais', {
'fields': ('job_title', 'bio',),
}),
('Notificações', {
'fields': ('email_newsletter',),
}),
)
inlines = (UserAddressInline,)
admin.site.register(User, UserAdmin)
| Python | 0.000014 |
ddb3f3cb33bab10113dbf290c65b9919339fdd72 | Update artman version | synthtool/gcp/gapic_generator.py | synthtool/gcp/gapic_generator.py | from pathlib import Path
import tempfile
import platform
from synthtool import _tracked_paths
from synthtool import log
from synthtool import shell
from synthtool.sources import git
ARTMAN_VERSION = '0.12.0'
GOOGLEAPIS_URL: str = 'git@github.com:googleapis/googleapis.git'
GOOGLEAPIS_PRIVATE_URL: str = (
'git@github.com:googleapis/googleapis-private.git')
class GAPICGenerator:
def __init__(self, private: bool = False):
# Docker on mac by default cannot use the default temp file location
# instead use the more standard *nix /tmp location\
if platform.system() == 'Darwin':
tempfile.tempdir = '/tmp'
self._ensure_dependencies_installed()
# clone google apis to temp
# git clone git@github.com:googleapis/googleapis.git
if not private:
googleapis_url = GOOGLEAPIS_URL
else:
googleapis_url = GOOGLEAPIS_PRIVATE_URL
self.googleapis = git.clone(googleapis_url)
def py_library(self, service: str, version: str, **kwargs) -> Path:
'''
Generates the Python Library files using artman/GAPIC
returns a `Path` object
library: path to library. 'google/cloud/speech'
version: version of lib. 'v1'
'''
return self._generate_code(service, version, 'python', **kwargs)
def _generate_code(self, service, version, language,
config_path=None, artman_output_name=None):
# map the language to the artman argument and subdir of genfiles
GENERATE_FLAG_LANGUAGE = {
'python': ('python_gapic', 'python'),
'nodejs': ('nodejs_gapic', 'js'),
'ruby': ('ruby_gapic', 'ruby'),
}
if language not in GENERATE_FLAG_LANGUAGE:
raise ValueError("provided language unsupported")
gapic_arg, gen_language = GENERATE_FLAG_LANGUAGE[language]
# Ensure docker image
log.debug("Pulling artman docker image.")
shell.run(['docker', 'pull', f'googleapis/artman:ARTMAN_VERSION'])
# Run the code generator.
# $ artman --config path/to/artman_api.yaml generate python_gapic
if config_path is None:
config_path = (
Path('google/cloud') / service
/ f"artman_{service}_{version}.yaml")
elif Path(config_path).is_absolute():
config_path = Path(config_path).relative_to('/')
else:
config_path = Path('google/cloud') / service / Path(config_path)
if not (self.googleapis/config_path).exists():
raise FileNotFoundError(
f"Unable to find configuration yaml file: {config_path}.")
subprocess_args = ['artman', '--config', config_path, 'generate',
gapic_arg]
log.info(f"Running generator.")
result = shell.run(subprocess_args, cwd=self.googleapis)
if result.returncode:
raise Exception(f"Failed to generate from {config_path}")
# Expect the output to be in the artman-genfiles directory.
# example: /artman-genfiles/python/speech-v1
if artman_output_name is None:
artman_output_name = f"{service}-{version}"
genfiles_dir = self.googleapis/'artman-genfiles'/gen_language
genfiles = genfiles_dir/artman_output_name
if not genfiles.exists():
raise FileNotFoundError(
f"Unable to find generated output of artman: {genfiles}.")
_tracked_paths.add(genfiles)
return genfiles
def _ensure_dependencies_installed(self):
log.debug("Ensuring dependencies")
dependencies = ['docker', 'git', 'artman']
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(
['which', dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {', '.join(failed_dependencies)}")
shell.run(['docker', 'pull', f'googleapis/artman:ARTMAN_VERSION'])
# TODO: Install artman in a virtualenv.
| from pathlib import Path
import tempfile
import platform
from synthtool import _tracked_paths
from synthtool import log
from synthtool import shell
from synthtool.sources import git
GOOGLEAPIS_URL: str = 'git@github.com:googleapis/googleapis.git'
GOOGLEAPIS_PRIVATE_URL: str = (
'git@github.com:googleapis/googleapis-private.git')
class GAPICGenerator:
def __init__(self, private: bool = False):
# Docker on mac by default cannot use the default temp file location
# instead use the more standard *nix /tmp location\
if platform.system() == 'Darwin':
tempfile.tempdir = '/tmp'
self._ensure_dependencies_installed()
# clone google apis to temp
# git clone git@github.com:googleapis/googleapis.git
if not private:
googleapis_url = GOOGLEAPIS_URL
else:
googleapis_url = GOOGLEAPIS_PRIVATE_URL
self.googleapis = git.clone(googleapis_url)
def py_library(self, service: str, version: str, **kwargs) -> Path:
'''
Generates the Python Library files using artman/GAPIC
returns a `Path` object
library: path to library. 'google/cloud/speech'
version: version of lib. 'v1'
'''
return self._generate_code(service, version, 'python', **kwargs)
def _generate_code(self, service, version, language,
config_path=None, artman_output_name=None):
# map the language to the artman argument and subdir of genfiles
GENERATE_FLAG_LANGUAGE = {
'python': ('python_gapic', 'python'),
'nodejs': ('nodejs_gapic', 'js'),
'ruby': ('ruby_gapic', 'ruby'),
}
if language not in GENERATE_FLAG_LANGUAGE:
raise ValueError("provided language unsupported")
gapic_arg, gen_language = GENERATE_FLAG_LANGUAGE[language]
# Ensure docker image
log.debug("Pulling artman docker image.")
shell.run(['docker', 'pull', 'googleapis/artman:0.10.1'])
# Run the code generator.
# $ artman --config path/to/artman_api.yaml generate python_gapic
if config_path is None:
config_path = (
Path('google/cloud') / service
/ f"artman_{service}_{version}.yaml")
elif Path(config_path).is_absolute():
config_path = Path(config_path).relative_to('/')
else:
config_path = Path('google/cloud') / service / Path(config_path)
if not (self.googleapis/config_path).exists():
raise FileNotFoundError(
f"Unable to find configuration yaml file: {config_path}.")
subprocess_args = ['artman', '--config', config_path, 'generate',
gapic_arg]
log.info(f"Running generator.")
result = shell.run(subprocess_args, cwd=self.googleapis)
if result.returncode:
raise Exception(f"Failed to generate from {config_path}")
# Expect the output to be in the artman-genfiles directory.
# example: /artman-genfiles/python/speech-v1
if artman_output_name is None:
artman_output_name = f"{service}-{version}"
genfiles_dir = self.googleapis/'artman-genfiles'/gen_language
genfiles = genfiles_dir/artman_output_name
if not genfiles.exists():
raise FileNotFoundError(
f"Unable to find generated output of artman: {genfiles}.")
_tracked_paths.add(genfiles)
return genfiles
def _ensure_dependencies_installed(self):
log.debug("Ensuring dependencies")
dependencies = ['docker', 'git', 'artman']
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(
['which', dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {', '.join(failed_dependencies)}")
shell.run(['docker', 'pull', 'googleapis/artman:0.11.0'])
# TODO: Install artman in a virtualenv.
| Python | 0.000002 |
56cbbef7b8bbfa31445dad1561c4014804250fd5 | fix test | kyototycoon/test/test_kyototycoon.py | kyototycoon/test/test_kyototycoon.py | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from nose.plugins.attrib import attr
# 3p
import requests
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
config = {
'instances': [{
'report_url': 'http://localhost:1978/rpc/report',
'tags': ['optional:tag1']
}]
}
METRICS = [
'kyototycoon.threads',
'kyototycoon.connections_per_s',
'kyototycoon.ops.get.hits_per_s',
'kyototycoon.ops.get.misses_per_s',
'kyototycoon.ops.set.hits_per_s',
'kyototycoon.ops.set.misses_per_s',
'kyototycoon.ops.del.hits_per_s',
'kyototycoon.ops.del.misses_per_s',
'kyototycoon.records',
'kyototycoon.size',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.del.total_per_s',
'kyototycoon.ops.del.total_per_s',
# 'kyototycoon.replication.delay', # Since I am not spinning up multiple servers, this should be 0
]
@attr(requires='kyototycoon')
class TestKyototycoon(AgentCheckTest):
"""Basic Test for kyototycoon integration."""
CHECK_NAME = 'kyototycoon'
def setUp(self):
dat = {
'dddd': 'dddd'
}
headers = {
'X-Kt-Mode': 'set'
}
for x in range(0, 100):
requests.put('http://localhost:1978', data=dat, headers=headers)
requests.get('http://localhost:1978')
def test_check(self):
"""
Testing Kyototycoon check.
"""
self.run_check_twice(config)
for mname in METRICS:
if mname == 'kyototycoon.records' or 'kyototycoon.size':
self.assertMetric(mname, count=1, at_least=0, tags=['optional:tag1', 'db:0'])
else:
self.assertMetric(mname, count=1, at_least=0, tags=['optional:tag1'])
self.assertServiceCheck('kyototycoon.can_connect', status=AgentCheck.OK, tags=['optional:tag1'], at_least=1)
self.coverage_report()
| # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from nose.plugins.attrib import attr
# 3p
import requests
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
config = {
'instances': [{
'report_url': 'http://localhost:1978/rpc/report',
'tags': ['optional:tag1']
}]
}
METRICS = [
'kyototycoon.threads',
'kyototycoon.connections_per_s',
'kyototycoon.ops.get.hits_per_s',
'kyototycoon.ops.get.misses_per_s',
'kyototycoon.ops.set.hits_per_s',
'kyototycoon.ops.set.misses_per_s',
'kyototycoon.ops.del.hits_per_s',
'kyototycoon.ops.del.misses_per_s',
'kyototycoon.records',
'kyototycoon.size',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.get.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.set.total_per_s',
'kyototycoon.ops.del.total_per_s',
'kyototycoon.ops.del.total_per_s',
# 'kyototycoon.replication.delay', # Since I am not spinning up multiple servers, this should be 0
]
@attr(requires='kyototycoon')
class TestKyototycoon(AgentCheckTest):
    """Basic integration test for the Kyototycoon check."""

    CHECK_NAME = 'kyototycoon'

    def setUp(self):
        """Seed the local Kyoto Tycoon server with records to report on."""
        payload = {'dddd': 'dddd'}
        set_mode = {'X-Kt-Mode': 'set'}
        for _ in range(100):
            requests.put('http://localhost:1978', data=payload, headers=set_mode)
        requests.get('http://localhost:1978')

    def test_check(self):
        """Run the check twice and verify every expected metric and the
        connectivity service check are reported with the instance tag."""
        self.run_check_twice(config)
        for metric_name in METRICS:
            self.assertMetric(metric_name, count=1, at_least=0, tags=['optional:tag1'])
        self.assertServiceCheck('kyototycoon.can_connect', status=AgentCheck.OK, tags=['optional:tag1'], at_least=1)
        self.coverage_report()
| Python | 0.000002 |
fab3936bacbb961f2214ea5d1dce913c1635ab2c | Commit inicial de una rama para la versión 7.0. Todos los módulos fueron marcados como no instalables mientras no se compruebe que funcionan o migren, para poder usarlos o probarlos se debería poner el atributo installable de los ficheros __openerp__.py de cada módulo a True | l10n_es_account_asset/__openerp__.py | l10n_es_account_asset/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Assets Management",
"version" : "1.0",
"depends" : ["account"],
"author" : "OpenERP S.A.",
"description": """Financial and accounting asset management.
This Module manages the assets owned by a company or an individual. It will keep track of depreciation's occurred on
those assets. And it allows to create Move's of the depreciation lines.
""",
"website" : "http://www.openerp.com",
"category" : "Accounting & Finance",
"init_xml" : [
],
"demo_xml" : [
],
'test': ['test/account_asset.yml',
],
"update_xml" : [
"security/account_asset_security.xml",
"security/ir.model.access.csv",
"account_asset_wizard.xml",
"wizard/account_asset_change_duration_view.xml",
"wizard/wizard_asset_compute_view.xml",
"account_asset_view.xml",
"account_asset_invoice_view.xml",
"report/account_asset_report_view.xml",
],
"active": False,
"installable": False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Assets Management",
"version" : "1.0",
"depends" : ["account"],
"author" : "OpenERP S.A.",
"description": """Financial and accounting asset management.
This Module manages the assets owned by a company or an individual. It will keep track of depreciation's occurred on
those assets. And it allows to create Move's of the depreciation lines.
""",
"website" : "http://www.openerp.com",
"category" : "Accounting & Finance",
"init_xml" : [
],
"demo_xml" : [
],
'test': ['test/account_asset.yml',
],
"update_xml" : [
"security/account_asset_security.xml",
"security/ir.model.access.csv",
"account_asset_wizard.xml",
"wizard/account_asset_change_duration_view.xml",
"wizard/wizard_asset_compute_view.xml",
"account_asset_view.xml",
"account_asset_invoice_view.xml",
"report/account_asset_report_view.xml",
],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.999886 |
b94bfcbc579af9e08c4ac42466cd0c33a6d8a529 | add textCNN model to __init__.py (#1421) | tensor2tensor/models/__init__.py | tensor2tensor/models/__init__.py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models defined in T2T. Imports here force registration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
# pylint: disable=unused-import
from tensor2tensor.layers import modalities # pylint: disable=g-import-not-at-top
from tensor2tensor.models import basic
from tensor2tensor.models import bytenet
from tensor2tensor.models import distillation
from tensor2tensor.models import image_transformer
from tensor2tensor.models import image_transformer_2d
from tensor2tensor.models import lstm
from tensor2tensor.models import mtf_image_transformer
from tensor2tensor.models import mtf_resnet
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models import mtf_transformer2
from tensor2tensor.models import neural_gpu
from tensor2tensor.models import resnet
from tensor2tensor.models import revnet
from tensor2tensor.models import shake_shake
from tensor2tensor.models import slicenet
from tensor2tensor.models import text_cnn
from tensor2tensor.models import transformer
from tensor2tensor.models import vanilla_gan
from tensor2tensor.models import xception
from tensor2tensor.models.research import adafactor_experiments
from tensor2tensor.models.research import aligned
from tensor2tensor.models.research import attention_lm
from tensor2tensor.models.research import attention_lm_moe
from tensor2tensor.models.research import autoencoders
from tensor2tensor.models.research import cycle_gan
from tensor2tensor.models.research import gene_expression
from tensor2tensor.models.research import glow
from tensor2tensor.models.research import lm_experiments
from tensor2tensor.models.research import moe_experiments
from tensor2tensor.models.research import multiquery_paper
from tensor2tensor.models.research import rl
from tensor2tensor.models.research import similarity_transformer
from tensor2tensor.models.research import super_lm
from tensor2tensor.models.research import transformer_moe
from tensor2tensor.models.research import transformer_nat
from tensor2tensor.models.research import transformer_parallel
from tensor2tensor.models.research import transformer_revnet
from tensor2tensor.models.research import transformer_sketch
from tensor2tensor.models.research import transformer_symshard
from tensor2tensor.models.research import transformer_vae
from tensor2tensor.models.research import universal_transformer
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.models.research import vqa_recurrent_self_attention
from tensor2tensor.models.research import vqa_self_attention
from tensor2tensor.models.video import basic_deterministic
from tensor2tensor.models.video import basic_recurrent
from tensor2tensor.models.video import basic_stochastic
from tensor2tensor.models.video import emily
from tensor2tensor.models.video import epva
from tensor2tensor.models.video import next_frame_glow
from tensor2tensor.models.video import savp
from tensor2tensor.models.video import sv2p
from tensor2tensor.models.video import svg_lp
from tensor2tensor.utils import registry
# pylint: enable=unused-import
def model(name):
    """Look up a registered model class by its registry name.

    Importing this module registers every model imported above (the module
    docstring notes that "Imports here force registration"), so any of
    their registry names can be resolved here.

    Args:
      name: string registry key of the model.

    Returns:
      Whatever ``registry.model`` resolves for ``name``.
    """
    return registry.model(name)
| # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models defined in T2T. Imports here force registration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
# pylint: disable=unused-import
from tensor2tensor.layers import modalities # pylint: disable=g-import-not-at-top
from tensor2tensor.models import basic
from tensor2tensor.models import bytenet
from tensor2tensor.models import distillation
from tensor2tensor.models import image_transformer
from tensor2tensor.models import image_transformer_2d
from tensor2tensor.models import lstm
from tensor2tensor.models import mtf_image_transformer
from tensor2tensor.models import mtf_resnet
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models import mtf_transformer2
from tensor2tensor.models import neural_gpu
from tensor2tensor.models import resnet
from tensor2tensor.models import revnet
from tensor2tensor.models import shake_shake
from tensor2tensor.models import slicenet
from tensor2tensor.models import transformer
from tensor2tensor.models import vanilla_gan
from tensor2tensor.models import xception
from tensor2tensor.models.research import adafactor_experiments
from tensor2tensor.models.research import aligned
from tensor2tensor.models.research import attention_lm
from tensor2tensor.models.research import attention_lm_moe
from tensor2tensor.models.research import autoencoders
from tensor2tensor.models.research import cycle_gan
from tensor2tensor.models.research import gene_expression
from tensor2tensor.models.research import glow
from tensor2tensor.models.research import lm_experiments
from tensor2tensor.models.research import moe_experiments
from tensor2tensor.models.research import multiquery_paper
from tensor2tensor.models.research import rl
from tensor2tensor.models.research import similarity_transformer
from tensor2tensor.models.research import super_lm
from tensor2tensor.models.research import transformer_moe
from tensor2tensor.models.research import transformer_nat
from tensor2tensor.models.research import transformer_parallel
from tensor2tensor.models.research import transformer_revnet
from tensor2tensor.models.research import transformer_sketch
from tensor2tensor.models.research import transformer_symshard
from tensor2tensor.models.research import transformer_vae
from tensor2tensor.models.research import universal_transformer
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.models.research import vqa_recurrent_self_attention
from tensor2tensor.models.research import vqa_self_attention
from tensor2tensor.models.video import basic_deterministic
from tensor2tensor.models.video import basic_recurrent
from tensor2tensor.models.video import basic_stochastic
from tensor2tensor.models.video import emily
from tensor2tensor.models.video import epva
from tensor2tensor.models.video import next_frame_glow
from tensor2tensor.models.video import savp
from tensor2tensor.models.video import sv2p
from tensor2tensor.models.video import svg_lp
from tensor2tensor.utils import registry
# pylint: enable=unused-import
def model(name):
    """Look up a registered model class by its registry name.

    Importing this module registers every model imported above (the module
    docstring notes that "Imports here force registration"), so any of
    their registry names can be resolved here.

    Args:
      name: string registry key of the model.

    Returns:
      Whatever ``registry.model`` resolves for ``name``.
    """
    return registry.model(name)
| Python | 0.000139 |
251a91c1bf245b3674c2612149382a0f1e18dc98 | Add tests for getrpcinfo | test/functional/interface_rpc.py | test/functional/interface_rpc.py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
class RPCInterfaceTest(BitcoinTestFramework):
    """Exercises generic behaviour of the JSON-RPC interface."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_getrpcinfo(self):
        """getrpcinfo must report itself as the only in-flight command."""
        self.log.info("Testing getrpcinfo...")

        info = self.nodes[0].getrpcinfo()
        active = info['active_commands']
        assert_equal(len(active), 1)

        # The single active command is this very getrpcinfo call.
        (command,) = active
        assert_equal(command['method'], 'getrpcinfo')
        assert_greater_than_or_equal(command['duration'], 0)

    def test_batch_request(self):
        """A batch may mix succeeding and failing calls; each gets a reply."""
        self.log.info("Testing basic JSON-RPC batch request...")

        calls = [
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getbestblockhash", "id": 3},
        ]
        results = self.nodes[0].batch(calls)

        result_by_id = {res["id"]: res for res in results}

        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)

        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)

        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def run_test(self):
        self.test_getrpcinfo()
        self.test_batch_request()


if __name__ == '__main__':
    RPCInterfaceTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class RPCInterfaceTest(BitcoinTestFramework):
    """Exercises generic behaviour of the JSON-RPC interface."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_batch_request(self):
        """A batch may mix succeeding and failing calls; each gets a reply."""
        self.log.info("Testing basic JSON-RPC batch request...")

        calls = [
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getbestblockhash", "id": 3},
        ]
        results = self.nodes[0].batch(calls)

        result_by_id = {res["id"]: res for res in results}

        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)

        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)

        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def run_test(self):
        self.test_batch_request()


if __name__ == '__main__':
    RPCInterfaceTest().main()
| Python | 0 |
46c09fd75c6f45d68cd722cd3a12b88d04257083 | Add tests for getrpcinfo | test/functional/interface_rpc.py | test/functional/interface_rpc.py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
class RPCInterfaceTest(BitcoinTestFramework):
    """Exercises generic behaviour of the JSON-RPC interface."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_getrpcinfo(self):
        """getrpcinfo must report itself as the only in-flight command."""
        self.log.info("Testing getrpcinfo...")

        info = self.nodes[0].getrpcinfo()
        active = info['active_commands']
        assert_equal(len(active), 1)

        # The single active command is this very getrpcinfo call.
        (command,) = active
        assert_equal(command['method'], 'getrpcinfo')
        assert_greater_than_or_equal(command['duration'], 0)

    def test_batch_request(self):
        """A batch may mix succeeding and failing calls; each gets a reply."""
        self.log.info("Testing basic JSON-RPC batch request...")

        calls = [
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getbestblockhash", "id": 3},
        ]
        results = self.nodes[0].batch(calls)

        result_by_id = {res["id"]: res for res in results}

        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)

        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)

        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def run_test(self):
        self.test_getrpcinfo()
        self.test_batch_request()


if __name__ == '__main__':
    RPCInterfaceTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class RPCInterfaceTest(BitcoinTestFramework):
    """Exercises generic behaviour of the JSON-RPC interface."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_batch_request(self):
        """A batch may mix succeeding and failing calls; each gets a reply."""
        self.log.info("Testing basic JSON-RPC batch request...")

        calls = [
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getbestblockhash", "id": 3},
        ]
        results = self.nodes[0].batch(calls)

        result_by_id = {res["id"]: res for res in results}

        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)

        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)

        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def run_test(self):
        self.test_batch_request()


if __name__ == '__main__':
    RPCInterfaceTest().main()
| Python | 0 |
35cf00b5f05f4b1df8b40b7edc3aac76534c8903 | enable some reduction tests | test/python/tests/test_reduce.py | test/python/tests/test_reduce.py | import util
class test_reduce_views:
    """ Test reduction of all kind of views"""
    def init(self):
        # Yield (setup-command, axis) pairs for random arrays of up to 4
        # dimensions, covering positive and negative axis values.
        # NOTE: -0 == 0, so the second loop re-yields axis 0 once.
        for cmd, ndim in util.gen_random_arrays("R", 4, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for i in range(ndim):
                yield (cmd, i)
            for i in range(ndim):
                yield (cmd, -i)
    def test_reduce(self, (cmd, axis)):
        # Python 2 tuple-parameter unpacking: receives the (cmd, axis) pair
        # produced by init() and returns the command string to execute.
        cmd += "res = M.add.reduce(a, axis=%d)" % axis
        return cmd
class test_reduce_sum:
    """ Test reduction of sum() and prod()"""
    def init(self):
        # Yield (setup-command, op-name) pairs for arrays of up to 3
        # dimensions, once per full-reduction operator.
        for cmd, ndim in util.gen_random_arrays("R", 3, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for op in ["sum", "prod"]:
                yield (cmd, op)
    def test_func(self, (cmd, op)):
        # Free-function form, e.g. M.sum(a).
        cmd += "res = M.%s(a)" % op
        return cmd
    def test_method(self, (cmd, op)):
        # Method form, e.g. a.sum().
        cmd += "res = a.%s()" % op
        return cmd
class test_reduce_primitives:
    """Test full reductions of primitive ufuncs for each dtype group."""
    def init(self):
        # (operator name, dtype) combinations to reduce over.
        for op in ["add", "multiply", "minimum", "maximum"]:
            yield (op, "np.float64")
        for op in ["bitwise_or", "bitwise_xor"]:
            yield (op, "np.uint64")
        for op in ["add", "logical_or", "logical_and", "logical_xor"]:
            yield (op, "np.bool")
    def test_vector(self, (op, dtype)):
        # Full reduction of a 10-element random vector of the given dtype.
        cmd = "R = bh.random.RandomState(42); a = R.random(10, dtype=%s, bohrium=BH); " % dtype
        cmd += "res = M.%s.reduce(a)" % op
        return cmd
| import util
class tes1t_reduce_views:
    # NOTE(review): class name appears deliberately misspelled ("tes1t")
    # so the test harness does not collect it — presumably disabled;
    # confirm before renaming.
    """ Test reduction of all kind of views"""
    def init(self):
        # Yield (setup-command, axis) pairs for arrays of up to 4 dims,
        # covering positive and negative axis values (-0 re-yields axis 0).
        for cmd, ndim in util.gen_random_arrays("R", 4, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for i in range(ndim):
                yield (cmd, i)
            for i in range(ndim):
                yield (cmd, -i)
    def test_reduce(self, (cmd, axis)):
        # Python 2 tuple-parameter unpacking.
        cmd += "res = M.add.reduce(a, axis=%d)" % axis
        return cmd
class tes1t_reduce_sum:
    # NOTE(review): class name appears deliberately misspelled ("tes1t")
    # so the test harness does not collect it — presumably disabled;
    # confirm before renaming.
    """ Test reduction of sum() and prod()"""
    def init(self):
        # Yield (setup-command, op-name) pairs for arrays of up to 3 dims.
        for cmd, ndim in util.gen_random_arrays("R", 3, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for op in ["sum", "prod"]:
                yield (cmd, op)
    def test_func(self, (cmd, op)):
        # Free-function form, e.g. M.sum(a).
        cmd += "res = M.%s(a)" % op
        return cmd
    def test_method(self, (cmd, op)):
        # Method form, e.g. a.sum().
        cmd += "res = a.%s()" % op
        return cmd
class test_reduce_primitives:
    """Test full reductions of primitive ufuncs for each dtype group."""
    def init(self):
        # (operator name, dtype) combinations to reduce over.
        for op in ["add", "multiply", "minimum", "maximum"]:
            yield (op, "np.float64")
        for op in ["bitwise_or", "bitwise_xor"]:
            yield (op, "np.uint64")
        for op in ["add", "logical_or", "logical_and", "logical_xor"]:
            yield (op, "np.bool")
    def test_vector(self, (op, dtype)):
        # Full reduction of a 10-element random vector of the given dtype.
        cmd = "R = bh.random.RandomState(42); a = R.random(10, dtype=%s, bohrium=BH); " % dtype
        cmd += "res = M.%s.reduce(a)" % op
        return cmd
| Python | 0.000036 |
b2266a2640d542fa6f9734fa9565a7521d06f1b0 | Bump again | bulbs/__init__.py | bulbs/__init__.py | __version__ = "0.11.4"
| __version__ = "0.11.3"
| Python | 0 |
aee41bac296eece9c30565c5824db9a019833ee0 | Add decorator docs | calm/decorator.py | calm/decorator.py | """
This module defines general decorators to define the Calm Application.
"""
from calm.resource import Resource
from calm.ex import DefinitionError
def produces(resource_type):
    """Declare what kind of Resource the decorated handler produces.

    The argument is validated eagerly; the returned decorator records the
    type either on the handler's existing definition object or, when the
    handler has not been registered yet, on the function itself — so the
    Calm decorators can be applied in any order.
    """
    if not isinstance(resource_type, Resource):
        raise DefinitionError('@produces value should be of type Resource.')

    def decorator(handler):
        """Attach the produced Resource type to *handler*."""
        handler_def = getattr(handler, 'handler_def', None)
        if handler_def:
            handler_def.produces = resource_type
        else:
            handler.produces = resource_type
        return handler

    return decorator
def consumes(resource_type):
    """Declare what kind of Resource the decorated handler consumes.

    The argument is validated eagerly; the returned decorator records the
    type either on the handler's existing definition object or, when the
    handler has not been registered yet, on the function itself — so the
    Calm decorators can be applied in any order.
    """
    if not isinstance(resource_type, Resource):
        raise DefinitionError('@consumes value should be of type Resource.')

    def decorator(handler):
        """Attach the consumed Resource type to *handler*."""
        handler_def = getattr(handler, 'handler_def', None)
        if handler_def:
            handler_def.consumes = resource_type
        else:
            handler.consumes = resource_type
        return handler

    return decorator
|
def produces(resource_type):
    """Decorator declaring what kind of Resource the handler produces."""
    def decor(func):
        # The handler may or may not have been registered yet; store the
        # value wherever it will be picked up, so decorator order does not
        # matter.
        if getattr(func, 'handler_def', None):
            func.handler_def.produces = resource_type
        else:
            func.produces = resource_type
        return func
    return decor
def consumes(resource_type):
    """Decorator declaring what kind of Resource the handler consumes."""
    def decor(func):
        # The handler may or may not have been registered yet; store the
        # value wherever it will be picked up, so decorator order does not
        # matter.
        if getattr(func, 'handler_def', None):
            func.handler_def.consumes = resource_type
        else:
            func.consumes = resource_type
        return func
    return decor
| Python | 0.000001 |
6f0b75f0561563926afc37dca8451f886e2e2d4f | Handle unicode data like cdbdump | cdblib/cdbdump.py | cdblib/cdbdump.py | from __future__ import print_function
import argparse
import sys
import six
import cdblib
def cdbdump(parsed_args, **kwargs):
    """Write every record of a cdb read from stdin in cdbmake format.

    Streams default to the process's binary stdin/stdout but can be
    overridden via the ``stdin``/``stdout`` keyword arguments.
    """
    # Binary streams by default; on Python 3 the raw buffer is needed
    # because keys and values are bytes, not text.
    stdin = kwargs.get('stdin')
    stdout = kwargs.get('stdout')
    if stdin is None:
        stdin = sys.stdin if six.PY2 else sys.stdin.buffer
    if stdout is None:
        stdout = sys.stdout if six.PY2 else sys.stdout.buffer

    # Pick the 64-bit reader when the -64 flag was given, then parse.
    use_64 = vars(parsed_args)['64']
    reader = (cdblib.Reader64 if use_64 else cdblib.Reader)(stdin.read())

    # Emit records as "+klen,vlen:key->value", one per line.
    write = stdout.write
    for key, value in reader.iteritems():
        write(b'+')
        write(str(len(key)).encode('ascii'))
        write(b',')
        write(str(len(value)).encode('ascii'))
        write(b':')
        write(key)
        write(b'->')
        write(value)
        write(b'\n')

    # The dump is terminated by a single blank line.
    write(b'\n')
def main(args=None):
    """Command-line entry point: parse *args* (default sys.argv[1:]) and dump."""
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description=(
            "Python version of djb's cdbdump. "
            "Supports standard 32-bit cdb files as well as 64-bit variants."
        )
    )
    # Boolean flag selecting the non-standard 64-bit offset format.
    parser.add_argument(
        '-64', action='store_true', help='Use non-standard 64-bit file offsets'
    )
    cdbdump(parser.parse_args(args))


if __name__ == '__main__':
    main()
| from __future__ import print_function
import argparse
import sys
import six
import cdblib
def cdbdump(parsed_args, **kwargs):
    """Dump a cdb read from stdin as text in cdbmake format.

    Keyword arguments:
      stdin    -- binary input stream (defaults to the process stdin buffer)
      stdout   -- text output stream (defaults to sys.stdout)
      encoding -- encoding used to decode keys/values for display
    """
    # Read binary data from stdin by default
    stdin = kwargs.get('stdin')
    if stdin is None:
        stdin = sys.stdin if six.PY2 else sys.stdin.buffer

    # Print text data to stdout by default
    stdout = kwargs.get('stdout', sys.stdout)
    encoding = kwargs.get('encoding', sys.getdefaultencoding())

    # Consume stdin and parse the cdb file
    reader_cls = cdblib.Reader64 if vars(parsed_args)['64'] else cdblib.Reader
    data = stdin.read()
    reader = reader_cls(data)

    # Dump the file's contents to the output stream
    for key, value in reader.iteritems():
        item = '+{:d},{:d}:{:s}->{:s}'.format(
            len(key),
            len(value),
            key.decode(encoding),
            value.decode(encoding)
        )
        print(item, file=stdout)

    # BUG FIX: the terminating blank line previously went to the real
    # sys.stdout even when a custom ``stdout`` stream was supplied.
    print(file=stdout)
def main(args=None):
    """Command-line entry point: parse *args* (default sys.argv[1:]) and dump."""
    args = sys.argv[1:] if (args is None) else args
    parser = argparse.ArgumentParser(
        description=(
            "Python version of djb's cdbdump. "
            "Supports standard 32-bit cdb files as well as 64-bit variants."
        )
    )
    # Boolean flag selecting the non-standard 64-bit offset format.
    parser.add_argument(
        '-64', action='store_true', help='Use non-standard 64-bit file offsets'
    )
    parsed_args = parser.parse_args(args)
    cdbdump(parsed_args)


if __name__ == '__main__':
    main()
| Python | 0.999719 |
9e22b82b9f5848ae3bfc8def66fe7b3d23c8f5b8 | Change Alfred date of posting to be iso8601 compatible. | jobs/spiders/alfred.py | jobs/spiders/alfred.py | import json
import urlparse
import dateutil.parser
import scrapy
from jobs.items import JobsItem
class AlfredSpider(scrapy.Spider):
    """Scrapes job listings from the alfred.is JSON API."""

    name = "alfred"
    start_urls = ['https://api.alfred.is/api/v3/web/open/jobs?cat=0&limit=100&page=0']

    def parse(self, response):
        """Yield one detail-page request per job in the listing response."""
        listing = json.loads(response.text)

        # Jobs under 'data' reference companies listed under 'included';
        # index the companies by id so each job can look up its brand.
        companies = {company['id']: company for company in listing['included']}

        for job in listing['data']:
            job_id = job['id']
            brand = companies[job['relationships']['brand']['data']['id']]

            item = JobsItem()
            item['spider'] = self.name
            item['company'] = brand['attributes']['name']
            item['url'] = urlparse.urljoin('https://alfred.is/starf/', job_id)

            # Fetch the job's API detail record to fill in the rest.
            detail_url = urlparse.urljoin('https://api.alfred.is/api/v3/web/open/jobs/', job_id)
            request = scrapy.Request(detail_url, callback=self.parse_specific_job)
            request.meta['item'] = item
            yield request

    def parse_specific_job(self, response):
        """Complete and yield the item carried in the request meta."""
        attributes = json.loads(response.text)['data']['attributes']

        item = response.meta['item']
        item['title'] = attributes['title']
        item['posted'] = attributes['start']
        # Normalise the deadline to ISO-8601.
        item['deadline'] = dateutil.parser.parse(attributes['deadline']).isoformat()
        yield item
| import json
import urlparse
import scrapy
from jobs.items import JobsItem
from jobs.spiders.visir import decode_date_string
class AlfredSpider(scrapy.Spider):
    """Scrapes job listings from the alfred.is JSON API."""

    name = "alfred"
    start_urls = ['https://api.alfred.is/api/v3/web/open/jobs?cat=0&limit=100&page=0']

    def parse(self, response):
        """Yield one detail-page request per job in the listing response."""
        # we're using an api rather than scraping a website so we need to grok the json response
        content = json.loads(response.text)

        # each job under the 'data' key refers to companies listed in the `included` key, so to make
        # it easy to get at the data we make a dict keyed to the id of the company
        included_data = {entry['id']: entry for entry in content['included']}

        for job in content['data']:
            job_id = job['id']
            company_id = job['relationships']['brand']['data']['id']
            item = JobsItem()
            item['spider'] = self.name
            item['company'] = included_data[company_id]['attributes']['name']
            item['url'] = urlparse.urljoin('https://alfred.is/starf/', job_id)
            # Follow the job's API detail page to fill in the remaining fields.
            api_url = urlparse.urljoin('https://api.alfred.is/api/v3/web/open/jobs/', job_id)
            request = scrapy.Request(api_url, callback=self.parse_specific_job)
            request.meta['item'] = item
            yield request

    def parse_specific_job(self, response):
        """Complete and yield the item carried in request.meta."""
        content = json.loads(response.text)
        job = content['data']['attributes']
        item = response.meta['item']
        item['title'] = job['title']
        # assumes job['start'] is already a usable date string — TODO confirm
        item['posted'] = job['start']
        item['deadline'] = decode_date_string(job['deadline'])
        yield item
| Python | 0 |
4b1ba931091448b4e5d980cb0695b4a8aa85b459 | Use markets.ft.com instead of fixer.io | bumblebee/modules/currency.py | bumblebee/modules/currency.py | # -*- coding: UTF-8 -*-
# pylint: disable=C0111,R0903
"""Displays currency exchange rates. Currently, displays currency between GBP and USD/EUR only.
Requires the following python packages:
* requests
Parameters:
* currency.interval: Interval in minutes between updates, default is 1.
* currency.source: Source currency (defaults to "GBP")
* currency.destination: Comma-separated list of destination currencies (defaults to "USD,EUR")
* currency.sourceformat: String format for source formatting; Defaults to "{}: {}" and has two variables,
the base symbol and the rate list
* currency.destinationdelimiter: Delimiter used for separating individual rates (defaults to "|")
Note: source and destination names right now must correspond to the names used by the API of https://markets.ft.com
"""
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import json
import time
try:
import requests
from requests.exceptions import RequestException
except ImportError:
pass
# Display symbols for the most common currencies; any other currency code
# is shown as-is.
SYMBOL = {
    "GBP": u"£", "EUR": u"€", "USD": u"$"
}

# markets.ft.com conversion endpoint; formatted with (base, destination).
API_URL = "https://markets.ft.com/data/currencies/ajax/conversion?baseCurrency={}&comparison={}"
class Module(bumblebee.engine.Module):
    """Bumblebee widget showing exchange rates fetched from markets.ft.com."""

    def __init__(self, engine, config):
        super(Module, self).__init__(engine, config,
            bumblebee.output.Widget(full_text=self.price)
        )
        # symbol -> last fetched exchange rate
        self._data = {}
        # Refresh interval in minutes.
        self._interval = int(self.parameter("interval", 1))
        self._base = self.parameter("source", "GBP")
        self._symbols = self.parameter("destination", "USD,EUR").split(",")
        # Epoch seconds of the next allowed refresh; 0 forces an immediate
        # fetch on the first update.
        self._nextcheck = 0

    def price(self, widget):
        """Render the rates, e.g. u"£: 1.25$|1.10€"; "?" until data arrives."""
        if self._data == {}:
            return "?"

        rates = []
        for sym, rate in self._data.items():
            rates.append(u"{}{}".format(rate, SYMBOL[sym] if sym in SYMBOL else sym))

        basefmt = u"{}".format(self.parameter("sourceformat", "{}: {}"))
        ratefmt = u"{}".format(self.parameter("destinationdelimiter", "|"))

        return basefmt.format(SYMBOL[self._base] if self._base in SYMBOL else self._base, ratefmt.join(rates))

    def update(self, widgets):
        """Re-fetch the exchange rates once the refresh interval elapsed."""
        # FIX: the previous version computed the current time into an unused
        # local ("timestamp") and then called time.time() twice more;
        # compute it once and reuse it.
        now = int(time.time())
        if self._nextcheck < now:
            self._data = {}
            self._nextcheck = now + self._interval * 60
            for symbol in self._symbols:
                url = API_URL.format(self._base, symbol)
                try:
                    response = requests.get(url).json()
                    self._data[symbol] = response['data']['exchangeRate']
                except Exception:
                    # Best effort: keep whatever rates we already have if the
                    # endpoint is unreachable or returns unexpected data.
                    pass
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| # -*- coding: UTF-8 -*-
# pylint: disable=C0111,R0903
"""Displays currency exchange rates. Currently, displays currency between GBP and USD/EUR only.
Requires the following python packages:
* requests
Parameters:
* currency.interval: Interval in minutes between updates, default is 1.
* currency.source: Source currency (defaults to "GBP")
* currency.destination: Comma-separated list of destination currencies (defaults to "USD,EUR")
* currency.sourceformat: String format for source formatting; Defaults to "{}: {}" and has two variables,
the base symbol and the rate list
* currency.destinationdelimiter: Delimiter used for separating individual rates (defaults to "|")
Note: source and destination names right now must correspond to the names used by the API of http://fixer.io
"""
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import json
import time
try:
import requests
from requests.exceptions import RequestException
except ImportError:
pass
SYMBOL = {
"GBP": u"£", "EUR": u"€", "USD": u"$"
}
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(engine, config,
bumblebee.output.Widget(full_text=self.price)
)
self._data = {}
self._interval = int(self.parameter("interval", 1))
self._base = self.parameter("source", "GBP")
self._symbols = self.parameter("destination", "USD,EUR")
self._nextcheck = 0
def price(self, widget):
if self._data == {}:
return "?"
rates = []
for sym in self._data["rates"]:
rates.append(u"{}{}".format(self._data["rates"][sym], SYMBOL[sym] if sym in SYMBOL else sym))
basefmt = u"{}".format(self.parameter("sourceformat", "{}: {}"))
ratefmt = u"{}".format(self.parameter("destinationdelimiter", "|"))
return basefmt.format(SYMBOL[self._base] if self._base in SYMBOL else self._base, ratefmt.join(rates))
def update(self, widgets):
timestamp = int(time.time())
if self._nextcheck < int(time.time()):
self._data = {}
self._nextcheck = int(time.time()) + self._interval*60
url = "http://api.fixer.io/latest?symbols={}&base={}".format(self._symbols, self._base)
try:
self._data = json.loads(requests.get(url).text)
except Exception:
pass
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0 |
210c2cf58c246c3733542b8fee7c3eb9fe5d860d | bump version | callisto/delivery/__init__.py | callisto/delivery/__init__.py | __version__ = '0.4.1'
| __version__ = '0.4.0'
| Python | 0 |
623ce2d8624a1a04156a35ae762d29a19fbc7b52 | fix broken docstring | ceph_deploy/util/templates.py | ceph_deploy/util/templates.py |
ceph_repo = """
[ceph]
name=Ceph packages for $basearch
baseurl={repo_url}/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-noarch]
name=Ceph noarch packages
baseurl={repo_url}/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-source]
name=Ceph source packages
baseurl={repo_url}/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
"""
def custom_repo(**kw):
"""
Repo files need special care in that a whole line should not be present
if there is no value for it. Because we were using `format()` we could
not conditionally add a line for a repo file. So the end result would
contain a key with a missing value (say if we were passing `None`).
For example, it could look like::
[ceph repo]
name= ceph repo
proxy=
gpgcheck=
Which breaks. This function allows us to conditionally add lines,
preserving an order and be more careful.
Previously, and for historical purposes, this is how the template used
to look::
custom_repo =
[{repo_name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
type={_type}
gpgkey={gpgkey}
proxy={proxy}
"""
lines = []
# by using tuples (vs a dict) we preserve the order of what we want to
# return, like starting with a [repo name]
tmpl = (
('reponame', '[%s]'),
('baseurl', 'baseurl=%s'),
('enabled', 'enabled=%s'),
('gpgcheck', 'gpgcheck=%s'),
('_type', 'type=%s'),
('gpgkey', 'gpgkey=%s'),
('proxy', 'proxy=%s'),
)
for line in tmpl:
tmpl_key, tmpl_value = line # key values from tmpl
# ensure that there is an actual value (not None nor empty string)
if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''):
lines.append(tmpl_value % kw.get(tmpl_key))
return '\n'.join(lines)
|
ceph_repo = """
[ceph]
name=Ceph packages for $basearch
baseurl={repo_url}/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-noarch]
name=Ceph noarch packages
baseurl={repo_url}/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-source]
name=Ceph source packages
baseurl={repo_url}/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
"""
def custom_repo(**kw):
"""
Repo files need special care in that a whole line should not be present
if there is no value for it. Because we were using `format()` we could
not conditionally add a line for a repo file. So the end result would
contain a key with a missing value (say if we were passing `None`).
For example, it could look like::
[ceph repo]
name= ceph repo
proxy=
gpgcheck=
Which breaks. This function allows us to conditionally add lines,
preserving an order and be more careful.
Previously, and for historical purposes, this is how the template used
to look::
custom_repo = """
[{repo_name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
type={_type}
gpgkey={gpgkey}
proxy={proxy}
"""
"""
lines = []
# by using tuples (vs a dict) we preserve the order of what we want to
# return, like starting with a [repo name]
tmpl = (
('reponame', '[%s]'),
('baseurl', 'baseurl=%s'),
('enabled', 'enabled=%s'),
('gpgcheck', 'gpgcheck=%s'),
('_type', 'type=%s'),
('gpgkey', 'gpgkey=%s'),
('proxy', 'proxy=%s'),
)
for line in tmpl:
tmpl_key, tmpl_value = line # key values from tmpl
# ensure that there is an actual value (not None nor empty string)
if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''):
lines.append(tmpl_value % kw.get(tmpl_key))
return '\n'.join(lines)
| Python | 0 |
f4b8246aead0657e0f997773efed5fbc2147cce7 | add '# noqa' to imports to make flake8 happy | ceph_medic/remote/__init__.py | ceph_medic/remote/__init__.py | import mon # noqa
import osd # noqa
import common # noqa
import functions # noqa
import commands # noqa
| import mon
import osd
import common
import functions
import commands
| Python | 0 |
20e8ef6bd68100a70b9d50013630ff71d8b7ec94 | Support wildcard matches on coverage/junit results | changes/artifacts/__init__.py | changes/artifacts/__init__.py | from __future__ import absolute_import, print_function
from .manager import Manager
from .coverage import CoverageHandler
from .xunit import XunitHandler
manager = Manager()
manager.register(CoverageHandler, ['coverage.xml', '*.coverage.xml'])
manager.register(XunitHandler, ['xunit.xml', 'junit.xml', '*.xunit.xml', '*.junit.xml'])
| from __future__ import absolute_import, print_function
from .manager import Manager
from .coverage import CoverageHandler
from .xunit import XunitHandler
manager = Manager()
manager.register(CoverageHandler, ['coverage.xml'])
manager.register(XunitHandler, ['xunit.xml', 'junit.xml'])
| Python | 0 |
3109b98d8f8befdb927828908bd213000cf9ae51 | Handle empty result for previous runs/test failures in origin finder | changes/utils/originfinder.py | changes/utils/originfinder.py | from __future__ import absolute_import
from collections import defaultdict
from changes.constants import Result, Status
from changes.models import Job, TestGroup
def first(key, iterable):
for x in iterable:
if key(x):
return x
return None
def find_failure_origins(job, test_failures):
"""
Attempt to find originating causes of failures.
Returns a mapping of {TestGroup.name_sha: Job}.
"""
project = job.project
if not test_failures:
return {}
# find any existing failures in the previous runs
# to do this we first need to find the last passing job
last_pass = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.status == Status.finished,
Job.result == Result.passed,
Job.id != job.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc()).first()
if last_pass is None:
return {}
# We have to query all runs between job and last_pass, but we only
# care about runs where the suite failed. Because we're paranoid about
# performance, we limit this to 100 results.
previous_runs = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.date_created >= last_pass.date_created,
Job.status == Status.finished,
Job.result.in_([Result.failed, Result.passed]),
Job.id != job.id,
Job.id != last_pass.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc())[:100]
if not previous_runs:
return {}
# we now have a list of previous_runs so let's find all test failures in
# these runs
queryset = TestGroup.query.filter(
TestGroup.job_id.in_(b.id for b in previous_runs),
TestGroup.result == Result.failed,
TestGroup.num_leaves == 0,
TestGroup.name_sha.in_(t.name_sha for t in test_failures),
)
previous_test_failures = defaultdict(set)
for t in queryset:
previous_test_failures[t.job_id].add(t.name_sha)
failures_at_job = dict()
searching = set(t for t in test_failures)
last_checked_run = job
for p_job in previous_runs:
p_job_failures = previous_test_failures[p_job.id]
# we have to copy the set as it might change size during iteration
for f_test in list(searching):
if f_test.name_sha not in p_job_failures:
failures_at_job[f_test] = last_checked_run
searching.remove(f_test)
last_checked_run = p_job
for f_test in searching:
failures_at_job[f_test] = last_checked_run
return failures_at_job
| from __future__ import absolute_import
from collections import defaultdict
from changes.constants import Result, Status
from changes.models import Job, TestGroup
def first(key, iterable):
for x in iterable:
if key(x):
return x
return None
def find_failure_origins(job, test_failures):
"""
Attempt to find originating causes of failures.
Returns a mapping of {TestGroup.name_sha: Job}.
"""
project = job.project
# find any existing failures in the previous runs
# to do this we first need to find the last passing job
last_pass = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.status == Status.finished,
Job.result == Result.passed,
Job.id != job.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc()).first()
if last_pass is None:
return {}
# We have to query all runs between job and last_pass, but we only
# care about runs where the suite failed. Because we're paranoid about
# performance, we limit this to 100 results.
previous_runs = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.date_created >= last_pass.date_created,
Job.status == Status.finished,
Job.result.in_([Result.failed, Result.passed]),
Job.id != job.id,
Job.id != last_pass.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc())[:100]
# we now have a list of previous_runs so let's find all test failures in
# these runs
queryset = TestGroup.query.filter(
TestGroup.job_id.in_(b.id for b in previous_runs),
TestGroup.result == Result.failed,
TestGroup.num_leaves == 0,
TestGroup.name_sha.in_(t.name_sha for t in test_failures),
)
previous_test_failures = defaultdict(set)
for t in queryset:
previous_test_failures[t.job_id].add(t.name_sha)
failures_at_job = dict()
searching = set(t for t in test_failures)
last_checked_run = job
for p_job in previous_runs:
p_job_failures = previous_test_failures[p_job.id]
# we have to copy the set as it might change size during iteration
for f_test in list(searching):
if f_test.name_sha not in p_job_failures:
failures_at_job[f_test] = last_checked_run
searching.remove(f_test)
last_checked_run = p_job
for f_test in searching:
failures_at_job[f_test] = last_checked_run
return failures_at_job
| Python | 0 |
0bf6441863433575aebcbd0b238d27d95830c015 | Fix .iob converter (closes #3620) | spacy/cli/converters/iob2json.py | spacy/cli/converters/iob2json.py | # coding: utf8
from __future__ import unicode_literals
import re
from cytoolz import partition_all
from ...gold import iob_to_biluo
def iob2json(input_data, n_sents=10, *args, **kwargs):
"""
Convert IOB files into JSON format for use with train cli.
"""
sentences = read_iob(input_data.split("\n"))
docs = merge_sentences(sentences, n_sents)
return docs
def read_iob(raw_sents):
sentences = []
for line in raw_sents:
if not line.strip():
continue
tokens = [re.split("[^\w\-]", line.strip())]
if len(tokens[0]) == 3:
words, pos, iob = zip(*tokens)
elif len(tokens[0]) == 2:
words, iob = zip(*tokens)
pos = ["-"] * len(words)
else:
raise ValueError(
"The iob/iob2 file is not formatted correctly. Try checking whitespace and delimiters."
)
biluo = iob_to_biluo(iob)
sentences.append(
[
{"orth": w, "tag": p, "ner": ent}
for (w, p, ent) in zip(words, pos, biluo)
]
)
sentences = [{"tokens": sent} for sent in sentences]
paragraphs = [{"sentences": [sent]} for sent in sentences]
docs = [{"id": 0, "paragraphs": [para]} for para in paragraphs]
return docs
def merge_sentences(docs, n_sents):
merged = []
for group in partition_all(n_sents, docs):
group = list(group)
first = group.pop(0)
to_extend = first["paragraphs"][0]["sentences"]
for sent in group[1:]:
to_extend.extend(sent["paragraphs"][0]["sentences"])
merged.append(first)
return merged
| # coding: utf8
from __future__ import unicode_literals
import re
from ...gold import iob_to_biluo
from ...util import minibatch
def iob2json(input_data, n_sents=10, *args, **kwargs):
"""
Convert IOB files into JSON format for use with train cli.
"""
docs = []
for group in minibatch(docs, n_sents):
group = list(group)
first = group.pop(0)
to_extend = first["paragraphs"][0]["sentences"]
for sent in group[1:]:
to_extend.extend(sent["paragraphs"][0]["sentences"])
docs.append(first)
return docs
def read_iob(raw_sents):
sentences = []
for line in raw_sents:
if not line.strip():
continue
# tokens = [t.split("|") for t in line.split()]
tokens = [re.split("[^\w\-]", line.strip())]
if len(tokens[0]) == 3:
words, pos, iob = zip(*tokens)
elif len(tokens[0]) == 2:
words, iob = zip(*tokens)
pos = ["-"] * len(words)
else:
raise ValueError(
"The iob/iob2 file is not formatted correctly. Try checking whitespace and delimiters."
)
biluo = iob_to_biluo(iob)
sentences.append(
[
{"orth": w, "tag": p, "ner": ent}
for (w, p, ent) in zip(words, pos, biluo)
]
)
sentences = [{"tokens": sent} for sent in sentences]
paragraphs = [{"sentences": [sent]} for sent in sentences]
docs = [{"id": 0, "paragraphs": [para]} for para in paragraphs]
return docs
| Python | 0 |
f190916a828ab4b8ecf16cc6a82ebf3cf8f821e1 | Add a test for executing specs with tags | spec/execution_with_tags_spec.py | spec/execution_with_tags_spec.py | from mamba import description, before, context, it
from doublex import Spy
from expects import expect, be_true, be_false
from mamba import reporter, runnable
from mamba.example import Example
from mamba.example_group import ExampleGroup
from spec.object_mother import an_example_group
TAGS = ['any_tag']
with description('Example execution using tags') as self:
with before.each:
self.reporter = Spy(reporter.Reporter)
self.example_group = an_example_group()
self.example_with_tags = Example(lambda x: x,
parent=self.example_group,
tags=TAGS)
self.other_example = Example(lambda x: x, parent=self.example_group)
with context('when tag is included in example tags'):
with it('executes example'):
self.example_with_tags.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example_with_tags.was_run).to(be_true)
with context('when tag is not included in example tags'):
with it('does not execute example'):
self.other_example.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.other_example.was_run).to(be_false)
with context('when tag is included in example_group tags'):
with it('executes children'):
self.example_group = ExampleGroup('any example_group', tags=TAGS)
self.example = Example(lambda x: x)
self.other_example = Example(lambda x: x)
self.example_group.append(self.example)
self.example_group.append(self.other_example)
self.example_group.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example.was_run).to(be_true)
expect(self.other_example.was_run).to(be_true)
| from mamba import description, before, context, it
from doublex import Spy
from expects import expect, be_true, be_false
from mamba import reporter, runnable
from mamba.example import Example
from mamba.example_group import ExampleGroup
from spec.object_mother import an_example_group
TAGS = ['any_tag']
with description('Example execution using tags') as self:
with before.each:
self.reporter = Spy(reporter.Reporter)
self.example_group = an_example_group()
self.example = Example(lambda x: x, parent=self.example_group,
tags=TAGS)
self.other_example = Example(lambda x: x, parent=self.example_group)
with context('when tag is included in example tags'):
with it('executes example'):
self.example.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example.was_run).to(be_true)
with context('when tag is not included in example tags'):
with it('does not execute example'):
self.other_example.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.other_example.was_run).to(be_false)
with context('when tag is included in example_group tags'):
with it('executes children'):
self.example_group = ExampleGroup('any example_group', tags=TAGS)
self.example = Example(lambda x: x)
self.other_example = Example(lambda x: x)
self.example_group.append(self.example)
self.example_group.append(self.other_example)
self.example_group.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example.was_run).to(be_true)
expect(self.other_example.was_run).to(be_true)
| Python | 0 |
cb1912234a058dd95c20cd765771552e76224c7a | fix c3 settings | chipyprj/chipyprj/settings.py | chipyprj/chipyprj/settings.py | """
Django settings for chipyprj project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f0&n*v0r5yi8j*aylxzdre*4l1oa#+bvbjrow_nx$lylati!yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chipyapp'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'chipyprj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chipyprj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mentorship',
'USER': 'mentorship',
'PASSWORD': 'mentorship123',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR+'/static-root/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | """
Django settings for chipyprj project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f0&n*v0r5yi8j*aylxzdre*4l1oa#+bvbjrow_nx$lylati!yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chipyapp'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'chipyprj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chipyprj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mentorship',
'USER': 'mentorship',
'PASSWORD': 'mentorship123',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR+'/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | Python | 0.000002 |
887bba729cd4f4f7391ac6f08ab7601976bcd1ca | Update __init__.py | templated_email/__init__.py | templated_email/__init__.py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from templated_email.backends.vanilla_django import TemplateBackend
import six
import warnings
warnings.filterwarnings('error', 'django.utils.importlib')
try:
# Django <= 1.7
from django.utils.importlib import import_module
except:
# Django >= 1.8
from importlib import import_module
def get_connection(backend=None, template_prefix=None, template_suffix=None,
fail_silently=False, **kwargs):
"""Load a templated e-mail backend and return an instance of it.
If backend is None (default) settings.TEMPLATED_EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
# This method is mostly a copy of the backend loader present in
# django.core.mail.get_connection
klass_path = backend or getattr(settings, 'TEMPLATED_EMAIL_BACKEND',
TemplateBackend)
if isinstance(klass_path, six.string_types):
try:
# First check if class name is omitted and we have module in settings
mod = import_module(klass_path)
klass_name = 'TemplateBackend'
except ImportError as e:
# Fallback to class name
try:
mod_name, klass_name = klass_path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImproperlyConfigured(
('Error importing templated email backend module %s: "%s"'
% (mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
else:
klass = klass_path
return klass(fail_silently=fail_silently, template_prefix=template_prefix,
template_suffix=template_suffix, **kwargs)
def get_templated_mail(template_name, context, from_email=None, to=None,
cc=None, bcc=None, headers=None,
template_prefix=None, template_suffix=None,
template_dir=None, file_extension=None):
"""Returns a templated EmailMessage instance without a connection using
the django templating backend."""
template_prefix = template_prefix or template_dir
template_suffix = template_suffix or file_extension
templater = TemplateBackend(template_prefix=template_prefix,
template_suffix=template_suffix)
return templater.get_email_message(template_name, context,
from_email=from_email, to=to,
cc=cc, bcc=bcc, headers=headers,
template_prefix=template_prefix,
template_suffix=template_suffix)
def send_templated_mail(template_name, from_email, recipient_list, context,
cc=None, bcc=None, fail_silently=False, connection=None,
headers=None, template_prefix=None,
template_suffix=None, **kwargs):
"""Easy wrapper for sending a templated email to a recipient list.
Final behaviour of sending depends on the currently selected engine.
See BackendClass.send.__doc__
"""
connection = connection or get_connection(template_prefix=template_prefix,
template_suffix=template_suffix)
return connection.send(template_name, from_email, recipient_list, context,
cc=cc, bcc=bcc, fail_silently=fail_silently,
headers=headers, **kwargs)
| from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from templated_email.backends.vanilla_django import TemplateBackend
import warnings
warnings.filterwarnings('error', 'django.utils.importlib')
try:
# Django <= 1.7
from django.utils.importlib import import_module
except:
# Django >= 1.8
from importlib import import_module
import six
def get_connection(backend=None, template_prefix=None, template_suffix=None,
fail_silently=False, **kwargs):
"""Load a templated e-mail backend and return an instance of it.
If backend is None (default) settings.TEMPLATED_EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
# This method is mostly a copy of the backend loader present in
# django.core.mail.get_connection
klass_path = backend or getattr(settings, 'TEMPLATED_EMAIL_BACKEND',
TemplateBackend)
if isinstance(klass_path, six.string_types):
try:
# First check if class name is omitted and we have module in settings
mod = import_module(klass_path)
klass_name = 'TemplateBackend'
except ImportError as e:
# Fallback to class name
try:
mod_name, klass_name = klass_path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImproperlyConfigured(
('Error importing templated email backend module %s: "%s"'
% (mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
else:
klass = klass_path
return klass(fail_silently=fail_silently, template_prefix=template_prefix,
template_suffix=template_suffix, **kwargs)
def get_templated_mail(template_name, context, from_email=None, to=None,
cc=None, bcc=None, headers=None,
template_prefix=None, template_suffix=None,
template_dir=None, file_extension=None):
"""Returns a templated EmailMessage instance without a connection using
the django templating backend."""
template_prefix = template_prefix or template_dir
template_suffix = template_suffix or file_extension
templater = TemplateBackend(template_prefix=template_prefix,
template_suffix=template_suffix)
return templater.get_email_message(template_name, context,
from_email=from_email, to=to,
cc=cc, bcc=bcc, headers=headers,
template_prefix=template_prefix,
template_suffix=template_suffix)
def send_templated_mail(template_name, from_email, recipient_list, context,
cc=None, bcc=None, fail_silently=False, connection=None,
headers=None, template_prefix=None,
template_suffix=None, **kwargs):
"""Easy wrapper for sending a templated email to a recipient list.
Final behaviour of sending depends on the currently selected engine.
See BackendClass.send.__doc__
"""
connection = connection or get_connection(template_prefix=template_prefix,
template_suffix=template_suffix)
return connection.send(template_name, from_email, recipient_list, context,
cc=cc, bcc=bcc, fail_silently=fail_silently,
headers=headers, **kwargs)
| Python | 0.000072 |
d57fb3ca8c1f4329c8ac90cb785b27123d98aee5 | Bump the version to 0.3.1 | backlog/__init__.py | backlog/__init__.py | """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.1'
| """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.0'
| Python | 0.999999 |
eef0663675b741d32f399bdbca1a95b943a1fb68 | Create a script that uses an autoencoder to extract bow | bag-of-words/bow.py | bag-of-words/bow.py | #!/usr/bin/env python
"""This module contains function and classes relevant with the computation of
a bag of words model. At start we suppose that suitable descriptors for our
dataset are already extracted. Subsequently we procceed to the second step of
quantization, in this step we use a clustering algorithm such as Kmeas to
create our visual vocabulary. At the end in the final step we represent all our
features according to the previously caclulated vocabulary.
"""
import numpy as np
from sklearn.cluster import KMeans
class Encoding:
"""This class is responsible for computing a Bag of Words model"""
def __init__(self, n_codewords, iterations, clusterer=KMeans):
"""Initialize the class instance.
Parameters:
-----------
n_codewords: int
The number of clusters to be created. Each cluster's
centroid corresponds to a codeword.
iterations: int
The maximum number of iterations performed by the
clusterer.
clusterer: callable
A callable that when given the number of clusters it
returns a clusterer that implements the fit and predict
method.
"""
self.n_codewords = n_codewords
self.iterations = iterations
self._clusterer = clusterer(
n_clusters=n_codewords,
max_iter=iterations
)
@property
def centroids(self):
"""The centroids of the encoding"""
return self._clusterer.cluster_centers_.copy()
@centroids.setter
def centroids(self, centroids):
self._clusterer.cluster_centers_ = centroids.copy()
def fit(self, data):
"""Build a visual dictionary for the Bag of Words model.
Apply a clustering algorithm to the data, the default option is Kmeans,
in order to create a suitable visual vocabulary. If Kmeans is chosen,
every centroid corresponds to a visual codeword of our vocabulary
Parameters:
-----------
data: array_like
Data of datapoints used to create visual vocabulary.
"""
# Compute clustering
self._clusterer.fit(data)
def encode(self, data, density):
"""Encode a list of data using the learnt Bag of Words model
Parameters:
-----------
data: array_like
List of data points that will be encoded using the already
computed Bag of Words model
"""
# If there are no features for a specific video return a zero array
if len(data) == 0:
return np.zeros(self.n_codewords)
# Represent each datapoint as histogram. When n_codewords is sequence
# bins arguement corresponds to bin edges, this is the reason why we
# add 1. Moreover we subtract 0.5 so that each bin's label is in the
# middle of it's corresponding bin.
hist, edges = np.histogram(
self._clusterer.predict(data),
bins=np.arange(self.n_codewords + 1) - .5,
density=density
)
return hist
| #!/usr/bin/env python
"""This module contains function and classes relevant with the computation of
a bag of words model. At start we suppose that suitable descriptors for our
dataset are already extracted. Subsequently we procceed to the second step of
quantization, in this step we use a clustering algorithm such as Kmeas to
create our visual vocabulary. At the end in the final step we represent all our
features according to the previously caclulated vocabulary.
"""
import numpy as np
from sklearn.cluster import KMeans
class Encoding:
"""This class is responsible for computing a Bag of Words model"""
def __init__(self, n_codewords, iterations, clusterer=KMeans):
"""Initialize the class instance.
Parameters:
-----------
n_codewords: int
The number of clusters to be created. Each cluster's
centroid corresponds to a codeword.
iterations: int
The maximum number of iterations performed by the
clusterer.
clusterer: callable
A callable that when given the number of clusters it
returns a clusterer that implements the fit and predict
method.
"""
self.n_codewords = n_codewords
self.iterations = iterations
self._clusterer = clusterer(
n_clusters=n_codewords,
max_iter=iterations
)
def fit(self, data):
"""Build a visual dictionary for the Bag of Words model.
Apply a clustering algorithm to the data, the default option is Kmeans,
in order to create a suitable visual vocabulary. If Kmeans is chosen,
every centroid corresponds to a visual codeword of our vocabulary
Parameters:
-----------
data: array_like
Data of datapoints used to create visual vocabulary.
"""
# Compute clustering
self._clusterer.fit(data)
def encode(self, data, density):
"""Encode a list of data using the learnt Bag of Words model
Parameters:
-----------
data: array_like
List of data points that will be encoded using the already
computed Bag of Words model
"""
# If there are no features for a specific video return a zero array
if len(data) == 0:
return np.zeros(self.n_codewords)
# Represent each datapoint as histogram. When n_codewords is sequence
# bins arguement corresponds to bin edges, this is the reason why we
# add 1. Moreover we subtract 0.5 so that each bin's label is in the
# middle of it's corresponding bin.
hist, edges = np.histogram(
self._clusterer.predict(data),
bins=np.arange(self.n_codewords + 1) - .5,
density=density
)
return hist
| Python | 0.000001 |
0f782215e58eba53b72667bffde667f4d03a0d4a | Update version. | client/version.py | client/version.py | __version__ = '0.2.0'
| __version__ = '0.1.9'
| Python | 0 |
6749060a7546b7dee3c6e643c7dfad4db7934061 | package for release | cliez/__init__.py | cliez/__init__.py | # -*- coding: utf-8 -*-
version = "1.6.10"
version_info = (1, 6, 10)
| # -*- coding: utf-8 -*-
version = "1.6.9"
version_info = (1, 6, 9)
| Python | 0 |
27c7270a170a8eb3c2720390ab6e95d6bf16f8e3 | fix option name to driver (#24) | cligj/__init__.py | cligj/__init__.py | # cligj
# Shared arguments and options.
import click
from .features import normalize_feature_inputs
# Arguments.
# Multiple input files.
files_in_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS...")
# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS... OUTPUT")
# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
'features',
nargs=-1,
callback=normalize_feature_inputs,
metavar="FEATURES...")
# Options.
verbose_opt = click.option(
'--verbose', '-v',
count=True,
help="Increase verbosity.")
quiet_opt = click.option(
'--quiet', '-q',
count=True,
help="Decrease verbosity.")
# Format driver option.
format_opt = click.option(
'-f', '--format', '--driver', 'driver',
default='GTiff',
help="Output format driver")
# JSON formatting options.
indent_opt = click.option(
'--indent',
type=int,
default=None,
help="Indentation level for JSON output")
compact_opt = click.option(
'--compact/--not-compact',
default=False,
help="Use compact separators (',', ':').")
# Coordinate precision option.
precision_opt = click.option(
'--precision',
type=int,
default=-1,
help="Decimal precision of coordinates.")
# Geographic (default), projected, or Mercator switch.
projection_geographic_opt = click.option(
'--geographic',
'projection',
flag_value='geographic',
default=True,
help="Output in geographic coordinates (the default).")
projection_projected_opt = click.option(
'--projected',
'projection',
flag_value='projected',
help="Output in dataset's own, projected coordinates.")
projection_mercator_opt = click.option(
'--mercator',
'projection',
flag_value='mercator',
help="Output in Web Mercator coordinates.")
# Feature collection or feature sequence switch.
sequence_opt = click.option(
'--sequence/--no-sequence',
default=False,
help="Write a LF-delimited sequence of texts containing individual "
"objects or write a single JSON text containing a feature "
"collection object (the default).")
use_rs_opt = click.option(
'--rs/--no-rs',
'use_rs',
default=False,
help="Use RS (0x1E) as a prefix for individual texts in a sequence "
"as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
"(default is False).")
# GeoJSON output mode option.
def geojson_type_collection_opt(default=False):
return click.option(
'--collection',
'geojson_type',
flag_value='collection',
default=default,
help="Output as GeoJSON feature collection(s).")
def geojson_type_feature_opt(default=False):
return click.option(
'--feature',
'geojson_type',
flag_value='feature',
default=default,
help="Output as GeoJSON feature(s).")
def geojson_type_bbox_opt(default=False):
return click.option(
'--bbox',
'geojson_type',
flag_value='bbox',
default=default,
help="Output as GeoJSON bounding box array(s).")
| # cligj
# Shared arguments and options.
import click
from .features import normalize_feature_inputs
# Arguments.
# Multiple input files.
files_in_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS...")
# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS... OUTPUT")
# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
'features',
nargs=-1,
callback=normalize_feature_inputs,
metavar="FEATURES...")
# Options.
verbose_opt = click.option(
'--verbose', '-v',
count=True,
help="Increase verbosity.")
quiet_opt = click.option(
'--quiet', '-q',
count=True,
help="Decrease verbosity.")
# Format driver option.
format_opt = click.option(
'-f', '--format', '--driver',
default='GTiff',
help="Output format driver")
# JSON formatting options.
indent_opt = click.option(
'--indent',
type=int,
default=None,
help="Indentation level for JSON output")
compact_opt = click.option(
'--compact/--not-compact',
default=False,
help="Use compact separators (',', ':').")
# Coordinate precision option.
precision_opt = click.option(
'--precision',
type=int,
default=-1,
help="Decimal precision of coordinates.")
# Geographic (default), projected, or Mercator switch.
projection_geographic_opt = click.option(
'--geographic',
'projection',
flag_value='geographic',
default=True,
help="Output in geographic coordinates (the default).")
projection_projected_opt = click.option(
'--projected',
'projection',
flag_value='projected',
help="Output in dataset's own, projected coordinates.")
projection_mercator_opt = click.option(
'--mercator',
'projection',
flag_value='mercator',
help="Output in Web Mercator coordinates.")
# Feature collection or feature sequence switch.
sequence_opt = click.option(
'--sequence/--no-sequence',
default=False,
help="Write a LF-delimited sequence of texts containing individual "
"objects or write a single JSON text containing a feature "
"collection object (the default).")
use_rs_opt = click.option(
'--rs/--no-rs',
'use_rs',
default=False,
help="Use RS (0x1E) as a prefix for individual texts in a sequence "
"as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
"(default is False).")
# GeoJSON output mode option.
def geojson_type_collection_opt(default=False):
return click.option(
'--collection',
'geojson_type',
flag_value='collection',
default=default,
help="Output as GeoJSON feature collection(s).")
def geojson_type_feature_opt(default=False):
return click.option(
'--feature',
'geojson_type',
flag_value='feature',
default=default,
help="Output as GeoJSON feature(s).")
def geojson_type_bbox_opt(default=False):
return click.option(
'--bbox',
'geojson_type',
flag_value='bbox',
default=default,
help="Output as GeoJSON bounding box array(s).")
| Python | 0 |
05f45992e871dc0d98613fb31269c43e21869414 | Add envy help command | cloudenvy/main.py | cloudenvy/main.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from cloudenvy.config import EnvyConfig
from cloudenvy.commands.envy_up import EnvyUp
from cloudenvy.commands.envy_list import EnvyList
from cloudenvy.commands.envy_provision import EnvyProvision
from cloudenvy.commands.envy_snapshot import EnvySnapshot
from cloudenvy.commands.envy_ip import EnvyIP
from cloudenvy.commands.envy_scp import EnvySCP
from cloudenvy.commands.envy_dotfiles import EnvyDotfiles
from cloudenvy.commands.envy_ssh import EnvySSH
from cloudenvy.commands.envy_destroy import EnvyDestroy
from cloudenvy.commands.envy_run import EnvyRun
def _build_parser():
parser = argparse.ArgumentParser(
description='Launch a virtual machine in an openstack environment.')
parser.add_argument('-v', '--verbosity', action='count',
help='increase output verbosity')
parser.add_argument('-c', '--cloud', action='store',
help='specify which cloud to use')
subparsers = parser.add_subparsers(title='Available commands:')
# Load up all of the subparser classes
EnvyUp(subparsers)
EnvyList(subparsers)
EnvyProvision(subparsers)
EnvySnapshot(subparsers)
EnvyIP(subparsers)
EnvySCP(subparsers)
EnvyDotfiles(subparsers)
EnvySSH(subparsers)
EnvyDestroy(subparsers)
EnvyRun(subparsers)
def find_command_help(config, args):
if args.command:
subparsers.choices[args.command].print_help()
else:
parser.print_help()
help_subparser = subparsers.add_parser('help',
help='Display help information for a specfiic command')
help_subparser.add_argument('command', action='store', nargs='?')
help_subparser.set_defaults(func=find_command_help)
return parser
def main():
parser = _build_parser()
args = parser.parse_args()
config = EnvyConfig(args).get_config()
if args.verbosity == 3:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.DEBUG)
elif args.verbosity == 2:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.INFO)
elif args.verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
args.func(config, args)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from cloudenvy.config import EnvyConfig
from cloudenvy.commands.envy_up import EnvyUp
from cloudenvy.commands.envy_list import EnvyList
from cloudenvy.commands.envy_provision import EnvyProvision
from cloudenvy.commands.envy_snapshot import EnvySnapshot
from cloudenvy.commands.envy_ip import EnvyIP
from cloudenvy.commands.envy_scp import EnvySCP
from cloudenvy.commands.envy_dotfiles import EnvyDotfiles
from cloudenvy.commands.envy_ssh import EnvySSH
from cloudenvy.commands.envy_destroy import EnvyDestroy
from cloudenvy.commands.envy_run import EnvyRun
def _build_parser():
parser = argparse.ArgumentParser(
description='Launch a virtual machine in an openstack environment.')
parser.add_argument('-v', '--verbosity', action='count',
help='increase output verbosity')
parser.add_argument('-c', '--cloud', action='store',
help='specify which cloud to use')
subparsers = parser.add_subparsers(title='Available commands:')
# Load up all of the subparser classes
EnvyUp(subparsers)
EnvyList(subparsers)
EnvyProvision(subparsers)
EnvySnapshot(subparsers)
EnvyIP(subparsers)
EnvySCP(subparsers)
EnvyDotfiles(subparsers)
EnvySSH(subparsers)
EnvyDestroy(subparsers)
EnvyRun(subparsers)
return parser
def main():
parser = _build_parser()
args = parser.parse_args()
config = EnvyConfig(args).get_config()
if args.verbosity == 3:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.DEBUG)
elif args.verbosity == 2:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.INFO)
elif args.verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
args.func(config, args)
| Python | 0.999924 |
fabcd790578e904b3bff34fdf6e91edb4906a4e2 | Add missing comma in compat.gyp | compat/compat.gyp | compat/compat.gyp | # Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_compat',
'type': 'static_library',
'sources': [
'mac/AvailabilityMacros.h',
'mac/kern/exc_resource.h',
'mac/mach/mach.h',
'mac/mach-o/getsect.cc',
'mac/mach-o/getsect.h',
'mac/mach-o/loader.h',
'mac/sys/resource.h',
'non_mac/mach/mach.h',
'non_win/dbghelp.h',
'non_win/minwinbase.h',
'non_win/timezoneapi.h',
'non_win/verrsrc.h',
'non_win/windows.h',
'non_win/winnt.h',
'win/getopt.h',
'win/strings.cc',
'win/strings.h',
'win/sys/types.h',
'win/time.cc',
'win/time.h',
'win/winnt.h',
],
'conditions': [
['OS=="mac"', {
'dependencies': [
'../third_party/apple_cctools/apple_cctools.gyp:apple_cctools',
],
'include_dirs': [
'mac',
],
'direct_dependent_settings': {
'include_dirs': [
'mac',
],
},
}],
['OS=="win"', {
'include_dirs': [
'win',
],
'direct_dependent_settings': {
'include_dirs': [
'win',
],
},
'dependencies': [
'../third_party/getopt/getopt.gyp:getopt',
],
}, {
'include_dirs': [
'non_win',
],
'direct_dependent_settings': {
'include_dirs': [
'non_win',
],
},
}],
],
},
],
}
| # Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_compat',
'type': 'static_library',
'sources': [
'mac/AvailabilityMacros.h',
'mac/kern/exc_resource.h'
'mac/mach/mach.h',
'mac/mach-o/getsect.cc',
'mac/mach-o/getsect.h',
'mac/mach-o/loader.h',
'mac/sys/resource.h',
'non_mac/mach/mach.h',
'non_win/dbghelp.h',
'non_win/minwinbase.h',
'non_win/timezoneapi.h',
'non_win/verrsrc.h',
'non_win/windows.h',
'non_win/winnt.h',
'win/getopt.h',
'win/strings.cc',
'win/strings.h',
'win/sys/types.h',
'win/time.cc',
'win/time.h',
'win/winnt.h',
],
'conditions': [
['OS=="mac"', {
'dependencies': [
'../third_party/apple_cctools/apple_cctools.gyp:apple_cctools',
],
'include_dirs': [
'mac',
],
'direct_dependent_settings': {
'include_dirs': [
'mac',
],
},
}],
['OS=="win"', {
'include_dirs': [
'win',
],
'direct_dependent_settings': {
'include_dirs': [
'win',
],
},
'dependencies': [
'../third_party/getopt/getopt.gyp:getopt',
],
}, {
'include_dirs': [
'non_win',
],
'direct_dependent_settings': {
'include_dirs': [
'non_win',
],
},
}],
],
},
],
}
| Python | 0.000001 |
79cd3dc227ff1d13faa9581b6f22caa176db2360 | Mark Document.to_archive as abstract | c2corg_api/models/document.py | c2corg_api/models/document.py | from sqlalchemy import (
Column,
Integer,
Boolean,
String,
ForeignKey,
Enum
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
import abc
from c2corg_api.models import Base, schema
from utils import copy_attributes
quality_types = [
'stub',
'medium',
'correct',
'good',
'excellent'
]
class Culture(Base):
"""The supported languages.
"""
__tablename__ = 'cultures'
culture = Column(String(2), primary_key=True)
class _DocumentMixin(object):
"""
Contains the attributes that are common for `Document` and
`ArchiveDocument`.
"""
# move to metadata?
protected = Column(Boolean)
redirects_to = Column(Integer)
quality = Column(
Enum(name='quality_type', inherit_schema=True, *quality_types))
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class Document(Base, _DocumentMixin):
"""
The base class from which all document types will inherit. For each child
class (e.g. waypoint, route, ...) a separate table will be created, which
is linked to the base table via "joined table inheritance".
This table contains the current version of a document.
"""
__tablename__ = 'documents'
document_id = Column(Integer, primary_key=True)
# TODO constraint that there is at least one locale
locales = relationship('DocumentLocale')
_ATTRIBUTES = ['document_id', 'protected', 'redirects_to', 'quality']
@abc.abstractmethod
def to_archive(self):
"""Create an `Archive*` instance with the same attributes.
This method is supposed to be implemented by child classes.
"""
return
def to_archive(self, doc):
"""Copy the attributes of this document into a passed in
`Archive*` instance.
"""
copy_attributes(self, doc, Document._ATTRIBUTES)
return doc
def get_archive_locales(self):
return [locale.to_archive() for locale in self.locales]
class ArchiveDocument(Base, _DocumentMixin):
"""
The base class for the archive documents.
"""
__tablename__ = 'documents_archives'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, nullable=False) # TODO as fk
# Locales for documents
class _DocumentLocaleMixin(object):
id = Column(Integer, primary_key=True)
@declared_attr
def document_id(self):
return Column(
Integer, ForeignKey(schema + '.documents.document_id'),
nullable=False)
@declared_attr
def culture(self):
return Column(
String(2), ForeignKey(schema + '.cultures.culture'),
nullable=False)
title = Column(String(150), nullable=False)
description = Column(String)
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class DocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales'
_ATTRIBUTES = ['document_id', 'culture', 'title', 'description']
def to_archive(self, locale):
copy_attributes(self, locale, DocumentLocale._ATTRIBUTES)
return locale
class ArchiveDocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales_archives'
| from sqlalchemy import (
Column,
Integer,
Boolean,
String,
ForeignKey,
Enum
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from c2corg_api.models import Base, schema
from utils import copy_attributes
quality_types = [
'stub',
'medium',
'correct',
'good',
'excellent'
]
class Culture(Base):
"""The supported languages.
"""
__tablename__ = 'cultures'
culture = Column(String(2), primary_key=True)
class _DocumentMixin(object):
"""
Contains the attributes that are common for `Document` and
`ArchiveDocument`.
"""
# move to metadata?
protected = Column(Boolean)
redirects_to = Column(Integer)
quality = Column(
Enum(name='quality_type', inherit_schema=True, *quality_types))
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class Document(Base, _DocumentMixin):
"""
The base class from which all document types will inherit. For each child
class (e.g. waypoint, route, ...) a separate table will be created, which
is linked to the base table via "joined table inheritance".
This table contains the current version of a document.
"""
__tablename__ = 'documents'
document_id = Column(Integer, primary_key=True)
# TODO constraint that there is at least one locale
locales = relationship('DocumentLocale')
_ATTRIBUTES = ['document_id', 'protected', 'redirects_to', 'quality']
def to_archive(self, doc):
copy_attributes(self, doc, Document._ATTRIBUTES)
return doc
def get_archive_locales(self):
return [locale.to_archive() for locale in self.locales]
class ArchiveDocument(Base, _DocumentMixin):
"""
The base class for the archive documents.
"""
__tablename__ = 'documents_archives'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, nullable=False) # TODO as fk
# Locales for documents
class _DocumentLocaleMixin(object):
id = Column(Integer, primary_key=True)
@declared_attr
def document_id(self):
return Column(
Integer, ForeignKey(schema + '.documents.document_id'),
nullable=False)
@declared_attr
def culture(self):
return Column(
String(2), ForeignKey(schema + '.cultures.culture'),
nullable=False)
title = Column(String(150), nullable=False)
description = Column(String)
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class DocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales'
_ATTRIBUTES = ['document_id', 'culture', 'title', 'description']
def to_archive(self, locale):
copy_attributes(self, locale, DocumentLocale._ATTRIBUTES)
return locale
class ArchiveDocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales_archives'
| Python | 0.001214 |
41bb51f7f0aa1fda927af51498ec1acbf9eeddcc | fix history links | leaguebot/services/alerters/slack.py | leaguebot/services/alerters/slack.py | from leaguebot import app
import leaguebot.models.map as screepmap
import leaguebot.services.screeps as screeps
import leaguebot.services.slack as slack
import re
def sendBattleMessage(battleinfo):
message = getBattleMessageText(battleinfo)
sendToSlack(message)
def getBattleMessageText(battleinfo):
room_name = battleinfo['_id']
room_owner = screepmap.getRoomOwner(room_name)
pvp_time = str(battleinfo['lastPvpTime'])
history_link = '<https://screeps.com/a/#!/history/' + room_name + '?t=' + pvp_time + '|' + pvp_time + '>'
message = history_link + ' - Battle: ' + '<https://screeps.com/a/#!/room/' + room_name + '|' + room_name + '>'
if not room_owner:
return message
room_level = screepmap.getRoomLevel(room_name)
if room_level and room_level > 0:
message += ' RCL ' + str(room_level)
message += ', defender ' + '<https://screeps.com/a/#!/profile/' + room_owner + '|' + room_owner + '>'
room_alliance = screepmap.getUserAlliance(room_owner)
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendNukeMessage(nukeinfo):
message = getNukeMessageText(nukeinfo)
sendToSlack(message)
def getNukeMessageText(nukeinfo):
tick = screeps.get_time()
eta = str(nukeinfo['landTime']-tick)
room_name = nukeinfo['room']
room_owner = screepmap.getRoomOwner(room_name)
message = str(tick) + ' - Nuke: ' + room_name + ' in ' + str(eta) + ' ticks'
if not room_owner:
message += ', abandoned'
else:
room_alliance = screepmap.getUserAlliance(room_owner)
message += ', defender ' + room_owner
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendToSlack(message):
if 'SEND_TO_SLACK' not in app.config or not app.config['SEND_TO_SLACK']:
return False
try:
channel = app.config['SLACK_CHANNEL']
slack.send_slack_message(channel, message)
print (message)
return True
except:
return False
| from leaguebot import app
import leaguebot.models.map as screepmap
import leaguebot.services.screeps as screeps
import leaguebot.services.slack as slack
import re
def sendBattleMessage(battleinfo):
message = getBattleMessageText(battleinfo)
sendToSlack(message)
def getBattleMessageText(battleinfo):
room_name = battleinfo['_id']
room_owner = screepmap.getRoomOwner(room_name)
pvp_time = str(battleinfo['lastPvpTime'])
history_link = '<https://screeps.com/a/#!/history/E53N64?t=' + pvp_time + '|' + pvp_time + '>'
message = history_link + ' - Battle: ' + '<https://screeps.com/a/#!/room/' + room_name + '|' + room_name + '>'
if not room_owner:
return message
room_level = screepmap.getRoomLevel(room_name)
if room_level and room_level > 0:
message += ' RCL ' + str(room_level)
message += ', defender ' + '<https://screeps.com/a/#!/profile/' + room_owner + '|' + room_owner + '>'
room_alliance = screepmap.getUserAlliance(room_owner)
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendNukeMessage(nukeinfo):
message = getNukeMessageText(nukeinfo)
sendToSlack(message)
def getNukeMessageText(nukeinfo):
tick = screeps.get_time()
eta = str(nukeinfo['landTime']-tick)
room_name = nukeinfo['room']
room_owner = screepmap.getRoomOwner(room_name)
message = str(tick) + ' - Nuke: ' + room_name + ' in ' + str(eta) + ' ticks'
if not room_owner:
message += ', abandoned'
else:
room_alliance = screepmap.getUserAlliance(room_owner)
message += ', defender ' + room_owner
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendToSlack(message):
if 'SEND_TO_SLACK' not in app.config or not app.config['SEND_TO_SLACK']:
return False
try:
channel = app.config['SLACK_CHANNEL']
slack.send_slack_message(channel, message)
print (message)
return True
except:
return False
| Python | 0 |
2c9a0f9783c72af122d7c728a7760c8a2027d45f | Fix remove debug print | tests/resolver_test.py | tests/resolver_test.py | from gnr.core.gnrbag import Bag, BagCbResolver
def hello(x=''):
return 'i say : %s ' % x
b = Bag()
b.setCallBackItem('say_hello', hello, x='hello')
b.setCallBackItem('say_muu', hello, x='muu')
b.setCallBackItem('say_buzbuz', hello, x='buzbuz')
resolver = BagCbResolver(hello, x='fatto da resolver e non da setCallBackItem')
b.setItem('say_resolver', resolver) | from gnr.core.gnrbag import Bag, BagCbResolver
def hello(x=''):
return 'i say : %s ' % x
b = Bag()
b.setCallBackItem('say_hello', hello, x='hello')
b.setCallBackItem('say_muu', hello, x='muu')
b.setCallBackItem('say_buzbuz', hello, x='buzbuz')
resolver = BagCbResolver(hello, x='fatto da resolver e non da setCallBackItem')
b.setItem('say_resolver', resolver)
print b['say_hello']
print b['say_muu']
print b['say_buzbuz']
print b['say_resolver'] | Python | 0.000019 |
539c11706d91db92e36f49694603f2ed668d8cbb | Add a __unicode__ method to the Book model. Will show book title in admin instead of "Book object". | test_environment/books/models.py | test_environment/books/models.py | from dockit.schema import Document, Schema, ModelReferenceField, \
TextField, DictField, SchemaField, FileField, IntegerField, \
ReferenceField, ListField, GenericSchemaField, CharField, DateField
from django.contrib.auth.models import User
class Author(Document):
user = ModelReferenceField(User)
internal_id = TextField()
class Meta:
collection = 'author'
class Address(Schema):
street_1 = TextField()
street_2 = TextField(blank=True)
city = TextField()
postal_code = TextField()
region = TextField()
country = TextField()
extra_data = DictField(blank=True)
class Publisher(Document):
name = TextField()
address = SchemaField(Address)
def __unicode__(self):
return self.name
class Meta:
collection = 'publisher'
class Book(Document):
title = TextField()
cover_image = FileField(upload_to='book-images')
year = IntegerField()
publisher = ReferenceField(Publisher)
authors = ListField(ReferenceField(Author), db_index=True)
tags = ListField(TextField(), db_index=True)
def __unicode__(self):
return self.title
class Meta:
collection = 'book'
Book.objects.index('tags').commit()
class SubComplexTwo(Schema):
field2 = TextField()
class SubComplexOne(Schema):
field1 = TextField()
nested = SchemaField(SubComplexTwo)
class ComplexObject(Document):
field1 = TextField()
image = FileField(upload_to='complex-images', blank=True)
addresses = ListField(SchemaField(Address), blank=True)
main_address = SchemaField(Address, blank=True)
generic_objects = ListField(GenericSchemaField(), blank=True)
nested = SchemaField(SubComplexOne, blank=True)
def __unicode__(self):
return unicode(self.field1)
class Meta:
collection = 'complex_object'
class Publication(Document):
name = CharField()
date = DateField()
class Meta:
typed_field = '_type'
class Newspaper(Publication):
city = CharField()
class Meta:
typed_key = 'newspaper'
class Magazine(Publication):
issue_number = CharField()
class Meta:
typed_key = 'magazine'
class BaseProduct(Document):
name = CharField()
class Meta:
typed_field = '_type'
class Brand(Document):
name = CharField()
products = ListField(SchemaField(BaseProduct))
class Shoes(BaseProduct):
class Meta:
typed_key = 'shoes'
class Shirt(BaseProduct):
class Meta:
typed_key = 'shirt'
| from dockit.schema import Document, Schema, ModelReferenceField, \
TextField, DictField, SchemaField, FileField, IntegerField, \
ReferenceField, ListField, GenericSchemaField, CharField, DateField
from django.contrib.auth.models import User
class Author(Document):
user = ModelReferenceField(User)
internal_id = TextField()
class Meta:
collection = 'author'
class Address(Schema):
street_1 = TextField()
street_2 = TextField(blank=True)
city = TextField()
postal_code = TextField()
region = TextField()
country = TextField()
extra_data = DictField(blank=True)
class Publisher(Document):
name = TextField()
address = SchemaField(Address)
def __unicode__(self):
return self.name
class Meta:
collection = 'publisher'
class Book(Document):
title = TextField()
cover_image = FileField(upload_to='book-images')
year = IntegerField()
publisher = ReferenceField(Publisher)
authors = ListField(ReferenceField(Author), db_index=True)
tags = ListField(TextField(), db_index=True)
class Meta:
collection = 'book'
Book.objects.index('tags').commit()
class SubComplexTwo(Schema):
field2 = TextField()
class SubComplexOne(Schema):
field1 = TextField()
nested = SchemaField(SubComplexTwo)
class ComplexObject(Document):
field1 = TextField()
image = FileField(upload_to='complex-images', blank=True)
addresses = ListField(SchemaField(Address), blank=True)
main_address = SchemaField(Address, blank=True)
generic_objects = ListField(GenericSchemaField(), blank=True)
nested = SchemaField(SubComplexOne, blank=True)
def __unicode__(self):
return unicode(self.field1)
class Meta:
collection = 'complex_object'
class Publication(Document):
name = CharField()
date = DateField()
class Meta:
typed_field = '_type'
class Newspaper(Publication):
city = CharField()
class Meta:
typed_key = 'newspaper'
class Magazine(Publication):
issue_number = CharField()
class Meta:
typed_key = 'magazine'
class BaseProduct(Document):
name = CharField()
class Meta:
typed_field = '_type'
class Brand(Document):
name = CharField()
products = ListField(SchemaField(BaseProduct))
class Shoes(BaseProduct):
class Meta:
typed_key = 'shoes'
class Shirt(BaseProduct):
class Meta:
typed_key = 'shirt'
| Python | 0 |
925c6a53eea4b8ca4e3243a5a651873efb72ef4d | use jinja2 templating for olog message | bluesky/callbacks/olog.py | bluesky/callbacks/olog.py | from io import StringIO
# Jinja2 template strings used to render an Olog entry from a bluesky
# run-start document.  Each template is rendered with the start document
# bound to the variable ``start``.
TEMPLATES = {}

# 'long': the full entry body -- plan type/args, optional call signature,
# and all remaining metadata keys.  Used as an attachment because some
# Olog CSS renders the description field on a single line.
TEMPLATES['long'] = """
{{- start.plan_type }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})
Scan Plan
---------
{{ start.plan_type }}
{%- for k, v in start.plan_args.items() %}
    {{ k }}: {{ v }}
{%- endfor %}
{% if 'signature' in start -%}
Call:
{{ start.signature }}
{% endif %}
Metadata
--------
{% for k, v in start.items() -%}
{%- if k not in ['plan_type', 'plan_args'] -%}{{ k }} : {{ v }}
{% endif -%}
{%- endfor -%}"""

# 'desc': one-line summary used as the log entry's 'text' (description).
TEMPLATES['desc'] = """
{{- start.plan_type }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})"""

# 'call': an evalable ``RE(plan(...))`` invocation reconstructed from the
# plan type and arguments recorded in the start document.
TEMPLATES['call'] = """RE({{ start.plan_type }}(
{%- for k, v in start.plan_args.items() %}{%- if not loop.first %} {% endif %}{{ k }}={{ v }}
{%- if not loop.last %},
{% endif %}{% endfor %}))
"""
def logbook_cb_factory(logbook_func, desc_template=None, long_template=None):
    """Create an Olog run_start callback for the bluesky RunEngine.

    The returned function is suitable for registering as a 'start'
    callback on the BlueSky run engine.  For each run it renders a short
    description (used as the log entry text) and a long summary (added
    as a file-like attachment) from the run-start document, then passes
    both to ``logbook_func``.

    Parameters
    ----------
    logbook_func : callable
        The required signature is ::

            def logbook_func(text=None, logbooks=None, tags=None,
                             properties=None, attachments=None,
                             verify=True, ensure=False):
                '''
                Parameters
                ----------
                text : string
                    The body of the log entry.
                logbooks : string or list of strings
                    The logbooks which to add the log entry to.
                tags : string or list of strings
                    The tags to add to the log entry.
                properties : dict of property dicts
                    The properties to add to the log entry
                attachments : list of file like objects
                    The attachments to add to the log entry
                verify : bool
                    Check that properties, tags and logbooks are in the
                    Olog instance.
                ensure : bool
                    If a property, tag or logbook is not in the Olog then
                    create the property, tag or logbook before making the
                    log entry.  Setting ensure to True will set verify to
                    False.
                '''
                pass

        This matches the API on `SimpleOlogClient.log`.
    desc_template : str, optional
        Jinja2 template for the short, one-line entry text.  Defaults
        to ``TEMPLATES['desc']``.
    long_template : str, optional
        Jinja2 template for the long entry body, attached as a file.
        Defaults to ``TEMPLATES['long']``.

    Returns
    -------
    lbcb : callable
        A ``(name, doc)`` callback that ignores every document except
        'start' documents.
    """
    import jinja2
    env = jinja2.Environment()
    if long_template is None:
        long_template = TEMPLATES['long']
    if desc_template is None:
        desc_template = TEMPLATES['desc']
    # It seems that the olog only has one text field, which it calls
    # `text` on the python side and 'description' on the olog side.
    # There are some CSS applications that try to shove the entire
    # thing into a single line.  We work around this by producing two
    # strings: a long one which is added as an attachment and a short
    # one that goes in as the 'text' (used as the description).
    long_msg = env.from_string(long_template)
    desc_msg = env.from_string(desc_template)

    def lbcb(name, doc):
        # This only applies to 'start' Documents.
        if name != 'start':
            return
        atch = StringIO(long_msg.render(start=doc))
        desc = desc_msg.render(start=doc)
        # Pass the raw start document along as a property so it is
        # machine-readable on the Olog side.
        logbook_func(text=desc, properties={'start': doc},
                     attachments=[atch],
                     ensure=True)
    return lbcb
def call_str(start, call_template=None):
    """Given a start document, generate an evalable call string.

    The default template assumes that `plan_args` and `plan_type`
    are at the top level of the document.

    Parameters
    ----------
    start : dict
        A document which follows the runstart schema
    call_template : str, optional
        A jinja2 template rendered with ``render(start=start)``.
        If not provided, defaults to ``TEMPLATES['call']``.
    """
    import jinja2
    env = jinja2.Environment()
    if call_template is None:
        call_template = TEMPLATES['call']
    call_renderer = env.from_string(call_template)
    return call_renderer.render(start=start)
# NOTE(review): this function appears truncated/corrupted -- the ``.format``
# call below is missing its closing parentheses and references an undefined
# name ``t``; ``msg``, ``log_message`` and ``self`` are also unbound here.
# Preserved byte-for-byte; it is not valid Python as written.
def log(name, doc):
    # This only applies to 'start' Documents.
    # NOTE(review): the guard tests ``doc`` but the comment (and the usual
    # callback convention) suggests it should test ``name`` -- confirm intent.
    if doc != 'start':
        return
    input_message, = msg.args
    output = """
    Header uid: {{uid!r}}
    Scan Plan
    ---------
    {input_message}
    Metadata
    --------
    {{metadata!r}}
    """.format(input_message=t
    d = doc
    self.logbook(log_message, d)
def _run_engine_log_template(metadata):
template = []
for key in metadata:
template.append("{key}: {{{key}}}".format(key=key))
return '\n'.join(template)
def _logmsg(md):
    """Assemble the list of log-message lines for a scan metadata dict."""
    lines = ['Scan Class: {scn_cls}', '']
    lines.extend('{0}: {{{0}!r}}'.format(key) for key in md['plan_args'])
    lines.extend(['', 'To call:'])
    lines.extend(_call_str(md['plan_type'], md['plan_args']))
    return lines
def logmsg(self):
    """Return this scan's log message as one newline-joined string."""
    return '\n'.join(self._logmsg())
def logdict(self):
    """Return a dict of the instance's logged fields plus its class name ('scn_cls')."""
    summary = dict((name, getattr(self, name)) for name in self._fields)
    summary['scn_cls'] = type(self).__name__
    return summary
def _call_str(plan_type, plan_args):
args = []
for k, v in plan_args.items():
args.append("{k}={{{k}!r}}".format(k=k))
return ["RE({{scn_cls}}({args}))".format(args=', '.join(args)), ]
| Python | 0 |
d88013450d5e3ec62a3cb8b4fcfa2afbc173338b | remove from there as well | tests/safety/common.py | tests/safety/common.py | from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
DEFAULT = 0
DISABLE_DISENGAGE_ON_GAS = 1
DISABLE_STOCK_AEB = 2
RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def make_msg(bus, addr, length=8):
    """Build a ``CAN_FIFOMailBox_TypeDef`` cffi struct for the safety tests.

    ``addr`` is encoded into RIR as an extended identifier when it does not
    fit in 11 bits (>= 0x800), otherwise as a standard identifier;
    ``length`` and ``bus`` are packed into RDTR.
    NOTE(review): the shift amounts and flag bits (| 5, | 1, bus << 4) follow
    the firmware's mailbox register layout -- confirm against the C code.
    """
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    if addr >= 0x800:
        to_send[0].RIR = (addr << 3) | 5
    else:
        to_send[0].RIR = (addr << 21) | 1
    to_send[0].RDTR = length
    to_send[0].RDTR |= bus << 4
    return to_send
class StdTest:
    """Reusable safety-model checks shared by per-car test suites.

    Each helper receives a unittest-style test instance (``test``) that
    exposes ``test.safety`` plus message builders such as
    ``_brake_msg``/``_speed_msg``.
    """
    @staticmethod
    def test_relay_malfunction(test, addr, bus=0):
        # input is a test class and the address that, if seen on specified bus, triggers
        # the relay_malfunction protection logic: both tx_hook and fwd_hook are
        # expected to return failure
        test.assertFalse(test.safety.get_relay_malfunction())
        test.safety.safety_rx_hook(make_msg(bus, addr, 8))
        test.assertTrue(test.safety.get_relay_malfunction())
        for a in range(1, 0x800):
            for b in range(0, 3):
                test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))
                test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
    @staticmethod
    def test_manually_enable_controls_allowed(test):
        # controls_allowed setter/getter round-trip.
        test.safety.set_controls_allowed(1)
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.set_controls_allowed(0)
        test.assertFalse(test.safety.get_controls_allowed())
    @staticmethod
    def test_spam_can_buses(test, TX_MSGS):
        # Every (addr, bus) pair not whitelisted in TX_MSGS must be blocked.
        for addr in range(1, 0x800):
            for bus in range(0, 4):
                if all(addr != m[0] or bus != m[1] for m in TX_MSGS):
                    test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))
    @staticmethod
    def test_allow_brake_at_zero_speed(test):
        # Brake was already pressed
        test.safety.safety_rx_hook(test._speed_msg(0))
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.safety.set_controls_allowed(1)
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._brake_msg(0))
        test.assertTrue(test.safety.get_controls_allowed())
        # rising edge of brake should disengage
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertFalse(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._brake_msg(0))  # reset no brakes
    @staticmethod
    def test_not_allow_brake_when_moving(test, standstill_threshold):
        # Brake was already pressed
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.safety.set_controls_allowed(1)
        test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))
        test.safety.safety_rx_hook(test._brake_msg(1))
        # At or below the standstill threshold braking keeps controls engaged...
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))
        test.safety.safety_rx_hook(test._brake_msg(1))
        # ...but above it a brake press must disengage controls.
        test.assertFalse(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._speed_msg(0))
| from panda.tests.safety import libpandasafety_py
# Consecutive out-of-sequence message counters tolerated by the safety checks.
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
    # Bit flags for the panda "unsafe mode" setting; values are OR-able.
    DEFAULT = 0
    DISABLE_DISENGAGE_ON_GAS = 1
    DISABLE_STOCK_AEB = 2
    ENABLE_WEAK_STEERING_WHILE_NOT_ENGAGED = 4
    RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def make_msg(bus, addr, length=8):
    """Build a ``CAN_FIFOMailBox_TypeDef`` cffi struct; extended ID when
    ``addr`` >= 0x800, standard ID otherwise; ``length``/``bus`` go in RDTR."""
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    if addr >= 0x800:
        to_send[0].RIR = (addr << 3) | 5
    else:
        to_send[0].RIR = (addr << 21) | 1
    to_send[0].RDTR = length
    to_send[0].RDTR |= bus << 4
    return to_send
class StdTest:
    """Reusable safety-model checks shared by per-car test suites (``test``
    exposes ``test.safety`` plus ``_brake_msg``/``_speed_msg`` builders)."""
    @staticmethod
    def test_relay_malfunction(test, addr, bus=0):
        # input is a test class and the address that, if seen on specified bus, triggers
        # the relay_malfunction protection logic: both tx_hook and fwd_hook are
        # expected to return failure
        test.assertFalse(test.safety.get_relay_malfunction())
        test.safety.safety_rx_hook(make_msg(bus, addr, 8))
        test.assertTrue(test.safety.get_relay_malfunction())
        for a in range(1, 0x800):
            for b in range(0, 3):
                test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))
                test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
    @staticmethod
    def test_manually_enable_controls_allowed(test):
        # controls_allowed setter/getter round-trip.
        test.safety.set_controls_allowed(1)
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.set_controls_allowed(0)
        test.assertFalse(test.safety.get_controls_allowed())
    @staticmethod
    def test_spam_can_buses(test, TX_MSGS):
        # Every (addr, bus) pair not whitelisted in TX_MSGS must be blocked.
        for addr in range(1, 0x800):
            for bus in range(0, 4):
                if all(addr != m[0] or bus != m[1] for m in TX_MSGS):
                    test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))
    @staticmethod
    def test_allow_brake_at_zero_speed(test):
        # Brake was already pressed
        test.safety.safety_rx_hook(test._speed_msg(0))
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.safety.set_controls_allowed(1)
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._brake_msg(0))
        test.assertTrue(test.safety.get_controls_allowed())
        # rising edge of brake should disengage
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertFalse(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._brake_msg(0))  # reset no brakes
    @staticmethod
    def test_not_allow_brake_when_moving(test, standstill_threshold):
        # Brake was already pressed
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.safety.set_controls_allowed(1)
        test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertTrue(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))
        test.safety.safety_rx_hook(test._brake_msg(1))
        test.assertFalse(test.safety.get_controls_allowed())
        test.safety.safety_rx_hook(test._speed_msg(0))
| Python | 0 |
bfb048d9a1ac34cd07e0fc8d94c0e97d901ee096 | fix simple_parser | tests/simple_parser.py | tests/simple_parser.py |
from test_parser import *
import sys, os
# Make the local ``parser`` package importable regardless of the CWD the
# script is launched from: cd into this file's directory, then put the
# repository root on sys.path.
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.pardir))
import parser
if __name__ == "__main__":
    # Parse the first CLI argument as HTML (default "x") and print the
    # normalised tree dump.  Python 2 syntax (print statement).
    x = ""
    if len(sys.argv) > 1:
        x = sys.argv[1]
    else:
        x = "x"
    p = parser.HTMLParser()
    document = p.parse(StringIO.StringIO(x))
    print convertTreeDump(document.printTree())
| from test_parser import *
import sys
# NOTE(review): relies on being launched from the tests directory; ``os`` is
# only available here via ``from test_parser import *``.
os.chdir("..")
import parser
if __name__ == "__main__":
    # Parse the first CLI argument as HTML (default "x") and print the
    # normalised tree dump.  Python 2 syntax (print statement).
    x = ""
    if len(sys.argv) > 1:
        x = sys.argv[1]
    else:
        x = "x"
    p = parser.HTMLParser()
    document = p.parse(StringIO.StringIO(x))
    print convertTreeDump(document.printTree())
| Python | 0 |
24b02f5added52ac572ba24a90ef0c74c3fb7cd7 | use api.craft.ai for runtime url | local_demo.py | local_demo.py | #!/usr/bin/python
import os
import requests
import json
import subprocess
import time
def userInput(config):
    """Interactively prompt for every config value (Python 2 ``raw_input``).

    Blank answers fall back to the existing value in ``config``; if any
    value is still empty afterwards, the function recurses until all
    properties are provided.
    """
    def defaultValue(config, key):
        # Current value shown as the prompt default, or "" when unset.
        return config[key] if key in config else ""
    res = {}
    invalid = {}  # NOTE(review): dead assignment -- rebound to a list below.
    res['user_name'] = raw_input("your GitHub username (default = " + defaultValue(config, 'user_name') + "): ")
    res['project_name'] = raw_input("name of your SAC project on GitHub (default = " + defaultValue(config, 'project_name') + "): ")
    res['project_branch'] = raw_input("current working branch of your SAC project on GitHub (default = " + defaultValue(config, 'project_branch') + "): ")
    res['sac_app_id'] = raw_input("generated SAC app ID (default = " + defaultValue(config, 'sac_app_id') + "): ")
    res['sac_app_secret'] = raw_input("generated SAC app secret (default = " + defaultValue(config, 'sac_app_secret') + "): ")
    # Fill blanks from the previous config, then re-prompt if anything is
    # still missing.
    for k, v in res.items():
        if v == "": res[k] = defaultValue(config, k)
    invalid = [k for k, v in res.items() if v == ""]
    if len(invalid) > 0:
        print "invalid configuration: properties", invalid, "must be set"
        res = userInput(config)
    return res
config_file = open('config.json', 'r')
config = json.load(config_file)
invalid_properties = [k for k, v in config.items() if v == ""]
print "current configuration:", json.dumps(config, indent=2)
if len(invalid_properties) > 0:
print "invalid configuration: properties", invalid_properties, "must be set"
config = userInput(config)
if 'user_name' and 'project_name' and 'project_branch' and 'sac_app_id' and 'sac_app_secret' in config:
reply = str(raw_input('config file complete. do you wish to reset it? (y/n): ')).lower().strip()
if reply[0] == 'y':
config = userInput(config)
else:
config = userInput(config)
with open('config.json', 'w') as config_file:
json.dump(config, config_file, indent=2)
p = subprocess.Popen(["ngrok", "http", "8080"])
time.sleep(1.5)
# retrieving public url for exposed localhost:8080
headers = {'Content-Type': 'application/json'}
r = requests.get('http://127.0.0.1:4040/api/tunnels', headers=headers)
public_url = json.loads(r.text)['tunnels'][0]['public_url']
# setting environment variables with user input
os.environ["CRAFT_DEMO_SAC_USER"] = config['user_name']
os.environ["CRAFT_DEMO_SAC_PROJECT"] = config['project_name']
os.environ["CRAFT_DEMO_SAC_VERSION"] = config['project_branch']
os.environ["CRAFT_DEMO_SAC_APP_ID"] = config['sac_app_id']
os.environ["CRAFT_DEMO_SAC_APP_SECRET"] = config['sac_app_secret']
os.environ["CRAFT_DEMO_SAC_PORT"] = '8080'
os.environ["CRAFT_DEMO_SAC_URL"] = 'http://localhost:8080'
os.environ["CRAFT_DEMO_SAC_WS_URL"] = 'ws://localhost:8080'
os.environ["CRAFT_RUNTIME_SERVER_URL"] = 'https://api.craft.ai'
os.environ["CRAFT_HUB_URL"] = 'https://api.craft.ai'
os.environ["CRAFT_DEMO_SAC_ACTIONS_URL"] = public_url
subprocess.call(["python", "-u", "src/server/main.py"])
p.terminate()
| #!/usr/bin/python
import os
import requests
import json
import subprocess
import time
def userInput(config):
    """Prompt for every config value (py2 ``raw_input``); blank answers fall
    back to ``config``; recurses until all properties are non-empty."""
    def defaultValue(config, key):
        return config[key] if key in config else ""
    res = {}
    invalid = {}  # NOTE(review): dead assignment -- rebound to a list below.
    res['user_name'] = raw_input("your GitHub username (default = " + defaultValue(config, 'user_name') + "): ")
    res['project_name'] = raw_input("name of your SAC project on GitHub (default = " + defaultValue(config, 'project_name') + "): ")
    res['project_branch'] = raw_input("current working branch of your SAC project on GitHub (default = " + defaultValue(config, 'project_branch') + "): ")
    res['sac_app_id'] = raw_input("generated SAC app ID (default = " + defaultValue(config, 'sac_app_id') + "): ")
    res['sac_app_secret'] = raw_input("generated SAC app secret (default = " + defaultValue(config, 'sac_app_secret') + "): ")
    for k, v in res.items():
        if v == "": res[k] = defaultValue(config, k)
    invalid = [k for k, v in res.items() if v == ""]
    if len(invalid) > 0:
        print "invalid configuration: properties", invalid, "must be set"
        res = userInput(config)
    return res
# Load the saved configuration and report it.
config_file = open('config.json', 'r')
config = json.load(config_file)
invalid_properties = [k for k, v in config.items() if v == ""]
print "current configuration:", json.dumps(config, indent=2)
if len(invalid_properties) > 0:
    print "invalid configuration: properties", invalid_properties, "must be set"
    config = userInput(config)
# NOTE(review): BUG -- the ``and`` chain only applies ``in config`` to the
# last key; the other string literals are always truthy, so this reduces to
# "'sac_app_secret' in config".  Should be ``all(k in config for k in ...)``.
if 'user_name' and 'project_name' and 'project_branch' and 'sac_app_id' and 'sac_app_secret' in config:
    reply = str(raw_input('config file complete. do you wish to reset it? (y/n): ')).lower().strip()
    if reply[0] == 'y':
        config = userInput(config)
else:
    config = userInput(config)
# Persist the (possibly updated) configuration.
with open('config.json', 'w') as config_file:
    json.dump(config, config_file, indent=2)
# Expose localhost:8080 through an ngrok tunnel and give it time to start.
p = subprocess.Popen(["ngrok", "http", "8080"])
time.sleep(1.5)
# retrieving public url for exposed localhost:8080
headers = {'Content-Type': 'application/json'}
r = requests.get('http://127.0.0.1:4040/api/tunnels', headers=headers)
public_url = json.loads(r.text)['tunnels'][0]['public_url']
# setting environment variables with user input
os.environ["CRAFT_DEMO_SAC_USER"] = config['user_name']
os.environ["CRAFT_DEMO_SAC_PROJECT"] = config['project_name']
os.environ["CRAFT_DEMO_SAC_VERSION"] = config['project_branch']
os.environ["CRAFT_DEMO_SAC_APP_ID"] = config['sac_app_id']
os.environ["CRAFT_DEMO_SAC_APP_SECRET"] = config['sac_app_secret']
os.environ["CRAFT_DEMO_SAC_PORT"] = '8080'
os.environ["CRAFT_DEMO_SAC_URL"] = 'http://localhost:8080'
os.environ["CRAFT_DEMO_SAC_WS_URL"] = 'ws://localhost:8080'
os.environ["CRAFT_RUNTIME_SERVER_URL"] = 'https://api.craft.ai'
os.environ["CRAFT_HUB_URL"] = 'https://hub.craft.ai'
os.environ["CRAFT_DEMO_SAC_ACTIONS_URL"] = public_url
# Run the demo server in the foreground, then shut the tunnel down.
subprocess.call(["python", "-u", "src/server/main.py"])
p.terminate()
| Python | 0 |
a80646e0891dc5345caffd0acd12a073a0b47187 | add missing test | tests/testLimitator.py | tests/testLimitator.py | # -*- coding: utf-8 -*-
from tools.limitator import *
import unittest
import time
class TestLimitator(unittest.TestCase):
    """Rate-limiter behaviour tests.

    NOTE(review): ``Limitator(n, window[, flag])`` presumably allows ``n``
    calls per ``window`` seconds; the observed behaviour below suggests the
    third argument enables per-user accounting and the second ``next()``
    argument is a cost/weight -- confirm against tools/limitator.
    """
    USER1 = {"id":1}
    USER2 = {"id":2}
    def test_1(self):
        # Burst of 5 hits the limit; capacity refills as time passes.
        l = Limitator(5, 2)
        for i in range(5):
            l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        time.sleep(3)
        for i in range(2):
            l.next(self.USER1)
        time.sleep(1)
        for i in range(2):
            l.next(self.USER1)
        time.sleep(3)
        for i in range(5):
            l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_2(self):
        # Both users exhaust independent budgets of 5.
        l = Limitator(5, 2, True)
        for i in range(5):
            l.next(self.USER1)
        for i in range(5):
            l.next(self.USER2)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        try:
            l.next(self.USER2)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_3(self):
        # A single call with cost 5 consumes the whole budget at once.
        l = Limitator(5, 2)
        l.next(self.USER1, 5)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_4(self):
        # Same as test_3, in the per-user mode.
        l = Limitator(5, 2, True)
        l.next(self.USER1, 5)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_5(self):
        # Windows longer than a minute refill correctly after sleeping past them.
        l = Limitator(1, 61, True)
        l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        time.sleep(62)
        l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_6(self):
        # Partial costs accumulate: 3 + 3 exceeds 5, but 3 + 2 fits.
        l = Limitator(5, 2, True)
        l.next(self.USER1, 3)
        try:
            l.next(self.USER1, 3)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        l.next(self.USER1, 2)
| # -*- coding: utf-8 -*-
from tools.limitator import *
import unittest
import time
class TestLimitator(unittest.TestCase):
    """Rate-limiter behaviour tests (see tools/limitator for semantics)."""
    USER1 = {"id":1}
    USER2 = {"id":2}
    def test_1(self):
        # Burst of 5 hits the limit; capacity refills as time passes.
        l = Limitator(5, 2)
        for i in range(5):
            l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        time.sleep(3)
        for i in range(2):
            l.next(self.USER1)
        time.sleep(1)
        for i in range(2):
            l.next(self.USER1)
        time.sleep(3)
        for i in range(5):
            l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_2(self):
        # Both users exhaust independent budgets of 5.
        l = Limitator(5, 2, True)
        for i in range(5):
            l.next(self.USER1)
        for i in range(5):
            l.next(self.USER2)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        try:
            l.next(self.USER2)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_3(self):
        # A single call with cost 5 consumes the whole budget at once.
        l = Limitator(5, 2)
        l.next(self.USER1, 5)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_4(self):
        # Same as test_3, in the per-user mode.
        l = Limitator(5, 2, True)
        l.next(self.USER1, 5)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
    def test_5(self):
        # Windows longer than a minute refill correctly after sleeping past them.
        l = Limitator(1, 61, True)
        l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        time.sleep(62)
        l.next(self.USER1)
        try:
            l.next(self.USER1)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
| Python | 0.000288 |
88c9facf33771ecfccf9c3b8f6d3e37b3a2b648c | Revert the first 'speedup' | chainerrl/misc/collections.py | chainerrl/misc/collections.py | import itertools
import random
import six
class RandomAccessQueue(object):
    """FIFO queue with fast indexing

    Operations getitem, setitem, append, popleft, and len
    are amortized O(1)-time, if this data structure is used ephemerally.

    Implementation: the classic two-list queue.  ``_queue_back`` holds newly
    appended items in arrival order; ``_queue_front`` holds older items in
    *reversed* order, so the logical head is the last element of
    ``_queue_front`` (an O(1) list pop).
    """
    def __init__(self, *args, **kwargs):
        # ``maxlen`` is keyword-only; remaining args feed the list() ctor.
        self.maxlen = kwargs.pop('maxlen', None)
        assert self.maxlen is None or self.maxlen >= 0
        self._queue_front = []
        self._queue_back = list(*args, **kwargs)
        self._apply_maxlen()
    def _apply_maxlen(self):
        # Drop items from the head until the length bound is respected.
        if self.maxlen is not None:
            while len(self) > self.maxlen:
                self.popleft()
    def __iter__(self):
        # Oldest-to-newest: reversed front half, then the back half as-is.
        return itertools.chain(reversed(self._queue_front),
                               iter(self._queue_back))
    def __repr__(self):
        return "RandomAccessQueue({})".format(str(list(iter(self))))
    def __len__(self):
        return len(self._queue_front) + len(self._queue_back)
    def __getitem__(self, i):
        # Logical index i lives at _queue_front[~i] while i < len(front)
        # (front is stored reversed); otherwise at _queue_back[i - nf].
        # Negative indices resolve from the back half first.
        if i >= 0:
            nf = len(self._queue_front)
            if i < nf:
                return self._queue_front[~i]
            else:
                i -= nf
                if i < len(self._queue_back):
                    return self._queue_back[i]
                else:
                    raise IndexError("RandomAccessQueue index out of range")
        else:
            nb = len(self._queue_back)
            if i >= -nb:
                return self._queue_back[i]
            else:
                i += nb
                if i >= -len(self._queue_front):
                    return self._queue_front[~i]
                else:
                    raise IndexError("RandomAccessQueue index out of range")
    def __setitem__(self, i, x):
        # Mirror of __getitem__'s index arithmetic, assigning instead.
        if i >= 0:
            nf = len(self._queue_front)
            if i < nf:
                self._queue_front[~i] = x
            else:
                i -= nf
                if i < len(self._queue_back):
                    self._queue_back[i] = x
                else:
                    raise IndexError("RandomAccessQueue index out of range")
        else:
            nb = len(self._queue_back)
            if i >= -nb:
                self._queue_back[i] = x
            else:
                i += nb
                if i >= -len(self._queue_front):
                    self._queue_front[~i] = x
                else:
                    raise IndexError("RandomAccessQueue index out of range")
    def append(self, x):
        self._queue_back.append(x)
        if self.maxlen is not None and len(self) > self.maxlen:
            self.popleft()
    def extend(self, xs):
        self._queue_back.extend(xs)
        self._apply_maxlen()
    def popleft(self):
        # When the front half is exhausted, reverse the back half into it;
        # each element moves at most once, keeping popleft amortized O(1).
        if not self._queue_front:
            if not self._queue_back:
                raise IndexError("pop from empty RandomAccessQueue")
            self._queue_front = self._queue_back
            self._queue_back = []
            self._queue_front.reverse()
        return self._queue_front.pop()
    def _sample(self, k):
        """Return ``k`` random items, sampled without replacement."""
        n = len(self)
        if k > n:
            raise ValueError("Sample larger than population or is negative")
        # The following code is equivalent to
        # return [self[i] for i in random.sample(six.moves.range(n), k)]
        nf = len(self._queue_front)
        return [self._queue_front[i] if i < nf else self._queue_back[i - nf]
                for i in random.sample(six.moves.range(n), k)]
| import itertools
import numpy as np
class RandomAccessQueue(object):
    """FIFO queue with fast indexing

    Operations getitem, setitem, append, popleft, and len
    are amortized O(1)-time, if this data structure is used ephemerally.

    Implementation: two-list queue -- ``_queue_back`` holds new items in
    arrival order, ``_queue_front`` holds older items reversed so the head
    is an O(1) list pop.
    """
    def __init__(self, *args, **kwargs):
        # ``maxlen`` is keyword-only; remaining args feed the list() ctor.
        self.maxlen = kwargs.pop('maxlen', None)
        assert self.maxlen is None or self.maxlen >= 0
        self._queue_front = []
        self._queue_back = list(*args, **kwargs)
        self._apply_maxlen()
    def _apply_maxlen(self):
        # Drop items from the head until the length bound is respected.
        if self.maxlen is not None:
            while len(self) > self.maxlen:
                self.popleft()
    def __iter__(self):
        # Oldest-to-newest: reversed front half, then the back half as-is.
        return itertools.chain(reversed(self._queue_front),
                               iter(self._queue_back))
    def __repr__(self):
        return "RandomAccessQueue({})".format(str(list(iter(self))))
    def __len__(self):
        return len(self._queue_front) + len(self._queue_back)
    def __getitem__(self, i):
        # Logical index i lives at _queue_front[~i] while i < len(front)
        # (front is stored reversed); otherwise at _queue_back[i - nf].
        if i >= 0:
            nf = len(self._queue_front)
            if i < nf:
                return self._queue_front[~i]
            else:
                i -= nf
                if i < len(self._queue_back):
                    return self._queue_back[i]
                else:
                    raise IndexError("RandomAccessQueue index out of range")
        else:
            nb = len(self._queue_back)
            if i >= -nb:
                return self._queue_back[i]
            else:
                i += nb
                if i >= -len(self._queue_front):
                    return self._queue_front[~i]
                else:
                    raise IndexError("RandomAccessQueue index out of range")
    def __setitem__(self, i, x):
        # Mirror of __getitem__'s index arithmetic, assigning instead.
        if i >= 0:
            nf = len(self._queue_front)
            if i < nf:
                self._queue_front[~i] = x
            else:
                i -= nf
                if i < len(self._queue_back):
                    self._queue_back[i] = x
                else:
                    raise IndexError("RandomAccessQueue index out of range")
        else:
            nb = len(self._queue_back)
            if i >= -nb:
                self._queue_back[i] = x
            else:
                i += nb
                if i >= -len(self._queue_front):
                    self._queue_front[~i] = x
                else:
                    raise IndexError("RandomAccessQueue index out of range")
    def append(self, x):
        self._queue_back.append(x)
        if self.maxlen is not None and len(self) > self.maxlen:
            self.popleft()
    def extend(self, xs):
        self._queue_back.extend(xs)
        self._apply_maxlen()
    def popleft(self):
        # When the front half is exhausted, reverse the back half into it;
        # each element moves at most once, keeping popleft amortized O(1).
        if not self._queue_front:
            if not self._queue_back:
                raise IndexError("pop from empty RandomAccessQueue")
            self._queue_front = self._queue_back
            self._queue_back = []
            self._queue_front.reverse()
        return self._queue_front.pop()
    def _sample(self, k):
        # NOTE(review): np.random.choice defaults to replace=True, so unlike
        # random.sample this draws WITH replacement and never raises for
        # duplicate picks -- the k > n guard above is the only size check.
        n = len(self)
        if k > n:
            raise ValueError("Sample larger than population or is negative")
        # The following code is equivalent to
        # return [self[i] for i in np.random.choice(n, k)]
        nf = len(self._queue_front)
        return [self._queue_front[i] if i < nf else self._queue_back[i - nf]
                for i in np.random.choice(n, k)]
| Python | 0.000002 |
ae8f9c39cd75d837a4cb5a4cea4d3d11fd1cabed | Add additional test case for comments | tests/test_comments.py | tests/test_comments.py | from hypothesis_auto import auto_pytest_magic
from isort import comments
auto_pytest_magic(comments.parse)
auto_pytest_magic(comments.add_to_line)
def test_add_to_line():
assert comments.add_to_line([], "import os # comment", removed=True).strip() == "import os"
| from hypothesis_auto import auto_pytest_magic
from isort import comments
auto_pytest_magic(comments.parse)
auto_pytest_magic(comments.add_to_line)
| Python | 0 |
aa1008691e3433f8350d3f3a5e5d03d9c629a45c | Test for getting parameters back from ideal observer | tests/test_idealobs.py | tests/test_idealobs.py | import pytest
import scipy.io as sio
from pambox import idealobs
import numpy as np
@pytest.fixture
def data():
    # Reference percent-correct values used as the fitting target.
    return np.array([0.28032187, 1.07108181, 3.35513227, 8.66774961,
                     18.61914334, 33.63172026, 51.87228063, 69.72236134,
                     83.79127082, 92.72205919, 97.28779782, 99.16754416])
@pytest.fixture
def idealobs_parameters():
    # Expected (k, q, sigma_s, m) values recovered by the fit.
    return (3.74647303e+00, 5.15928999e-02, -9.09197905e-07, 8000.)
@pytest.fixture
def snr():
    # SNR axis in dB, -9..2 inclusive.
    return np.arange(-9, 3, 1)
@pytest.fixture
def snrenv(snr):
    # Log-spaced SNRenv values, one per SNR point.
    return 10. ** np.linspace(-2, 2, len(snr))
def test_fit_obs(data, snrenv, idealobs_parameters):
    """Fitting the observer to the reference data recovers k, q and sigma_s."""
    c = idealobs.IdealObs()
    c.fit_obs(snrenv, data)
    params = c.get_params()
    res = [params['k'], params['q'], params['sigma_s']]
    np.testing.assert_allclose(res, idealobs_parameters[0:3], atol=1e-5)
def test_psy_fn():
    """psy_fn matches the MATLAB reference psychometric function."""
    mat = sio.loadmat('./test_files/test_psychometric_function.mat')
    x = mat['x'][0]
    mu = 0.
    sigma = 1.0
    target = mat['p'][0]
    y = idealobs.psy_fn(x, mu, sigma)
    np.testing.assert_allclose(y, target)
def test_snr_env_to_pc(snrenv, idealobs_parameters, data):
    """snrenv_to_pc reproduces the reference percent-correct curve."""
    c = idealobs.IdealObs(k=1., q=0.5, sigma_s=0.6, m=8000.)
    pc = c.snrenv_to_pc(np.arange(0, 21))
    target = np.array([0.0000, 0.0025, 0.0267, 0.1327, 0.4403, 1.1314, 2.4278,
                       4.5518, 7.6788, 11.8990, 17.1955, 23.4442, 30.4320,
                       37.8885, 45.5214, 53.0503, 60.2323, 66.8786, 72.8613,
                       78.1116, 82.6125])
    np.testing.assert_allclose(pc, target, atol=1e-4)
def test_get_params():
    """get_params round-trips the constructor keyword arguments."""
    p = {'k':1, 'q':2, 'sigma_s':0.5, 'm':800}
    c = idealobs.IdealObs(**p)
    assert p == c.get_params()
| import pytest
import scipy.io as sio
from pambox import idealobs
import numpy as np
@pytest.fixture
def data():
    # Reference percent-correct values used as the fitting target.
    return np.array([0.28032187, 1.07108181, 3.35513227, 8.66774961,
                     18.61914334, 33.63172026, 51.87228063, 69.72236134,
                     83.79127082, 92.72205919, 97.28779782, 99.16754416])
@pytest.fixture
def idealobs_parameters():
    # Expected (k, q, sigma_s, m) values recovered by the fit.
    return (3.74647303e+00, 5.15928999e-02, -9.09197905e-07, 8000.)
@pytest.fixture
def snr():
    # SNR axis in dB, -9..2 inclusive.
    return np.arange(-9, 3, 1)
@pytest.fixture
def snrenv(snr):
    # Log-spaced SNRenv values, one per SNR point.
    return 10. ** np.linspace(-2, 2, len(snr))
def test_fit_obs(data, snrenv, idealobs_parameters):
    """Fitting the observer to the reference data recovers k, q and sigma_s."""
    c = idealobs.IdealObs()
    c.fit_obs(snrenv, data)
    params = c.get_params()
    res = [params['k'], params['q'], params['sigma_s']]
    np.testing.assert_allclose(res, idealobs_parameters[0:3], atol=1e-5)
def test_psy_fn():
    """psy_fn matches the MATLAB reference psychometric function."""
    mat = sio.loadmat('./test_files/test_psychometric_function.mat')
    x = mat['x'][0]
    mu = 0.
    sigma = 1.0
    target = mat['p'][0]
    y = idealobs.psy_fn(x, mu, sigma)
    np.testing.assert_allclose(y, target)
def test_snr_env_to_pc(snrenv, idealobs_parameters, data):
    """snrenv_to_pc reproduces the reference percent-correct curve."""
    c = idealobs.IdealObs(k=1., q=0.5, sigma_s=0.6, m=8000.)
    pc = c.snrenv_to_pc(np.arange(0, 21))
    target = np.array([0.0000, 0.0025, 0.0267, 0.1327, 0.4403, 1.1314, 2.4278,
                       4.5518, 7.6788, 11.8990, 17.1955, 23.4442, 30.4320,
                       37.8885, 45.5214, 53.0503, 60.2323, 66.8786, 72.8613,
                       78.1116, 82.6125])
    np.testing.assert_allclose(pc, target, atol=1e-4)
| Python | 0 |
28e049dfc0a784c9c47ef671286e39e13825c6c5 | Remove useless import | tests/test_multival.py | tests/test_multival.py | # test_multival.py
"""Test suite for MultiValue class"""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, relased under an MIT-style license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
import unittest
from datetime import date
from pydicom.multival import MultiValue
from pydicom.valuerep import DS, DSfloat, DSdecimal, IS
from pydicom import config
import sys
# Interpreter version tuple, captured for potential version-specific checks
# (unused in the visible portion of this module).
python_version = sys.version_info
class MultiValuetests(unittest.TestCase):
    """Unit tests for pydicom's MultiValue element-list behaviour."""
    def testMultiDS(self):
        """MultiValue: Multi-valued data elements can be created........"""
        multival = MultiValue(DS, ['11.1', '22.2', '33.3'])
        for val in multival:
            self.assertTrue(isinstance(val, (DSfloat, DSdecimal)),
                            "Multi-value DS item not converted to DS")
    def testLimits(self):
        """MultiValue: Raise error if any item outside DICOM limits...."""
        # Temporarily enable strict validation, then restore the global flag.
        original_flag = config.enforce_valid_values
        config.enforce_valid_values = True
        self.assertRaises(OverflowError, MultiValue, IS, [1, -2 ** 31 - 1])  # Overflow error not raised for IS out of DICOM valid range
        config.enforce_valid_values = original_flag
    def testAppend(self):
        """MultiValue: Append of item converts it to required type..."""
        multival = MultiValue(IS, [1, 5, 10])
        multival.append('5')
        self.assertTrue(isinstance(multival[-1], IS))
        self.assertEqual(multival[-1], 5, "Item set by append is not correct value")
    def testSetIndex(self):
        """MultiValue: Setting list item converts it to required type"""
        multival = MultiValue(IS, [1, 5, 10])
        multival[1] = '7'
        self.assertTrue(isinstance(multival[1], IS))
        self.assertEqual(multival[1], 7, "Item set by index is not correct value")
    def testExtend(self):
        """MultiValue: Extending a list converts all to required type"""
        multival = MultiValue(IS, [1, 5, 10])
        multival.extend(['7', 42])
        self.assertTrue(isinstance(multival[-2], IS))
        self.assertTrue(isinstance(multival[-1], IS))
        self.assertEqual(multival[-2], 7, "Item set by extend not correct value")
    def testSlice(self):
        """MultiValue: Setting slice converts items to required type."""
        multival = MultiValue(IS, range(7))
        multival[2:7:2] = [4, 16, 36]
        for val in multival:
            self.assertTrue(isinstance(val, IS), "Slice IS value not correct type")
        self.assertEqual(multival[4], 16, "Set by slice failed for item 4 of list")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| # test_multival.py
"""Test suite for MultiValue class"""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, relased under an MIT-style license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
import unittest
from datetime import date
from pydicom.multival import MultiValue
from pydicom.valuerep import DS, DSfloat, DSdecimal, IS, DA
from pydicom import config # don't import datetime_conversion directly
import sys
# Interpreter version tuple, captured for potential version-specific checks
# (unused in the visible portion of this module).
python_version = sys.version_info
class MultiValuetests(unittest.TestCase):
    """Unit tests for pydicom's MultiValue element-list behaviour."""
    def testMultiDS(self):
        """MultiValue: Multi-valued data elements can be created........"""
        multival = MultiValue(DS, ['11.1', '22.2', '33.3'])
        for val in multival:
            self.assertTrue(isinstance(val, (DSfloat, DSdecimal)),
                            "Multi-value DS item not converted to DS")
    def testLimits(self):
        """MultiValue: Raise error if any item outside DICOM limits...."""
        # Temporarily enable strict validation, then restore the global flag.
        original_flag = config.enforce_valid_values
        config.enforce_valid_values = True
        self.assertRaises(OverflowError, MultiValue, IS, [1, -2 ** 31 - 1])  # Overflow error not raised for IS out of DICOM valid range
        config.enforce_valid_values = original_flag
    def testAppend(self):
        """MultiValue: Append of item converts it to required type..."""
        multival = MultiValue(IS, [1, 5, 10])
        multival.append('5')
        self.assertTrue(isinstance(multival[-1], IS))
        self.assertEqual(multival[-1], 5, "Item set by append is not correct value")
    def testSetIndex(self):
        """MultiValue: Setting list item converts it to required type"""
        multival = MultiValue(IS, [1, 5, 10])
        multival[1] = '7'
        self.assertTrue(isinstance(multival[1], IS))
        self.assertEqual(multival[1], 7, "Item set by index is not correct value")
    def testExtend(self):
        """MultiValue: Extending a list converts all to required type"""
        multival = MultiValue(IS, [1, 5, 10])
        multival.extend(['7', 42])
        self.assertTrue(isinstance(multival[-2], IS))
        self.assertTrue(isinstance(multival[-1], IS))
        self.assertEqual(multival[-2], 7, "Item set by extend not correct value")
    def testSlice(self):
        """MultiValue: Setting slice converts items to required type."""
        multival = MultiValue(IS, range(7))
        multival[2:7:2] = [4, 16, 36]
        for val in multival:
            self.assertTrue(isinstance(val, IS), "Slice IS value not correct type")
        self.assertEqual(multival[4], 16, "Set by slice failed for item 4 of list")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| Python | 0.000004 |
2fdb9d17b2c033370d663b4e72d71c1c7e105a84 | fix test for python 3 | tests/test_pipeline.py | tests/test_pipeline.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import eq_
from redis_shard.shard import RedisShardAPI
from redis_shard._compat import b
from .config import settings
class TestShard(unittest.TestCase):
    """Integration tests for RedisShardAPI pipeline support.

    NOTE(review): these tests talk to live redis servers described by
    ``settings``; they assume the servers are running and writable.
    """
    def setUp(self):
        # A fresh sharded client per test, over a cleaned keyspace.
        self.client = RedisShardAPI(**settings)
        self.clear_db()
    def tearDown(self):
        pass
    def clear_db(self):
        # Delete every key these tests may have written.
        self.client.delete('testset')
        self.client.delete('testzset')
        self.client.delete('testlist')
    def test_pipeline(self):
        """Pipelined commands apply on execute(), in both the explicit
        reset() style and the context-manager style."""
        self.client.set('test', '1')
        pipe = self.client.pipeline()
        pipe.set('test', '2')
        pipe.zadd('testzset', 'first', 1)
        pipe.zincrby('testzset', 'first')
        pipe.zadd('testzset', 'second', 2)
        pipe.execute()
        pipe.reset()
        # redis-py returns bytes under Python 3, hence the b'...' values.
        eq_(self.client.get('test'), b'2')
        eq_(self.client.zscore('testzset', 'first'), 2.0)
        eq_(self.client.zscore('testzset', 'second'), 2.0)
        with self.client.pipeline() as pipe:
            pipe.set('test', '3')
            pipe.zadd('testzset', 'first', 4)
            pipe.zincrby('testzset', 'first')
            pipe.zadd('testzset', 'second', 5)
            pipe.execute()
        eq_(self.client.get('test'), b'3')
        eq_(self.client.zscore('testzset', 'first'), 5.0)
        eq_(self.client.zscore('testzset', 'second'), 5.0)
    def test_pipeline_script(self):
        """eval() calls queued on a pipeline all run on execute()."""
        pipe = self.client.pipeline()
        for i in range(100):
            pipe.eval("""
            redis.call('set', KEYS[1], ARGV[1])
            """, 1, 'testx%d' % i, i)
        pipe.execute()
        for i in range(100):
            # b() normalises to bytes for Python 2/3 compatibility.
            eq_(self.client.get('testx%d' % i), b('%d' % i))
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import eq_
from redis_shard.shard import RedisShardAPI
from redis_shard._compat import b
from .config import settings
class TestShard(unittest.TestCase):
    """Integration tests for RedisShardAPI pipelines (requires live redis
    servers configured in ``settings``)."""
    def setUp(self):
        # A fresh sharded client per test, over a cleaned keyspace.
        self.client = RedisShardAPI(**settings)
        self.clear_db()
    def tearDown(self):
        pass
    def clear_db(self):
        # Delete every key these tests may write.
        self.client.delete('testset')
        self.client.delete('testzset')
        self.client.delete('testlist')
    def test_pipeline(self):
        """Pipelined commands apply on execute(), in both styles."""
        self.client.set('test', '1')
        pipe = self.client.pipeline()
        pipe.set('test', '2')
        pipe.zadd('testzset', 'first', 1)
        pipe.zincrby('testzset', 'first')
        pipe.zadd('testzset', 'second', 2)
        pipe.execute()
        pipe.reset()
        # Fix: redis-py returns bytes under Python 3, so compare against
        # byte strings; the old plain-str comparisons failed on Python 3.
        eq_(self.client.get('test'), b'2')
        eq_(self.client.zscore('testzset', 'first'), 2.0)
        eq_(self.client.zscore('testzset', 'second'), 2.0)
        with self.client.pipeline() as pipe:
            pipe.set('test', '3')
            pipe.zadd('testzset', 'first', 4)
            pipe.zincrby('testzset', 'first')
            pipe.zadd('testzset', 'second', 5)
            pipe.execute()
        eq_(self.client.get('test'), b'3')
        eq_(self.client.zscore('testzset', 'first'), 5.0)
        eq_(self.client.zscore('testzset', 'second'), 5.0)
    def test_pipeline_script(self):
        """eval() calls queued on a pipeline all run on execute()."""
        pipe = self.client.pipeline()
        for i in range(100):
            pipe.eval("""
            redis.call('set', KEYS[1], ARGV[1])
            """, 1, 'testx%d' % i, i)
        pipe.execute()
        for i in range(100):
            # b() normalises to bytes for Python 2/3 compatibility.
            eq_(self.client.get('testx%d' % i), b('%d' % i))
| Python | 0.000105 |
ab4c02c1f5f5cf3ba46b4924c48693d028dc23db | Split pipeline tests | tests/test_pipeline.py | tests/test_pipeline.py | from valohai_yaml.objs import Config, DeploymentNode
def test_pipeline_valid(pipeline_config: Config):
    """The example pipeline configuration passes linting cleanly."""
    lint_result = pipeline_config.lint()
    assert lint_result.is_valid()
def test_little_pipeline(pipeline_config: Config):
    """The little pipeline wires batch1's aspect-ratio parameter into batch2."""
    def is_aspect_ratio_edge(edge):
        # True exactly for the parameter->parameter edge batch1 -> batch2.
        return (
            edge.source_node == "batch1"
            and edge.source_type == "parameter"
            and edge.source_key == "aspect-ratio"
            and edge.target_node == "batch2"
            and edge.target_type == "parameter"
            and edge.target_key == "aspect-ratio"
        )
    edges = pipeline_config.pipelines["My little pipeline"].edges
    assert any(is_aspect_ratio_edge(edge) for edge in edges)
def test_deployment_pipeline(pipeline_config: Config):
    """The deployment pipeline routes the trained model into the predictor
    node and exposes the expected aliases/endpoints on deployment nodes."""
    dp = pipeline_config.pipelines["My deployment pipeline"]
    # Collect all output->file edges carrying the model into the predictor.
    model_edges = [
        edge
        for edge in dp.edges
        if edge.source_node == "train"
        and edge.source_type == "output"
        and edge.source_key == "model"
        and edge.target_node == "deploy-predictor"
        and edge.target_type == "file"
        and edge.target_key == "predict-digit.model"
    ]
    assert model_edges
    dn_predict = dp.get_node_by(name='deploy-predictor')
    assert isinstance(dn_predict, DeploymentNode)
    assert "predictor-staging" in dn_predict.aliases
    assert "predict-digit" in dn_predict.endpoints
    # A node declared without presets gets empty alias/endpoint lists.
    dn_no_preset = dp.get_node_by(name='deploy-no-presets')
    assert isinstance(dn_no_preset, DeploymentNode)
    assert dn_no_preset.aliases == []
    assert dn_no_preset.endpoints == []
def test_medium_pipeline(pipeline_config: Config):
    """The medium pipeline has at least one edge fed by the model.pb output."""
    edges = pipeline_config.pipelines["My medium pipeline"].edges
    matches = (edge.source_type == "output" and edge.source_key == "model.pb"
               for edge in edges)
    assert any(matches)
| from valohai_yaml.objs import Config, DeploymentNode
def test_pipeline(pipeline_config: Config):
lr = pipeline_config.lint()
assert lr.is_valid()
assert any(
(
edge.source_node == "batch1"
and edge.source_type == "parameter"
and edge.source_key == "aspect-ratio"
and edge.target_node == "batch2"
and edge.target_type == "parameter"
and edge.target_key == "aspect-ratio"
)
for edge in pipeline_config.pipelines["My little pipeline"].edges
)
assert any(
(
edge.source_node == "train"
and edge.source_type == "output"
and edge.source_key == "model"
and edge.target_node == "deploy-predictor"
and edge.target_type == "file"
and edge.target_key == "predict-digit.model"
)
for edge in pipeline_config.pipelines["My deployment pipeline"].edges
)
dp = pipeline_config.pipelines["My deployment pipeline"]
dn_predict = dp.get_node_by(name='deploy-predictor')
assert isinstance(dn_predict, DeploymentNode)
assert "predictor-staging" in dn_predict.aliases
assert "predict-digit" in dn_predict.endpoints
dn_no_preset = dp.get_node_by(name='deploy-no-presets')
assert isinstance(dn_no_preset, DeploymentNode)
assert dn_no_preset.aliases == []
assert dn_no_preset.endpoints == []
assert any(
(edge.source_type == "output" and edge.source_key == "model.pb")
for edge in pipeline_config.pipelines["My medium pipeline"].edges
)
| Python | 0.000001 |
9e57e467ab508cd0e5fab2862a2c9b651eaa7838 | rename tag basisofRecords to BASISOFRECORDS | bin/aggregate_metrics.py | bin/aggregate_metrics.py | import sys
import os
import json
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator, CartoDBWriter
def check_arguments():
    """Validate argv and return [data_dir, settings_file].

    Prints a usage banner and exits with status -1 unless exactly two
    arguments were supplied.  (Python 2 module: uses print statements.)
    """
    if len(sys.argv) != 3:
        print 'usage: aggregate_metrics.py <data directory> <settings.json>\n'
        print '    data directory: this should point to a directory'
        print '                    containing chunks of metric data.'
        print '                    metric data should be in json and'
        print '                    ordered by dataset key.\n'
        print '    settings.json: contains the `api_key` that will'
        print '                   be used to contact the cartodb API.'
        sys.exit(-1)
    data_dir, settings_file = sys.argv[1:]
    return [data_dir, settings_file]
def aggregate_metrics(data_dir):
    """Aggregate every metric chunk found under *data_dir* into one dict."""
    return ReportAggregator().aggregate(data_dir)
def write_data(data, settings_file):
    """Upload per-dataset basis-of-record counts to CartoDB.

    data: dict mapping dataset key -> aggregated metrics; each entry must
        contain 'BASISOFRECORDS' and 'NUMBER_OF_RECORDS'.
    settings_file: path to a JSON file holding the CartoDB 'api_key'.
    """
    # Fix: read the settings with a context manager instead of the old
    # json.load(open(...)), which leaked the file handle.
    with open(settings_file) as settings_fh:
        settings = json.load(settings_fh)
    writer = CartoDBWriter()
    basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'MATERIAL_SAMPLE', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'LITERATURE', 'UNKNOWN']
    for dataset in data:
        basis_of_records = data[dataset]['BASISOFRECORDS']
        row = [dataset]
        # Absent metrics count as zero so every row has a uniform shape.
        for metric_name in basis_of_records_metrics:
            row.append(basis_of_records.get(metric_name, 0))
        row.append(data[dataset]['NUMBER_OF_RECORDS'])
        writer.write_basis_of_record(row, settings['api_key'])
def main():
    """Entry point: parse args, aggregate metric chunks, upload results."""
    args = check_arguments()
    metrics = aggregate_metrics(args[0])
    write_data(metrics, args[1])
main()
| import sys
import os
import json
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator, CartoDBWriter
def check_arguments():
if len(sys.argv) != 3:
print 'usage: aggregate_metrics.py <data directory> <settings.json>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.\n'
print ' settings.json: contains the `api_key` that will'
print ' be used to contact the cartodb API.'
sys.exit(-1)
data_dir, settings_file = sys.argv[1:]
return [data_dir, settings_file]
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data, settings_file):
settings = json.load(open(settings_file))
writer = CartoDBWriter()
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'MATERIAL_SAMPLE', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'LITERATURE', 'UNKNOWN']
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
nr_of_records = data[dataset]['NUMBER_OF_RECORDS']
row.append(nr_of_records)
writer.write_basis_of_record(row, settings['api_key'])
def main():
data_dir, settings_file = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data, settings_file)
main()
| Python | 0.000001 |
77c0ad615c7f0270c0425866f06edde8856892b9 | Add Augur Unit Tests For parseIntelXML() | build/tests/test_augur.py | build/tests/test_augur.py | #!/usr/bin/python3
"""
test_augur.py
APP: Inquisition
DESC: Unit test for Augur library
CREATION_DATE: 2017-11-25
"""
# MODULES
# | Native
import configparser
import unittest
# | Third-Party
from bs4 import BeautifulSoup as BSoup
# | Custom
from lib.destiny.Augur import Augur
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class AugurTestCase(unittest.TestCase):
    """Tests for the Augur threat-intel fetch/parse library.

    NOTE(review): the *_validURL and parseIntelXML tests fetch live data
    from isc.sans.edu, so they need network access and may fail offline.
    """
    def setUp(self):
        # generate config
        cfg = configparser.ConfigParser()
        cfg.read('build/tests/unit_tests_GOOD.cfg')
        self.augur = Augur(cfg=cfg)
    def test_getXMLSrcData_validURL(self):
        # A reachable XML source should parse into a BeautifulSoup tree.
        responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/')
        self.assertIsInstance(responseData, BSoup)
    def test_getXMLSrcData_invalidURL(self):
        # An unreachable host should yield the empty-dict sentinel.
        responseData = self.augur.getXMLSrcData(url='https://invalid.url/')
        self.assertEqual(responseData, {})
    def test_getXMLSrcData_blankURL(self):
        # Fix: the old try/except/assertTrue(True) pattern passed silently
        # when no exception was raised; assertRaises actually enforces it.
        with self.assertRaises(ValueError):
            self.augur.getXMLSrcData(url='')
    def test_mapIOCItemNameToFieldName(self):
        fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='remoteIP')
        self.assertEqual(fieldName, 'src_ip')
    def test_mapIOCItemNameToFieldName_blankFieldName(self):
        # Fix: same silent-pass pattern as above; enforce the exception.
        with self.assertRaises(ValueError):
            self.augur.mapIOCItemNameToFieldName(iocItemName='')
    def test_parseIntelXML(self):
        responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/')
        parsedData = self.augur.parseIntelXML(responseData)
        self.assertNotEqual(parsedData, {})
if __name__ == '__main__':
unittest.main() | #!/usr/bin/python3
"""
test_augur.py
APP: Inquisition
DESC: Unit test for Augur library
CREATION_DATE: 2017-11-25
"""
# MODULES
# | Native
import configparser
import unittest
# | Third-Party
from bs4 import BeautifulSoup as BSoup
# | Custom
from lib.destiny.Augur import Augur
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class AugurTestCase(unittest.TestCase):
def setUp(self):
# generate config
cfg = configparser.ConfigParser()
cfg.read('build/tests/unit_tests_GOOD.cfg')
self.augur = Augur(cfg=cfg)
def test_getXMLSrcData_validURL(self):
responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/')
self.assertIsInstance(responseData, BSoup)
def test_getXMLSrcData_invalidURL(self):
responseData = self.augur.getXMLSrcData(url='https://invalid.url/')
self.assertEqual(responseData, {})
def test_getXMLSrcData_blankURL(self):
try:
responseData = self.augur.getXMLSrcData(url='')
except ValueError:
self.assertTrue(True)
def test_mapIOCItemNameToFieldName(self):
fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='remoteIP')
self.assertEqual(fieldName, 'src_ip')
def test_mapIOCItemNameToFieldName_blankFieldName(self):
try:
fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='')
except ValueError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main() | Python | 0 |
5c2141610c40c1879400a88c82ac055081bb7451 | Let the final table have any number of periods per day | main/views.py | main/views.py | import sys
from string import ascii_uppercase
import itertools
from typing import Dict, List
from openpyxl import Workbook
from terminaltables import AsciiTable
from main import models
from main import solver
Timetable = Dict[models.Subject, List[int]]
def timetable_to_workbook(timetable: Timetable, sheet_name: str = 'Timetable', periods_per_day: int = 4):
    """Render *timetable* into an openpyxl Workbook.

    Days map to columns A.., period slots within a day map to rows 1..;
    multiple subjects sharing one slot are newline-separated in the cell.
    """
    workbook = Workbook()
    sheet = workbook.active
    sheet.title = sheet_name
    for subject, periods in timetable.items():
        for period in periods:
            # TODO: error when some period is out of bounds (e.g. period 200 when there are just 4 periods / day)
            # Periods are zero-indexed, so divmod by periods_per_day gives
            # the weekday (0 = Monday -> column A) and the slot within it.
            day, period_in_day = divmod(period, periods_per_day)
            cell = sheet['{}{}'.format(ascii_uppercase[day], period_in_day + 1)]
            cell.value = (cell.value or '') + subject.name + '\n'
    return workbook
def timetable_dict_to_ascii_table(timetable: Timetable, periods_per_day: int = 4) -> str:
    """Render *timetable* as an ASCII table with weekdays as columns.

    Generalized: accepts any number of periods per day (default 4, i.e. a
    5-day week of 20 slots, matching the previously hard-coded layout).
    """
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
    flat_timetable = [''] * (periods_per_day * len(days))
    for subject, periods in timetable.items():
        for period in periods:
            flat_timetable[period] += (subject.name + '\n')
    # Chunk the flat list into one column per day, then transpose so each
    # table row is a single period slot across all days.
    columns = [flat_timetable[i:i + periods_per_day]
               for i in range(0, len(flat_timetable), periods_per_day)]
    square_timetable = list(zip(*columns))
    return AsciiTable([days] + square_timetable).table
def main():
    """CLI entry point: load students from the datastore named on argv[1],
    generate one feasible timetable, and save it as out.xlsx."""
    ds = models.Datastore(sys.argv[1])
    students = list(ds.get_students().values())
    # 20 = total period slots (5 days x 4 periods) -- TODO confirm.
    tt = solver.possible_timetables(students, 20)
    # Take the first feasible timetable yielded by the generator.
    timetable_to_workbook(next(tt)).save('out.xlsx')
if __name__ == '__main__':
main()
| import sys
from string import ascii_uppercase
import itertools
from typing import Dict, List
from openpyxl import Workbook
from terminaltables import AsciiTable
from main import models
from main import solver
Timetable = Dict[models.Subject, List[int]]
def timetable_to_workbook(timetable: Timetable, sheet_name: str = 'Timetable', periods_per_day: int = 4):
    """Render *timetable* into an openpyxl Workbook (days as columns A..,
    period slots within a day as rows 1..).

    Fix: honour the *periods_per_day* parameter, which was previously
    accepted but ignored in favour of a hard-coded 4.
    """
    wb = Workbook()
    ws = wb.active
    ws.title = sheet_name
    for subject in timetable:
        for period in timetable[subject]:
            # TODO: error when some period is out of bounds (e.g. period 200 when there are just 4 periods / day)
            # Zero-indexed: floor division gives the day, modulo gives the
            # slot within that day.
            day = period // periods_per_day
            period_in_day = period % periods_per_day
            cell = ws['{}{}'.format(ascii_uppercase[day], period_in_day + 1)]
            cell.value = (cell.value or '') + subject.name + '\n'
    return wb
def timetable_dict_to_ascii_table(timetable: Timetable) -> str:
flat_timetable = list(itertools.repeat('', 20))
for subject, periods in timetable.items():
for period in periods:
flat_timetable[period] += (subject.name + '\n')
square_timetable = list(zip(*[flat_timetable[i:i + 4] for i in range(0, len(flat_timetable), 4)]))
return AsciiTable(
[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']] + square_timetable
).table
def main():
ds = models.Datastore(sys.argv[1])
students = list(ds.get_students().values())
tt = solver.possible_timetables(students, 20)
timetable_to_workbook(next(tt)).save('out.xlsx')
if __name__ == '__main__':
main()
| Python | 0.997706 |
b6572ec32295365862947845a8c916eae428700f | Clean up temporary files on 'nt'. | makemodule.py | makemodule.py | #!/bin/env python
"""
makemodule
Module generation tool
Copyright (c) 2015 Sam Saint-Pettersen.
Released under the MIT/X11 License.
"""
import sys
import os
import xml.dom.minidom as xml
class makemodule:
    """Probe each command named on the command line for availability and
    write the results to modules.xml."""
    def __init__(self, args):
        # args is the full argv; with no module names given, show usage.
        if len(args) == 1:
            self.displayUsage()
        else:
            self.writeModuleXML()
    def displayUsage(self):
        """Print the usage banner and exit with a non-zero status."""
        print(__doc__)
        print('Usage: makemodule [module..module]\n')
        sys.exit(1)
    def writeModuleXML(self):
        """Run each candidate command and write modules.xml describing
        which of them are present on this system."""
        names = []
        enabled = []
        # Windows has no /dev/null; capture output in a temp file instead
        # and remove it afterwards.
        if os.name == 'nt':
            redirect = ' > a.tmp 2>&1'
        else:
            redirect = ' >> /dev/null 2>&1'
        for arg in sys.argv[1:]:
            names.append(arg)
            exitCode = int(os.system(arg + redirect))
            # 32512 == 127 << 8: the shell's "command not found" status.
            if exitCode == 32512:
                enabled.append(False)
            else:
                enabled.append(True)
        doc = xml.Document()
        c = doc.createElement('configuration')
        doc.appendChild(c)
        # Emit one <module><name/><enabled/></module> entry per command.
        for name, is_enabled in zip(names, enabled):
            m = doc.createElement('module')
            c.appendChild(m)
            n = doc.createElement('name')
            m.appendChild(n)
            n.appendChild(doc.createTextNode(name))
            e = doc.createElement('enabled')
            m.appendChild(e)
            e.appendChild(doc.createTextNode(str(is_enabled)))
        print('Writing modules.xml...')
        # Context manager guarantees the file is closed even on error
        # (the old code called close() manually and had an unused
        # 'cleanup' local, now removed).
        with open('modules.xml', 'w') as f:
            f.write(doc.toprettyxml())
        if os.name == 'nt':
            os.remove('a.tmp')
makemodule(sys.argv)
| #!/bin/env python
"""
makemodule
Module generation tool
Copyright (c) 2015 Sam Saint-Pettersen.
Released under the MIT/X11 License.
"""
import sys
import os
import xml.dom.minidom as xml
class makemodule:
def __init__(self, args):
if len(args) == 1:
self.displayUsage()
else:
self.writeModuleXML()
def displayUsage(self):
print(__doc__)
print('Usage: makemodule [module..module]\n')
sys.exit(1)
def writeModuleXML(self):
names = []
enabled = []
redirect = ''
cleanup = False
if os.name == 'nt':
redirect = ' > a.tmp 2>&1'
cleanup = True
else:
redirect = ' >> /dev/null 2>&1'
for arg in sys.argv[1:]:
names.append(arg)
exitCode = int(os.system(arg + redirect))
if exitCode == 32512:
enabled.append(False)
else:
enabled.append(True)
doc = xml.Document()
c = doc.createElement('configuration')
doc.appendChild(c)
i = 0
for name in names:
m = doc.createElement('module')
c.appendChild(m)
n = doc.createElement('name')
m.appendChild(n)
n_is = doc.createTextNode(name)
n.appendChild(n_is)
e = doc.createElement('enabled')
m.appendChild(e)
e_is = doc.createTextNode(str(enabled[i]))
e.appendChild(e_is)
i = i + 1
print('Writing modules.xml...')
f = open('modules.xml', 'w')
f.write(doc.toprettyxml())
f.close()
makemodule(sys.argv)
| Python | 0 |
9aae92fb0e22c97f559b6e3ee895d9959e010e05 | Add missing import | tests_tf/test_model.py | tests_tf/test_model.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from cleverhans.model import Model, CallableModelWrapper
class TestModelClass(unittest.TestCase):
    """Verify the abstract Model base class: each accessor that concrete
    subclasses must implement raises when called on the bare base class.

    NOTE(review): `context.exception` is always truthy once assertRaises
    succeeds, so the final assertTrue in each test is effectively a
    no-op; the real check is that the call raised at all.
    """
    def test_get_layer(self):
        # Define empty model
        model = Model()
        x = []
        # Exception is thrown when `get_layer` not implemented
        with self.assertRaises(Exception) as context:
            model.get_layer(x, layer='')
        self.assertTrue(context.exception)
    def test_get_logits(self):
        # Define empty model
        model = Model()
        x = []
        # Exception is thrown when `get_logits` not implemented
        with self.assertRaises(Exception) as context:
            model.get_logits(x)
        self.assertTrue(context.exception)
    def test_get_probs(self):
        # Define empty model
        model = Model()
        x = []
        # Exception is thrown when `get_probs` not implemented
        with self.assertRaises(Exception) as context:
            model.get_probs(x)
        self.assertTrue(context.exception)
    def test_get_layer_names(self):
        # Define empty model
        model = Model()
        # Exception is thrown when `get_layer_names` not implemented
        with self.assertRaises(Exception) as context:
            model.get_layer_names()
        self.assertTrue(context.exception)
    def test_fprop(self):
        # Define empty model
        model = Model()
        x = []
        # Exception is thrown when `fprop` not implemented
        with self.assertRaises(Exception) as context:
            model.fprop(x)
        self.assertTrue(context.exception)
class TestCallableModelWrapperInitArguments(unittest.TestCase):
    """CallableModelWrapper accepts both supported output-layer names."""
    def test_output_layer(self):
        def model():
            return True
        # Constructing with either recognised layer name must not raise.
        # Fix: drop the unused (and shadowing) `wrap` assignments -- the
        # instances themselves are never inspected.
        CallableModelWrapper(model, 'probs')
        CallableModelWrapper(model, 'logits')
if __name__ == '__main__':
unittest.main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from cleverhans.model import Model
class TestModelClass(unittest.TestCase):
def test_get_layer(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_layer` not implemented
with self.assertRaises(Exception) as context:
model.get_layer(x, layer='')
self.assertTrue(context.exception)
def test_get_logits(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_logits` not implemented
with self.assertRaises(Exception) as context:
model.get_logits(x)
self.assertTrue(context.exception)
def test_get_probs(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_probs` not implemented
with self.assertRaises(Exception) as context:
model.get_probs(x)
self.assertTrue(context.exception)
def test_get_layer_names(self):
# Define empty model
model = Model()
# Exception is thrown when `get_layer_names` not implemented
with self.assertRaises(Exception) as context:
model.get_layer_names()
self.assertTrue(context.exception)
def test_fprop(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `fprop` not implemented
with self.assertRaises(Exception) as context:
model.fprop(x)
self.assertTrue(context.exception)
class TestCallableModelWrapperInitArguments(unittest.TestCase):
def test_output_layer(self):
def model():
return True
# The following two calls should not raise Exceptions
wrap = CallableModelWrapper(model, 'probs')
wrap = CallableModelWrapper(model, 'logits')
if __name__ == '__main__':
unittest.main()
| Python | 0.000466 |
4646e7c682ba9a0291815a5d0de98674a9de3410 | Fix RemoteCapture definition | src/pyshark/capture/remote_capture.py | src/pyshark/capture/remote_capture.py | from pyshark import LiveCapture
class RemoteCapture(LiveCapture):
    """A packet capture performed against a remote machine that runs the
    rpcapd service."""
    def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None):
        """Connect this capture to a remote rpcapd daemon; call sniff() to
        read packets.

        The remote machine must run rpcapd in null-authentication mode
        (-n). Be warned that the captured traffic travels unencrypted.

        :param remote_host: IP or hostname of the machine running rpcapd.
        :param remote_interface: true interface name on the remote machine
            (on Windows the \\Device\\NPF_.. name, not the display name).
        :param remote_port: TCP port the rpcapd service listens on.
        :param bpf_filter: optional BPF (tcpdump) filter applied before
            reading the capture.
        """
        capture_url = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface)
        super(RemoteCapture, self).__init__(capture_url, bpf_filter=bpf_filter)
class RemoteCapture(LiveCapture):
"""
A capture which is performed on a remote machine which has an rpcapd service running.
"""
def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None):
"""
Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff() method
to get packets.
Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic
is unencrypted!
:param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd.
:param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is
not the device display name but the true interface name (i.e. \Device\NPF_..).
:param remote_port: The remote port the rpcapd service is listening on
:param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading.
"""
interface = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface)
super(RemoteCapture, self).__init__(interface, bpf_filter=bpf_filter) | Python | 0.000001 |
1a9f0320b3a8aecc50cfee6335c3b6e8dc81c233 | Make this tool less hacky. | tools/commit-impact.py | tools/commit-impact.py | #!/usr/bin/env python
#
# See the impact of a Futhark commit compared to the previous one we
# have benchmarking for.
import sys
import subprocess
from urllib.request import urlopen
from urllib.error import HTTPError
import json
import tempfile
import os
def url_for(backend, system, commit):
    """Return the benchmark-results JSON URL for a backend/system/commit triple."""
    template = 'https://futhark-lang.org/benchmark-results/futhark-{}-{}-{}.json'
    return template.format(backend, system, commit)
def results_for_commit(backend, system, commit):
    """Fetch and decode the benchmark JSON for one commit.

    Returns the parsed results dict, or None when the server has no
    results file for this commit (any HTTP error is treated as absence).
    """
    try:
        url = url_for(backend, system, commit)
        print('Fetching {}...'.format(url))
        return json.loads(urlopen(url).read())
    except HTTPError:
        return None
def first_commit_with_results(backend, system, commits):
    """Return (commit, results) for the first commit in *commits* that has
    published results; falls through (returning None) when none do."""
    for candidate in commits:
        results = results_for_commit(backend, system, candidate)
        if results:
            return candidate, results
def find_commits(start):
    """List commit hashes reachable from *start*, newest first (git rev-list)."""
    output = subprocess.check_output(['git', 'rev-list', start])
    return output.decode('utf-8').splitlines()
if __name__ == '__main__':
    # argv: backend system commit [baseline-start]
    backend, system, commit = sys.argv[1:4]
    now = results_for_commit(backend, system, commit)
    if not now:
        print('No results found')
        sys.exit(1)
    # With an explicit fourth argument, walk that history for a baseline;
    # otherwise walk back from the commit itself (skipping it).
    if len(sys.argv) == 5:
        commits = find_commits(sys.argv[4])
    else:
        commits = find_commits(commit)[1:]
    then_commit, then = first_commit_with_results(backend, system, commits[1:])
    print('Comparing {}'.format(commit))
    print('          with {}'.format(then_commit))
    # Dump both result sets to temp files and diff them with the existing
    # comparison script (replaces the old hacky module import).
    with tempfile.NamedTemporaryFile(prefix=commit, mode='w') as now_file:
        with tempfile.NamedTemporaryFile(prefix=then_commit, mode='w') as then_file:
            json.dump(now, now_file)
            json.dump(then, then_file)
            now_file.flush()
            then_file.flush()
            os.system('tools/cmp-bench-json.py {} {}'.format(then_file.name, now_file.name))
| #!/usr/bin/env python
#
# See the impact of a Futhark commit compared to the previous one we
# have benchmarking for.
import sys
import subprocess
from urllib.request import urlopen
from urllib.error import HTTPError
import json
def url_for(backend, system, commit):
return 'https://futhark-lang.org/benchmark-results/futhark-{}-{}-{}.json'.format(backend, system, commit)
def results_for_commit(backend, system, commit):
try:
url = url_for(backend, system, commit)
print('Fetching {}...'.format(url))
return json.loads(urlopen(url).read())
except HTTPError:
return None
def first_commit_with_results(backend, system, commits):
for commit in commits:
res = results_for_commit(backend, system, commit)
if res:
return commit, res
if __name__ == '__main__':
backend, system, commit = sys.argv[1:]
commits = subprocess.check_output(['git', 'rev-list', commit]).decode('utf-8').splitlines()
now = results_for_commit(backend, system, commit)
if not now:
print('No results found')
sys.exit(1)
then_commit, then = first_commit_with_results(backend, system, commits[1:])
print('Comparing {}'.format(commit))
print(' with {}'.format(then_commit))
# Hacky hacky...
m = __import__('cmp-bench-json')
m.compare(then, now)
| Python | 0 |
0fa1e147fc7d2522a4352c0bbc60e4da67380257 | add a missing statement | landlab/utils/tests/test_stream_length.py | landlab/utils/tests/test_stream_length.py | from landlab import RasterModelGrid, FieldError
from landlab.components import FlowAccumulator, FastscapeEroder, FlowDirectorSteepest
import numpy as np
from landlab.utils.stream_length import calculate_stream_length
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
def test_no_flow_recievers():
    """calculate_stream_length raises FieldError on a grid without flow routing."""
    # A bare grid: flow accumulation has never been run on it, so the
    # flow-receiver field required by the utility is absent.
    grid = RasterModelGrid(30, 70)
    assert_raises(FieldError, calculate_stream_length, grid)
def test_no_upstream_array():
    """calculate_stream_length raises FieldError without flow__upstream_node_order."""
    grid = RasterModelGrid(30, 70)
    # The flow director needs an elevation field; the returned array is
    # not needed here, only the field's presence on the grid.
    grid.add_zeros('topographic__elevation', at='node')
    director = FlowDirectorSteepest(grid)
    director.run_one_step()
    # Directing flow alone does not create the upstream-order array, so
    # the stream-length utility must still fail.
    assert_raises(FieldError, calculate_stream_length, grid)
| from landlab import RasterModelGrid, FieldError
from landlab.components import FlowAccumulator, FastscapeEroder, FlowDirectorSteepest
import numpy as np
from landlab.utils.stream_length import calculate_stream_length
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
def test_no_flow_recievers():
"""Test that correct error is raised when no flow recievers are on the grid."""
# instantiate a model grid, do not run flow accumulation on it
mg = RasterModelGrid(30, 70)
# test that the stream length utility will fail because of a ValueError
assert_raises(FieldError, calculate_stream_length, mg)
def test_no_upstream_array():
    """Test that correct error is raised when no flow__upstream_node_order."""
    # instantiate a model grid, do not run flow accumulation on it
    mg = RasterModelGrid(30, 70)
    # Fix: FlowDirectorSteepest requires a topographic__elevation field on
    # the grid; without it instantiation fails for the wrong reason.
    z = mg.add_zeros('topographic__elevation', at='node')
    fd = FlowDirectorSteepest(mg)
    fd.run_one_step()
    # test that the stream length utility will fail because of a ValueError
    assert_raises(FieldError, calculate_stream_length, mg)
| Python | 1 |
85ee5f5e6d7a5937b67c9d11ae127709749f7490 | Bump to version 0.4.1 | cmsplugin_cascade/__init__.py | cmsplugin_cascade/__init__.py | __version__ = "0.4.1"
| __version__ = "0.4.0"
| Python | 0 |
59b8ae5f17e556c09ef8592723f9c684843c7dcc | update function and comment | code/utils/outlierfunction.py | code/utils/outlierfunction.py |
# find outliers based on DVARS and FD
def outlier(data, bound):
    '''
    Find indices of outlier values based on DVARS and FD.

    Input:
        data: iterable of scalar values (e.g. DVARS or FD per volume)
        bound: threshold; values strictly greater than bound are outliers
    Output:
        tuple of index arrays, as returned by np.nonzero
    '''
    import numpy as np  # local import: module-level np not visible here
    # Fix: the old implementation copied the values themselves into the
    # marker list, so an outlier whose value happened to be exactly 0
    # (possible when bound < 0) was silently dropped; it also shadowed
    # the function name with a local variable. A boolean mask avoids both.
    mask = [value > bound for value in data]
    return np.nonzero(mask)
|
# find outliers based on DVARS and FD
def outlier(data, bound):
'''
Input:
data: array of values
bound: threshold for outliers
Output:
indices of outliers
'''
outlier = []
# set outlier values to 0
for i in data:
if i <= bound:
outlier.append(0)
else:
outlier.append(i)
# find outlier indices
outlier_indices = np.nonzero(outlier)
return outlier_indices
| Python | 0 |
b0f7e70e29783de6980006be92bc105287b3b5c3 | Remove dependency on not-yet-added [] library Change on 2014/01/24 by mgainer <mgainer@google.com> ------------- Created by MOE: http://code.google.com/p/moe-java MOE_MIGRATED_REVID=60226626 | coursebuilder/main.py | coursebuilder/main.py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
from common import tags
from controllers import sites
from models import custom_modules
import modules.activity_tag.activity_tag
import modules.admin.admin
import modules.announcements.announcements
import modules.assessment_tags.questions
import modules.course_explorer.course_explorer
import modules.courses.courses
import modules.dashboard.dashboard
import modules.oauth2.oauth2
import modules.oeditor.oeditor
import modules.review.review
import modules.search.search
import modules.upload.upload
import webapp2
# use this flag to control debug only features
debug = not appengine_config.PRODUCTION_MODE
# init and enable most known modules
modules.activity_tag.activity_tag.register_module().enable()
modules.admin.admin.register_module().enable()
modules.announcements.announcements.register_module().enable()
modules.assessment_tags.questions.register_module().enable()
modules.course_explorer.course_explorer.register_module().enable()
modules.courses.courses.register_module().enable()
modules.dashboard.dashboard.register_module().enable()
modules.oeditor.oeditor.register_module().enable()
modules.review.review.register_module().enable()
modules.search.search.register_module().enable()
modules.upload.upload.register_module().enable()
# register modules that are not enabled by default.
modules.oauth2.oauth2.register_module()
# compute all possible routes
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()
# routes available at '/%namespace%/' context paths
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]
# enable Appstats handlers if requested
appstats_routes = []
if appengine_config.gcb_appstats_enabled():
# pylint: disable-msg=g-import-not-at-top
import google.appengine.ext.appstats.ui as appstats_ui
# pylint: enable-msg=g-import-not-at-top
# add all Appstats URL's to /admin/stats basepath
for path, handler in appstats_ui.URLMAP:
assert '.*' == path[:2]
appstats_routes.append(('/admin/stats/%s' % path[3:], handler))
# tag extension resource routes
extensions_routes = [(
'/extensions/tags/.*/resources/.*', tags.ResourcesHandler)]
# i18n configuration for jinja2
webapp2_i18n_config = {'translations_path': os.path.join(
appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}
# init application
app = webapp2.WSGIApplication(
global_routes + extensions_routes + appstats_routes + app_routes,
config={'webapp2_extras.i18n': webapp2_i18n_config},
debug=debug)
| # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
from common import tags
from controllers import sites
from models import custom_modules
import modules.activity_tag.activity_tag
import modules.admin.admin
import modules.announcements.announcements
import modules.assessment_tags.questions
import modules.course_explorer.course_explorer
import modules.courses.courses
import modules.dashboard.dashboard
import modules.mapreduce.mapreduce_module
import modules.oauth2.oauth2
import modules.oeditor.oeditor
import modules.review.review
import modules.search.search
import modules.upload.upload
import webapp2
# use this flag to control debug only features
debug = not appengine_config.PRODUCTION_MODE
# init and enable most known modules
modules.activity_tag.activity_tag.register_module().enable()
modules.admin.admin.register_module().enable()
modules.announcements.announcements.register_module().enable()
modules.assessment_tags.questions.register_module().enable()
modules.course_explorer.course_explorer.register_module().enable()
modules.courses.courses.register_module().enable()
modules.dashboard.dashboard.register_module().enable()
modules.mapreduce.mapreduce_module.register_module().enable()
modules.oeditor.oeditor.register_module().enable()
modules.review.review.register_module().enable()
modules.search.search.register_module().enable()
modules.upload.upload.register_module().enable()
# register modules that are not enabled by default.
modules.oauth2.oauth2.register_module()
# compute all possible routes
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()
# routes available at '/%namespace%/' context paths
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]
# enable Appstats handlers if requested
appstats_routes = []
if appengine_config.gcb_appstats_enabled():
# pylint: disable-msg=g-import-not-at-top
import google.appengine.ext.appstats.ui as appstats_ui
# pylint: enable-msg=g-import-not-at-top
# add all Appstats URL's to /admin/stats basepath
for path, handler in appstats_ui.URLMAP:
assert '.*' == path[:2]
appstats_routes.append(('/admin/stats/%s' % path[3:], handler))
# tag extension resource routes
extensions_routes = [(
'/extensions/tags/.*/resources/.*', tags.ResourcesHandler)]
# i18n configuration for jinja2
webapp2_i18n_config = {'translations_path': os.path.join(
appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}
# init application
app = webapp2.WSGIApplication(
global_routes + extensions_routes + appstats_routes + app_routes,
config={'webapp2_extras.i18n': webapp2_i18n_config},
debug=debug)
| Python | 0 |
270c8ca68357f92999474fbf110fed7b01cdfdf2 | Use proper way to access package resources. | cqlengine/__init__.py | cqlengine/__init__.py | import os
import pkg_resources
from cqlengine.columns import *
from cqlengine.functions import *
from cqlengine.models import Model
from cqlengine.query import BatchQuery
__cqlengine_version_path__ = pkg_resources.resource_filename('cqlengine',
'VERSION')
__version__ = open(__cqlengine_version_path__, 'r').readline().strip()
# compaction
SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy"
LeveledCompactionStrategy = "LeveledCompactionStrategy"
ANY = "ANY"
ONE = "ONE"
TWO = "TWO"
THREE = "THREE"
QUORUM = "QUORUM"
LOCAL_QUORUM = "LOCAL_QUORUM"
EACH_QUORUM = "EACH_QUORUM"
ALL = "ALL"
| import os
from cqlengine.columns import *
from cqlengine.functions import *
from cqlengine.models import Model
from cqlengine.query import BatchQuery
__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION')
__version__ = open(__cqlengine_version_path__, 'r').readline().strip()
# compaction
SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy"
LeveledCompactionStrategy = "LeveledCompactionStrategy"
ANY = "ANY"
ONE = "ONE"
TWO = "TWO"
THREE = "THREE"
QUORUM = "QUORUM"
LOCAL_QUORUM = "LOCAL_QUORUM"
EACH_QUORUM = "EACH_QUORUM"
ALL = "ALL"
| Python | 0 |
5584ec8c6aa8e6567b3ddd286c1c7305fad070a3 | fix init | cryptotik/__init__.py | cryptotik/__init__.py |
from cryptotik.poloniex import Poloniex
from cryptotik.bittrex import Bittrex
from cryptotik.btce import Btce
from cryptotik.therock import TheRock
from cryptotik.livecoin import Livecoin
from cryptotik.okcoin import OKcoin
from cryptotik.hitbtc import Hitbtc
|
from cryptotik.poloniex import Poloniex
from cryptotik.bittrex import Bittrex
from cryptotik.btce import Btce
from cryptotik.therock import TheRock
from cryptotik.livecoin import Livecoin
<<<<<<< HEAD
from cryptotik.okcoin import OKcoin
=======
from cryptotik.hitbtc import Hitbtc
>>>>>>> 7e948ea7ab42a9ad57d9ec1259539995ff34fb34
| Python | 0.024288 |
35cc2bce4e5fb62083ec1a44bda85c2da064d119 | Remove debug print statements | cs251tk/specs/load.py | cs251tk/specs/load.py | from logging import warning
from glob import iglob
import json
import os
import shutil
from .cache import cache_specs
from .dirs import get_specs_dir
def load_all_specs(*, basedir=get_specs_dir()):
os.makedirs(basedir, exist_ok=True)
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename, basedir) for filename in spec_files])
def load_some_specs(idents, *, basedir=get_specs_dir()):
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents]
all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files)
print(loadable_spec_files)
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename) for filename in loadable_spec_files])
def load_spec(filename, basedir):
with open(filename, 'r', encoding='utf-8') as specfile:
loaded_spec = json.load(specfile)
name = os.path.splitext(os.path.basename(filename))[0]
assignment = loaded_spec['assignment']
# Ask if user wants to re-cache specs to fix discrepancy
if name != assignment:
warning('assignment "{}" does not match the filename {}'.format(assignment, filename))
recache = input("Re-cache specs? (Y/N)")
if recache == "Y" or recache == "y":
shutil.rmtree(os.path.join(basedir, '_cache'))
cache_specs(basedir)
return assignment, loaded_spec
| from logging import warning
from glob import iglob
import json
import os
import shutil
import sys
from .cache import cache_specs
from .dirs import get_specs_dir
def load_all_specs(*, basedir=get_specs_dir()):
os.makedirs(basedir, exist_ok=True)
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename, basedir) for filename in spec_files])
def load_some_specs(idents, *, basedir=get_specs_dir()):
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents]
all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files)
print(loadable_spec_files)
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename) for filename in loadable_spec_files])
def load_spec(filename, basedir):
with open(filename, 'r', encoding='utf-8') as specfile:
loaded_spec = json.load(specfile)
name = os.path.splitext(os.path.basename(filename))[0]
assignment = loaded_spec['assignment']
if name != assignment:
warning('assignment "{}" does not match the filename {}'.format(assignment, filename))
# warning("Re-caching specs\n")
# print(file=sys.stderr)
recache = input("Re-cache specs? (Y/N)")
if recache == "Y" or recache == "y":
shutil.rmtree(os.path.join(basedir, '_cache'))
cache_specs(basedir)
return assignment, loaded_spec
| Python | 0.000003 |
096d3c44a60c83820410a85cd6a56f20b13b9ccd | 更新 API Infor, 使用新格式改寫 users_total_count API 的回應 | commonrepo/infor_api/views.py | commonrepo/infor_api/views.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from commonrepo.elos.models import ELO
from commonrepo.users.models import User as User
from .permissions import IsOwnerOrReadOnly
# ELOs
@api_view(['GET'])
def elos_total_count(request):
if request.method == 'GET':
return Response({"code": 202,
"status": "ok",
"result": {
"total_elos": ELO.objects.all().count()
}
},
status=status.HTTP_202_ACCEPTED)
else:
return Response({"code": 400,
"status": "error"
},
status=status.HTTP_400_BAD_REQUEST)
# Users
@api_view(['GET'])
def users_total_count(request):
if request.method == 'GET':
return Response({"code": 202,
"status": "ok",
"result": {
"total_users": User.objects.all().count()
}
},
status=status.HTTP_202_ACCEPTED)
else:
return Response({"code": 400,
"status": "error"
},
status=status.HTTP_400_BAD_REQUEST) | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from commonrepo.elos.models import ELO
from commonrepo.users.models import User as User
from .permissions import IsOwnerOrReadOnly
# ELOs
@api_view(['GET'])
def elos_total_count(request):
if request.method == 'GET':
return Response({"code": 202,
"status": "ok",
"result": {
"total_elos": ELO.objects.all().count()
}
},
status=status.HTTP_202_ACCEPTED)
else:
return Response({"code": 400,
"status": "error"
},
status=status.HTTP_400_BAD_REQUEST)
# Users
@api_view(['GET'])
def users_total_count(request):
if request.method == 'GET':
return Response({"total_users": User.objects.all().count() }, status=status.HTTP_202_ACCEPTED)
else:
return Response(status=status.HTTP_400_BAD_REQUEST) | Python | 0 |
98e52a8c603346a03b2e5a6197ce2cb49af86f2c | Cut uselessness codes | complexism/multimodel/mmrt.py | complexism/multimodel/mmrt.py | import networkx as nx
from complexism.misc.counter import count
from complexism.mcore import *
from .entries import RelationEntry
__author__ = 'TimeWz667'
__all__ = ['ObsMultiModel', 'MultiModel']
class ObsMultiModel(Observer):
def __init__(self):
Observer.__init__(self)
self.ObservingModels = list()
def add_observing_model(self, model):
if model not in self.ObservingModels:
self.ObservingModels.append(model)
def update_dynamic_observations(self, model, flow, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
flow.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Flow.items() if k != 'Time'})
def read_statics(self, model, tab, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
if tab is self.Last:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Last.items() if k != 'Time'})
elif self.ExtMid:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Mid.items() if k != 'Time'})
class MultiModel(BranchModel):
def __init__(self, name, pc=None):
BranchModel.__init__(self, name, pc, ObsMultiModel())
self.Models = nx.MultiDiGraph()
def add_observing_model(self, m):
if m in self.Models:
self.Observer.add_observing_model(m)
def append(self, m):
if m.Name not in self.Models:
self.Models.add_node(m.Name, model=m)
def link(self, src, tar, message=None, **kwargs):
src = src if isinstance(src, RelationEntry) else RelationEntry(src)
tar = tar if isinstance(tar, RelationEntry) else RelationEntry(tar)
m_src = self.select_all(src.Selector)
m_tar = self.select_all(tar.Selector)
if src.is_single():
ms = m_src.first()
for kt, mt in m_tar.items():
if ms is not mt:
mt.listen(ms.Name, message, src.Parameter, tar.Parameter, **kwargs)
self.Models.add_edge(ms.Name, mt.Name, par_src=src.Parameter, par_tar=tar.Parameter)
def read_y0(self, y0, ti):
if not y0:
return
for k, m in self.Models.nodes().data('model'):
m.read_y0(y0=y0[k], ti=ti)
def reset_impulse(self, ti):
for s, nbd in self.Models.adjacency():
src = self.get_model(s)
for t in nbd.keys():
tar = self.get_model(t)
tar.impulse_foreign(src, 'update', ti)
@count()
def do_request(self, req):
src = self.get_model(req.Who)
for t, kb in self.Models[req.Who].items():
# for _, atr in kb.items():
tar = self.get_model(t)
tar.impulse_foreign(src, req.Message, req.When)
def find_next(self):
pass
def all_models(self):
return dict(self.Models.nodes().data('model'))
def get_model(self, k):
return self.Models.nodes[k]['model']
def clone(self, **kwargs):
pass
| import networkx as nx
from complexism.misc.counter import count
from complexism.mcore import *
from .entries import RelationEntry
__author__ = 'TimeWz667'
__all__ = ['ObsMultiModel', 'MultiModel']
class ObsMultiModel(Observer):
def __init__(self):
Observer.__init__(self)
self.ObservingModels = list()
def add_observing_model(self, model):
if model not in self.ObservingModels:
self.ObservingModels.append(model)
def update_dynamic_observations(self, model, flow, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
flow.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Flow.items() if k != 'Time'})
def read_statics(self, model, tab, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
if tab is self.Last:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Last.items() if k != 'Time'})
elif self.ExtMid:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Mid.items() if k != 'Time'})
class MultiModel(BranchModel):
def __init__(self, name, pc=None):
BranchModel.__init__(self, name, pc, ObsMultiModel())
self.Models = nx.MultiDiGraph()
def add_observing_model(self, m):
if m in self.Models:
self.Observer.add_observing_model(m)
def append(self, m):
if m.Name not in self.Models:
self.Models.add_node(m.Name, model=m)
def link(self, src, tar, message=None, **kwargs):
src = src if isinstance(src, RelationEntry) else RelationEntry(src)
tar = tar if isinstance(tar, RelationEntry) else RelationEntry(tar)
m_src = self.select_all(src.Selector)
m_tar = self.select_all(tar.Selector)
if src.is_single():
ms = m_src.first()
for kt, mt in m_tar.items():
if ms is not mt:
mt.listen(ms.Name, message, src.Parameter, tar.Parameter, **kwargs)
self.Models.add_edge(ms.Name, mt.Name, par_src=src.Parameter, par_tar=tar.Parameter)
def read_y0(self, y0, ti):
if not y0:
return
for k, m in self.Models.nodes().data('model'):
m.read_y0(y0=y0[k], ti=ti)
def reset_impulse(self, ti):
for s, nbd in self.Models.adjacency():
src = self.get_model(s)
for t in nbd.keys():
tar = self.get_model(t)
tar.impulse_foreign(src, 'update', ti)
@count()
def do_request(self, req):
src = self.get_model(req.Who)
for t, kb in self.Models[req.Who].items():
# for _, atr in kb.items():
tar = self.get_model(t)
tar.impulse_foreign(src, req.Message, req.When)
def find_next(self):
for k, model in self.all_models().items():
for req in model.Next:
self.Requests.append_request(req.up_scale(self.Name))
self.Requests.append_event(req.Event, k, self.Name)
# self.Requests.append_requests([req.up_scale(k) for req in model.Next])
def all_models(self):
return dict(self.Models.nodes().data('model'))
def get_model(self, k):
return self.Models.nodes[k]['model']
def clone(self, **kwargs):
pass
| Python | 0 |
6e35e4f5af341bbcda050434d86fd7e4712ebd0f | Update JGit to get PackInserter fix | lib/jgit/jgit.bzl | lib/jgit/jgit.bzl | load("//tools/bzl:maven_jar.bzl", "GERRIT", "MAVEN_LOCAL", "MAVEN_CENTRAL", "maven_jar")
_JGIT_VERS = "4.9.2.201712150930-r.3-g43ef5dabf"
_DOC_VERS = "4.9.2.201712150930-r" # Set to _JGIT_VERS unless using a snapshot
JGIT_DOC_URL = "http://download.eclipse.org/jgit/site/" + _DOC_VERS + "/apidocs"
_JGIT_REPO = GERRIT # Leave here even if set to MAVEN_CENTRAL.
# set this to use a local version.
# "/home/<user>/projects/jgit"
LOCAL_JGIT_REPO = ""
def jgit_repos():
if LOCAL_JGIT_REPO:
native.local_repository(
name = "jgit",
path = LOCAL_JGIT_REPO,
)
else:
jgit_maven_repos()
def jgit_maven_repos():
maven_jar(
name = "jgit_lib",
artifact = "org.eclipse.jgit:org.eclipse.jgit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "3f6a1002069be91d99e1b356193aac5bbe5b3da3",
src_sha1 = "4fbbcd1e2f474917dd0ddbfef2580f474daf4dbd",
unsign = True,
)
maven_jar(
name = "jgit_servlet",
artifact = "org.eclipse.jgit:org.eclipse.jgit.http.server:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "78425749a618dd82da8dcf19ef9fd14e4318315b",
unsign = True,
)
maven_jar(
name = "jgit_archive",
artifact = "org.eclipse.jgit:org.eclipse.jgit.archive:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "884933af30be5c64187838e43764e0e19309f850",
)
maven_jar(
name = "jgit_junit",
artifact = "org.eclipse.jgit:org.eclipse.jgit.junit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "d7c24fec0a23842a03a6eea592a07fbd1448e783",
unsign = True,
)
def jgit_dep(name):
mapping = {
"@jgit_junit//jar": "@jgit//org.eclipse.jgit.junit:junit",
"@jgit_lib//jar:src": "@jgit//org.eclipse.jgit:libjgit-src.jar",
"@jgit_lib//jar": "@jgit//org.eclipse.jgit:jgit",
"@jgit_servlet//jar":"@jgit//org.eclipse.jgit.http.server:jgit-servlet",
"@jgit_archive//jar": "@jgit//org.eclipse.jgit.archive:jgit-archive",
}
if LOCAL_JGIT_REPO:
return mapping[name]
else:
return name
| load("//tools/bzl:maven_jar.bzl", "GERRIT", "MAVEN_LOCAL", "MAVEN_CENTRAL", "maven_jar")
_JGIT_VERS = "4.9.2.201712150930-r"
_DOC_VERS = _JGIT_VERS # Set to _JGIT_VERS unless using a snapshot
JGIT_DOC_URL = "http://download.eclipse.org/jgit/site/" + _DOC_VERS + "/apidocs"
_JGIT_REPO = MAVEN_CENTRAL # Leave here even if set to MAVEN_CENTRAL.
# set this to use a local version.
# "/home/<user>/projects/jgit"
LOCAL_JGIT_REPO = ""
def jgit_repos():
if LOCAL_JGIT_REPO:
native.local_repository(
name = "jgit",
path = LOCAL_JGIT_REPO,
)
else:
jgit_maven_repos()
def jgit_maven_repos():
maven_jar(
name = "jgit_lib",
artifact = "org.eclipse.jgit:org.eclipse.jgit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "a3a2d1df793245ebfc7322db3c2b9828ee184850",
src_sha1 = "afa9a25e5502aeeb3b93d773ee445866fb316069",
unsign = True,
)
maven_jar(
name = "jgit_servlet",
artifact = "org.eclipse.jgit:org.eclipse.jgit.http.server:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "87b4d287feff8b6b4c4f38a504460d2a3d4624f3",
unsign = True,
)
maven_jar(
name = "jgit_archive",
artifact = "org.eclipse.jgit:org.eclipse.jgit.archive:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "ce4133fb0735d454dc8f6695fe6c6d5eff18a452",
)
maven_jar(
name = "jgit_junit",
artifact = "org.eclipse.jgit:org.eclipse.jgit.junit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "127074493f6a6ee5e6232a707d9adb523479e3bb",
unsign = True,
)
def jgit_dep(name):
mapping = {
"@jgit_junit//jar": "@jgit//org.eclipse.jgit.junit:junit",
"@jgit_lib//jar:src": "@jgit//org.eclipse.jgit:libjgit-src.jar",
"@jgit_lib//jar": "@jgit//org.eclipse.jgit:jgit",
"@jgit_servlet//jar":"@jgit//org.eclipse.jgit.http.server:jgit-servlet",
"@jgit_archive//jar": "@jgit//org.eclipse.jgit.archive:jgit-archive",
}
if LOCAL_JGIT_REPO:
return mapping[name]
else:
return name
| Python | 0 |
6a5729d566a6e75c97b67a544dd7aed9c857e6de | update attachment attributes | data_center/models.py | data_center/models.py | # -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
from django.utils.http import urlquote
attachment_url_format = 'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE/JH/output/6_6.1_6.1.12/%s.pdf' # noqa
class Course(models.Model):
"""Course database schema"""
no = models.CharField(max_length=20, blank=True)
code = models.CharField(max_length=20, blank=True)
eng_title = models.CharField(max_length=200, blank=True)
chi_title = models.CharField(max_length=200, blank=True)
note = models.TextField(blank=True)
objective = models.CharField(max_length=80, blank=True)
time = models.CharField(max_length=20, blank=True)
time_token = models.CharField(max_length=20, blank=True)
teacher = models.CharField(max_length=40, blank=True) # Only save Chinese
room = models.CharField(max_length=20, blank=True)
credit = models.IntegerField(blank=True, null=True)
limit = models.IntegerField(blank=True, null=True)
prerequisite = models.BooleanField(default=False, blank=True)
ge = models.CharField(max_length=80, blank=True)
hit = models.IntegerField(default=0)
syllabus = models.TextField(blank=True) # pure text
has_attachment = models.BooleanField(default=False) # has pdf
def __str__(self):
return self.no
@property
def attachment_url(self):
return attachment_url_format % urlquote(self.no)
class Department(models.Model):
dept_name = models.CharField(max_length=20, blank=True)
required_course = models.ManyToManyField(Course, blank=True)
def __unicode__(self):
return self.dept_name
class Announcement(models.Model):
TAG_CHOICE = (
('Info', '公告'),
('Bug', '已知問題'),
('Fix', '問題修復'),
)
content = models.TextField(blank=True)
time = models.DateTimeField(default=datetime.now)
tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')
def __unicode__(self):
return '%s|%s' % (self.time, self.tag)
| # -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
class Course(models.Model):
"""Course database schema"""
no = models.CharField(max_length=20, blank=True)
code = models.CharField(max_length=20, blank=True)
eng_title = models.CharField(max_length=200, blank=True)
chi_title = models.CharField(max_length=200, blank=True)
note = models.TextField(blank=True)
objective = models.CharField(max_length=80, blank=True)
time = models.CharField(max_length=20, blank=True)
time_token = models.CharField(max_length=20, blank=True)
teacher = models.CharField(max_length=40, blank=True) # Only save Chinese
room = models.CharField(max_length=20, blank=True)
credit = models.IntegerField(blank=True, null=True)
limit = models.IntegerField(blank=True, null=True)
prerequisite = models.BooleanField(default=False, blank=True)
ge = models.CharField(max_length=80, blank=True)
hit = models.IntegerField(default=0)
syllabus = models.TextField(blank=True) # pure text
def __str__(self):
return self.no
class Department(models.Model):
dept_name = models.CharField(max_length=20, blank=True)
required_course = models.ManyToManyField(Course, blank=True)
def __unicode__(self):
return self.dept_name
class Announcement(models.Model):
TAG_CHOICE = (
('Info', '公告'),
('Bug', '已知問題'),
('Fix', '問題修復'),
)
content = models.TextField(blank=True)
time = models.DateTimeField(default=datetime.now)
tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')
def __unicode__(self):
return '%s|%s' % (self.time, self.tag)
| Python | 0.000001 |
00dec661c39437e2fd031328431ab59ca428aaf3 | Fix deprecation warning regarding BaseException.message | linkedin/utils.py | linkedin/utils.py | # -*- coding: utf-8 -*-
import requests
from .exceptions import LinkedInError, get_exception_for_error_code
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import json
def enum(enum_type='enum', base_classes=None, methods=None, **attrs):
"""
Generates a enumeration with the given attributes.
"""
# Enumerations can not be initalized as a new instance
def __init__(instance, *args, **kwargs):
raise RuntimeError('%s types can not be initialized.' % enum_type)
if base_classes is None:
base_classes = ()
if methods is None:
methods = {}
base_classes = base_classes + (object,)
for k, v in methods.iteritems():
methods[k] = classmethod(v)
attrs['enums'] = attrs.copy()
methods.update(attrs)
methods['__init__'] = __init__
return type(enum_type, base_classes, methods)
def to_utf8(st):
if isinstance(st, unicode):
return st.encode('utf-8')
else:
return bytes(st)
def raise_for_error(response):
try:
response.raise_for_status()
except (requests.HTTPError, requests.ConnectionError) as error:
try:
if len(response.content) == 0:
# There is nothing we can do here since LinkedIn has neither sent
# us a 2xx response nor a response content.
return
response = response.json()
if ('error' in response) or ('errorCode' in response):
message = '%s: %s' % (response.get('error', str(error)),
response.get('message', 'Unknown Error'))
error_code = response.get('status')
ex = get_exception_for_error_code(error_code)
raise ex(message)
else:
raise LinkedInError(error.message)
except (ValueError, TypeError):
raise LinkedInError(error.message)
HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST',
PUT='PUT', DELETE='DELETE', PATCH='PATCH')
| # -*- coding: utf-8 -*-
import requests
from .exceptions import LinkedInError, get_exception_for_error_code
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import json
def enum(enum_type='enum', base_classes=None, methods=None, **attrs):
"""
Generates a enumeration with the given attributes.
"""
# Enumerations can not be initalized as a new instance
def __init__(instance, *args, **kwargs):
raise RuntimeError('%s types can not be initialized.' % enum_type)
if base_classes is None:
base_classes = ()
if methods is None:
methods = {}
base_classes = base_classes + (object,)
for k, v in methods.iteritems():
methods[k] = classmethod(v)
attrs['enums'] = attrs.copy()
methods.update(attrs)
methods['__init__'] = __init__
return type(enum_type, base_classes, methods)
def to_utf8(st):
if isinstance(st, unicode):
return st.encode('utf-8')
else:
return bytes(st)
def raise_for_error(response):
try:
response.raise_for_status()
except (requests.HTTPError, requests.ConnectionError) as error:
try:
if len(response.content) == 0:
# There is nothing we can do here since LinkedIn has neither sent
# us a 2xx response nor a response content.
return
response = response.json()
if ('error' in response) or ('errorCode' in response):
message = '%s: %s' % (response.get('error', error.message),
response.get('message', 'Unknown Error'))
error_code = response.get('status')
ex = get_exception_for_error_code(error_code)
raise ex(message)
else:
raise LinkedInError(error.message)
except (ValueError, TypeError):
raise LinkedInError(error.message)
HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST',
PUT='PUT', DELETE='DELETE', PATCH='PATCH')
| Python | 0.000013 |
7210d1d7840fb9190d616e1a59af6e9619f93835 | Add VoiceCloseReasons | litecord/enums.py | litecord/enums.py | """
enums.py - Various Enums used by litecord
"""
class OP:
"""Gateway OP codes."""
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
STATUS_UPDATE = 3
VOICE_STATE_UPDATE = 4
VOICE_SERVER_PING = 5
RESUME = 6
RECONNECT = 7
REQUEST_GUILD_MEMBERS = 8
INVALID_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
GUILD_SYNC = 12
class CloseCodes:
"""Websocket close codes used by the gateway."""
UNKNOWN_ERROR = 4000
UNKNOWN_OP = 4001
DECODE_ERROR = 4002
NOT_AUTH = 4003
AUTH_FAILED = 4004
ALREADY_AUTH = 4005
INVALID_SEQ = 4007
RATE_LIMITED = 4008
SESSION_TIMEOUT = 4009
INVALID_SHARD = 4010
SHARDING_REQUIRED = 4011
CloseReasons = {
CloseCodes.UNKNOWN_OP: 'Unknown OP code',
CloseCodes.NOT_AUTH: 'Not authenticated',
CloseCodes.AUTH_FAILED: 'Failed to authenticate',
CloseCodes.ALREADY_AUTH: 'Already identified',
CloseCodes.INVALID_SEQ: 'Invalid sequence',
CloseCodes.RATE_LIMITED: 'Rate limited',
CloseCodes.SESSION_TIMEOUT: 'Session timed out',
CloseCodes.INVALID_SHARD: 'Invalid Shard',
CloseCodes.SHARDING_REQUIRED: 'Sharding required',
}
class VoiceOP:
"""Voice OP codes.
These OP codes are used in the Voice Websocket.
"""
IDENTIFY = 0
SELECT_PROTOCOL = 1
READY = 2
HEARTBEAT = 3
SESSION_DESCRIPTION = 4
SPEAKING = 5
HEARTBEAT_ACK = 6
RESUME = 7
HELLO = 8
RESUMED = 9
CLIENT_DISCONNECT = 13
class VoiceWSCloseCodes:
"""Close codes used by the Voice WebSocket."""
UNKNOWN_OP = 4001
NOT_AUTH = 4003
AUTH_FAILED = 4004
ALREADY_AUTH = 4005
INVALID_SESSION = 4006
SESSION_TIMEOUT = 4009
SERVER_NOT_FOUND = 4011
UNKNOWN_PROTOCOL = 4012
DISCONNECTED = 4014
SERVER_CRASH = 4015
UNKNOWN_ENC_MODE = 4016
VoiceCloseReasons = {
VoiceWSCloseCodes.UNKNOWN_OP: 'Unknown OP code sent',
VoiceWSCloseCodes.NOT_AUTH: 'Not authenticated..',
VoiceWSCloseCodes.AUTH_FAILED: 'Authentication failed',
VoiceWSCloseCodes.ALREADY_AUTH: 'Already Authenticated',
VoiceWSCloseCodes.INVALID_SESSION: 'Invalid session provided',
VoiceWSCloseCodes.SESSION_TIMEOUT: 'Session Timeout',
VoiceWSCloseCodes.SERVER_NOT_FOUND: 'Server not found to connect',
VoiceWSCloseCodes.UNKNOWN_PROTOCOL: 'Unknown Protocol',
VoiceWSCloseCodes.DISCONNECTED: 'Disconnected from Voice',
VoiceWSCloseCodes.SERVER_CRASH: 'Server crashed',
VoiceWSCloseCodes.UNKNOWN_ENC_CODE: 'Unknown encryption',
}
class AppType:
    """Application Type."""
    # Only bot applications are modelled so far.
    BOT = 0
class ChannelType:
    """Channel Type."""
    GUILD_TEXT = 0      # text channel inside a guild
    DM = 1              # direct message between two users
    GUILD_VOICE = 2     # voice channel inside a guild
    GROUP_DM = 3        # group direct message
    GUILD_CATEGORY = 4  # category that groups other channels
class MessageType:
    """Message Type.
    ``DEFAULT`` is the one that users can usually send.
    The rest are system messages.
    """
    DEFAULT = 0
    # System messages (generated by the server, not typed by users):
    RECIPIENT_ADD = 1
    RECIPIENT_REMOVE = 2
    CALL = 3
    CHANNEL_NAME_CHANGE = 4
    CHANNEL_ICON_CHANGE = 5
    CHANNEL_PINNED_MESSAGE = 6
    GUILD_MEMBER_JOIN = 7
| """
enums.py - Various Enums used by litecord
"""
class OP:
    """Gateway OP codes.

    NOTE(review): the values mirror Discord's gateway opcode table --
    confirm against the upstream protocol documentation.
    """
    DISPATCH = 0
    HEARTBEAT = 1
    IDENTIFY = 2
    STATUS_UPDATE = 3
    VOICE_STATE_UPDATE = 4
    VOICE_SERVER_PING = 5
    RESUME = 6
    RECONNECT = 7
    REQUEST_GUILD_MEMBERS = 8
    INVALID_SESSION = 9
    HELLO = 10
    HEARTBEAT_ACK = 11
    GUILD_SYNC = 12
class CloseCodes:
    """Websocket close codes used by the gateway."""
    UNKNOWN_ERROR = 4000
    UNKNOWN_OP = 4001
    DECODE_ERROR = 4002
    NOT_AUTH = 4003
    AUTH_FAILED = 4004
    ALREADY_AUTH = 4005
    INVALID_SEQ = 4007
    RATE_LIMITED = 4008
    SESSION_TIMEOUT = 4009
    INVALID_SHARD = 4010
    SHARDING_REQUIRED = 4011
# Human-readable reason for each gateway close code.  UNKNOWN_ERROR and
# DECODE_ERROR previously had no entry, so looking up a reason for them
# raised KeyError; every code defined above is now covered.
CloseReasons = {
    CloseCodes.UNKNOWN_ERROR: 'Unknown error',
    CloseCodes.UNKNOWN_OP: 'Unknown OP code',
    CloseCodes.DECODE_ERROR: 'Failed to decode payload',
    CloseCodes.NOT_AUTH: 'Not authenticated',
    CloseCodes.AUTH_FAILED: 'Failed to authenticate',
    CloseCodes.ALREADY_AUTH: 'Already identified',
    CloseCodes.INVALID_SEQ: 'Invalid sequence',
    CloseCodes.RATE_LIMITED: 'Rate limited',
    CloseCodes.SESSION_TIMEOUT: 'Session timed out',
    CloseCodes.INVALID_SHARD: 'Invalid Shard',
    CloseCodes.SHARDING_REQUIRED: 'Sharding required',
}
class VoiceOP:
"""Voice OP codes.
These OP codes are used in the Voice Websocket.
"""
IDENTIFY = 0
SELECT_PROTOCOL = 1
READY = 2
HEARTBEAT = 3
SESSION_DESCRIPTION = 4
SPEAKING = 5
HEARTBEAT_ACK = 6
RESUME = 7
HELLO = 8
RESUMED = 9
CLIENT_DISCONNECT = 13
class VoiceWSCloseCodes:
"""Close codes used by the Voice WebSocket."""
UNKNOWN_OP = 4001
NOT_AUTH = 4003
AUTH_FAILED = 4004
ALREADY_AUTH = 4005
INVALID_SESSION = 4006
SESSION_TIMEOUT = 4009
SERVER_NOT_FOUND = 4011
UNKNOWN_PROTOCOL = 4012
DISCONNECTED = 4014
SERVER_CRASH = 4015
UNKNOWN_ENC_MODE = 4016
class AppType:
"""Application Type."""
BOT = 0
class ChannelType:
"""Channel Type."""
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
class MessageType:
"""Message Type.
``DEFAULT`` is the one that users can usually send.
The rest are system messages.
"""
DEFAULT = 0
RECIPIENT_ADD = 1
RECIPIENT_REMOVE = 2
CALL = 3
CHANNEL_NAME_CHANGE = 4
CHANNEL_ICON_CHANGE = 5
CHANNEL_PINNED_MESSAGE = 6
GUILD_MEMBER_JOIN = 7
| Python | 0.000001 |
17028a6ae567e9d67dbaa99b86a956fefdd3e792 | fix pynotify | livereload/app.py | livereload/app.py | import os
import logging
import tornado.web
import tornado.options
import tornado.ioloop
from tornado import escape
from tornado import websocket
from tornado.util import ObjectDict
from livereload.task import Task
ROOT = os.path.abspath(os.path.dirname(__file__))
STATIC_PATH = os.path.join(ROOT, 'static')
NOTIFIER = None
APPLICATION_ICON = None
def _get_growl():
    """Return a Growl notifier callable, or None if registration fails.

    Raises ImportError when the gntp package is unavailable; the caller
    (send_notify) relies on that to fall through to the next backend.
    """
    import gntp.notifier
    growl = gntp.notifier.GrowlNotifier(
        applicationName='Python LiveReload',
        notifications=['Message'],
        defaultNotifications=['Message'],
        applicationIcon=APPLICATION_ICON,
    )
    result = growl.register()
    if result is not True:
        # Growl daemon refused registration; signal "backend unusable".
        return None
    def notifier(message):
        return growl.notify(
            'Message',
            'LiveReload',
            message,
            icon=APPLICATION_ICON,
        )
    return notifier
def _get_notifyOSD():
    """Return a notify-OSD (libnotify) notifier callable.

    Imports pynotify lazily; an ImportError propagates to the caller.
    """
    import pynotify
    pynotify.init('Python LiveReload')  # init() requires the app name
    return lambda message: pynotify.Notification('LiveReload', message).show()
def send_notify(message):
    """Send *message* through the first notification backend that works.

    The backend is resolved once and cached in the module-global NOTIFIER.
    Resolution order: Growl (gntp), then notify-OSD (pynotify), finally a
    plain ``logging.info`` fallback so notifying can never crash the server.

    Fixes two failure modes of the previous chain: a None returned by
    _get_growl() (registration failure) was cached and then called, raising
    TypeError; and an ImportError raised inside _get_notifyOSD() escaped
    uncaught when gntp was missing as well.
    """
    global NOTIFIER
    if NOTIFIER is None:
        try:
            NOTIFIER = _get_growl()
        except Exception:
            NOTIFIER = None
        if NOTIFIER is None:
            try:
                NOTIFIER = _get_notifyOSD()
            except Exception:
                NOTIFIER = logging.info
    return NOTIFIER(message)
class LiveReloadHandler(websocket.WebSocketHandler):
    """Tornado websocket handler speaking the LiveReload 2.x protocol.

    Every connected browser is kept in the class-level ``waiters`` set and
    receives a ``reload`` command whenever a watched file changes.
    """
    # Browser connections currently subscribed to reload events.
    waiters = set()
    # True once the file-watching PeriodicCallback has been started.
    _watch_running = False
    def allow_draft76(self):
        # Accept the legacy draft-76 (hixie) websocket handshake.
        return True
    def on_close(self):
        # Forget this connection when the browser goes away.
        if self in LiveReloadHandler.waiters:
            LiveReloadHandler.waiters.remove(self)
        send_notify('There are %s waiters left' % len(self.waiters))
    def send_message(self, message):
        """Send *message* to this socket, JSON-encoding dicts first."""
        if isinstance(message, dict):
            message = escape.json_encode(message)
        try:
            self.write_message(message)
        except:
            logging.error('Error sending message', exc_info=True)
    def watch_tasks(self):
        """Poll the registered watchers; broadcast a reload on any change."""
        path = Task.watch()
        if not path:
            return
        send_notify('Reload %s waiters\nChanged %s' % (
            len(LiveReloadHandler.waiters), path))
        msg = {
            'command': 'reload',
            'path': path,
            'liveCSS': True,
        }
        # Iterate over a snapshot: broken sockets get removed from the set
        # inside the loop, and mutating a set while iterating it raises
        # RuntimeError, which previously aborted the broadcast mid-way.
        for waiter in list(LiveReloadHandler.waiters):
            try:
                waiter.write_message(msg)
            except:
                logging.error('Error sending message', exc_info=True)
                LiveReloadHandler.waiters.remove(waiter)
    def on_message(self, message):
        """Handle protocol messages: the 'hello' handshake and 'info'."""
        message = ObjectDict(escape.json_decode(message))
        if message.command == 'hello':
            handshake = {}
            handshake['command'] = 'hello'
            protocols = message.protocols
            protocols.append(
                'http://livereload.com/protocols/2.x-remote-control'
            )
            handshake['protocols'] = protocols
            handshake['serverName'] = 'livereload-tornado'
            self.send_message(handshake)
        if message.command == 'info' and 'url' in message:
            send_notify('Browser Connected: %s' % message.url)
            LiveReloadHandler.waiters.add(self)
            if not LiveReloadHandler._watch_running:
                # Load watch rules from ./Guardfile, else watch the cwd.
                try:
                    execfile('Guardfile')
                except:
                    Task.add(os.getcwd())
                LiveReloadHandler._watch_running = True
                logging.info('Start watching changes')
                tornado.ioloop.PeriodicCallback(self.watch_tasks, 500).start()
handlers = [
(r'/livereload', LiveReloadHandler),
(r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}),
]
def main():
    """Run the LiveReload server on the protocol's standard port 35729."""
    tornado.options.parse_command_line()
    app = tornado.web.Application(handlers=handlers)
    app.listen(35729)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
| import os
import logging
import tornado.web
import tornado.options
import tornado.ioloop
from tornado import escape
from tornado import websocket
from tornado.util import ObjectDict
from livereload.task import Task
ROOT = os.path.abspath(os.path.dirname(__file__))
STATIC_PATH = os.path.join(ROOT, 'static')
NOTIFIER = None
APPLICATION_ICON = None
def _get_growl():
import gntp.notifier
growl = gntp.notifier.GrowlNotifier(
applicationName='Python LiveReload',
notifications=['Message'],
defaultNotifications=['Message'],
applicationIcon=APPLICATION_ICON,
)
result = growl.register()
if result is not True:
return None
def notifier(message):
return growl.notify(
'Message',
'LiveReload',
message,
icon=APPLICATION_ICON,
)
return notifier
def _get_notifyOSD():
    """Return a notify-OSD (libnotify) notifier callable.

    pynotify.init() requires the application-name argument; calling it with
    no arguments raises TypeError, which broke this backend entirely.
    """
    import pynotify
    pynotify.init('Python LiveReload')
    return lambda message: pynotify.Notification('LiveReload', message).show()
def send_notify(message):
    """Send *message* through the first notification backend that works.

    The backend is resolved once and cached in the module-global NOTIFIER.
    Resolution order: Growl (gntp), then notify-OSD (pynotify), finally a
    plain ``logging.info`` fallback so notifying can never crash the server.

    Fixes two failure modes of the previous chain: a None returned by
    _get_growl() (registration failure) was cached and then called, raising
    TypeError; and an ImportError raised inside _get_notifyOSD() escaped
    uncaught when gntp was missing as well.
    """
    global NOTIFIER
    if NOTIFIER is None:
        try:
            NOTIFIER = _get_growl()
        except Exception:
            NOTIFIER = None
        if NOTIFIER is None:
            try:
                NOTIFIER = _get_notifyOSD()
            except Exception:
                NOTIFIER = logging.info
    return NOTIFIER(message)
class LiveReloadHandler(websocket.WebSocketHandler):
waiters = set()
_watch_running = False
def allow_draft76(self):
return True
def on_close(self):
if self in LiveReloadHandler.waiters:
LiveReloadHandler.waiters.remove(self)
send_notify('There are %s waiters left' % len(self.waiters))
def send_message(self, message):
if isinstance(message, dict):
message = escape.json_encode(message)
try:
self.write_message(message)
except:
logging.error('Error sending message', exc_info=True)
def watch_tasks(self):
path = Task.watch()
if path:
send_notify('Reload %s waiters\nChanged %s' % \
(len(LiveReloadHandler.waiters), path))
msg = {
'command': 'reload',
'path': path,
'liveCSS': True
}
for waiter in LiveReloadHandler.waiters:
try:
waiter.write_message(msg)
except:
logging.error('Error sending message', exc_info=True)
LiveReloadHandler.waiters.remove(waiter)
def on_message(self, message):
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {}
handshake['command'] = 'hello'
protocols = message.protocols
protocols.append(
'http://livereload.com/protocols/2.x-remote-control'
)
handshake['protocols'] = protocols
handshake['serverName'] = 'livereload-tornado'
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
send_notify('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self)
if not LiveReloadHandler._watch_running:
try:
execfile('Guardfile')
except:
Task.add(os.getcwd())
LiveReloadHandler._watch_running = True
logging.info('Start watching changes')
tornado.ioloop.PeriodicCallback(self.watch_tasks, 500).start()
handlers = [
(r'/livereload', LiveReloadHandler),
(r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}),
]
def main():
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=handlers)
app.listen(35729)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
| Python | 0.000031 |
004c9c11441f59590121a5428fce29ccde3f7694 | Fix error with keyword argument | mesh_utils.py | mesh_utils.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import bmesh
from mathutils import bvhtree
def delete_loose(bm):
    """Remove wire and unconnected vertices from bmesh *bm* in place.

    Candidates are collected first: calling ``bm.verts.remove()`` while
    iterating the live ``bm.verts`` sequence invalidates the iterator.
    """
    loose = [v for v in bm.verts if v.is_wire or not v.link_edges]
    for v in loose:
        bm.verts.remove(v)
class MeshUtils:
    """Mixin of bmesh helpers for the Booltron boolean operators.

    NOTE(review): relies on the host operator class for the
    ``keep_objects``, ``pos_correct`` and ``triangulate`` settings and the
    ``object_pos_correct()`` method -- confirm against the classes that
    mix this in.
    """
    def object_overlap(self, obs):
        """Return True if the evaluated meshes of *obs* intersect in world space."""
        depsgraph = bpy.context.depsgraph
        bm = bmesh.new()
        # Merge every object's evaluated, world-transformed mesh into one bmesh.
        for ob in obs:
            me = ob.to_mesh(depsgraph, True)
            me.transform(ob.matrix_world)
            bm.from_mesh(me)
            bpy.data.meshes.remove(me)
        bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
        tree = bvhtree.BVHTree.FromBMesh(bm, epsilon=0.00001)
        # Overlapping the tree with itself reports intersections anywhere in
        # the combined geometry, i.e. between the original objects.
        overlap = tree.overlap(tree)
        bm.free()
        return bool(overlap)
    def object_prepare(self):
        """Turn the selected (non-active) objects into single-user meshes,
        optionally keeping copies and correcting their position."""
        ob1 = bpy.context.object
        obs = bpy.context.selected_objects
        if ob1.select_get():
            obs.remove(ob1)
        if self.keep_objects:
            # TODO local view
            # space_data = bpy.context.space_data
            # Duplicate each operand so the originals survive the boolean.
            for ob in obs:
                ob_copy = ob.copy()
                ob_copy.data = ob.data.copy()
                for coll in ob.users_collection:
                    coll.objects.link(ob_copy)
                # TODO local view
                # if self.local_view:
                #     base.layers_from_view(space_data)
                ob_copy.select_set(True)
                ob.select_set(False)
        bpy.ops.object.make_single_user(object=True, obdata=True)
        bpy.ops.object.convert(target="MESH")
        if self.pos_correct:
            self.object_pos_correct(obs)
    def mesh_prepare(self, ob, select=False):
        """Clean *ob*'s mesh (merge doubles, drop loose verts, fill holes,
        optionally triangulate) and set every face's select state."""
        me = ob.data
        bm = bmesh.new()
        bm.from_mesh(me)
        bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
        delete_loose(bm)
        bmesh.ops.holes_fill(bm, edges=bm.edges)
        if self.triangulate:
            bmesh.ops.triangulate(bm, faces=bm.faces, quad_method="SHORT_EDGE")
        for f in bm.faces:
            f.select = select
        bm.to_mesh(me)
        bm.free()
    def mesh_cleanup(self, ob):
        """Merge doubled vertices and drop loose geometry from *ob*'s mesh."""
        me = ob.data
        bm = bmesh.new()
        bm.from_mesh(me)
        bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
        delete_loose(bm)
        bm.to_mesh(me)
        bm.free()
    def mesh_check(self, ob):
        """Report an error and return True if *ob* has a non-manifold edge."""
        bm = bmesh.new()
        bm.from_mesh(ob.data)
        for e in bm.edges:
            if not e.is_manifold:
                self.report({"ERROR"}, "Boolean operation result is non-manifold")
                bm.free()
                return True
        bm.free()
        return False
| # ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import bmesh
from mathutils import bvhtree
def delete_loose(bm):
    """Remove wire and unconnected vertices from bmesh *bm* in place.

    Candidates are collected first: calling ``bm.verts.remove()`` while
    iterating the live ``bm.verts`` sequence invalidates the iterator.
    """
    loose = [v for v in bm.verts if v.is_wire or not v.link_edges]
    for v in loose:
        bm.verts.remove(v)
class MeshUtils:
def object_overlap(self, obs):
depsgraph = bpy.context.depsgraph
bm = bmesh.new()
for ob in obs:
me = ob.to_mesh(depsgraph, True)
me.transform(ob.matrix_world)
bm.from_mesh(me)
bpy.data.meshes.remove(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
tree = bvhtree.BVHTree.FromBMesh(bm, epsilon=0.00001)
overlap = tree.overlap(tree)
bm.free()
return bool(overlap)
def object_prepare(self):
ob1 = bpy.context.object
obs = bpy.context.selected_objects
if ob1.select_get():
obs.remove(ob1)
if self.keep_objects:
# TODO local view
# space_data = bpy.context.space_data
for ob in obs:
ob_copy = ob.copy()
ob_copy.data = ob.data.copy()
for coll in ob.users_collection:
coll.objects.link(ob_copy)
# TODO local view
# if self.local_view:
# base.layers_from_view(space_data)
ob_copy.select_set(True)
ob.select_set(False)
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.convert(target="MESH")
if self.pos_correct:
self.object_pos_correct(obs)
def mesh_prepare(self, ob, select=False):
me = ob.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
delete_loose(bm)
bmesh.ops.holes_fill(bm, edges=bm.edges)
if self.triangulate:
bmesh.ops.triangulate(bm, faces=bm.faces, quad_method=3)
for f in bm.faces:
f.select = select
bm.to_mesh(me)
bm.free()
def mesh_cleanup(self, ob):
me = ob.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
delete_loose(bm)
bm.to_mesh(me)
bm.free()
def mesh_check(self, ob):
bm = bmesh.new()
bm.from_mesh(ob.data)
for e in bm.edges:
if not e.is_manifold:
self.report({"ERROR"}, "Boolean operation result is non-manifold")
bm.free()
return True
bm.free()
return False
| Python | 0.000017 |
36e8335bc146e4eda6801b2c148410c3ea620ae5 | Update scipy.py | wigs/scipy.py | wigs/scipy.py | class scipy(PythonWig):
tarball_uri = 'https://github.com/scipy/scipy/releases/download/v$RELEASE_VERSION$/scipy-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v0.18.1'
git_uri = 'https://github.com/scipy/scipy'
dependencies = ['numpy']
optional_dependencies = ['openblas']
supported_features = ['openblas']
default_features = ['+openblas']
def setup(self):
self.site_cfg = []
def switch_openblas_on(self):
self.require('openblas')
include_dirs = map(os.path.abspath, P.prefix_include_dirs)
lib_dirs = map(os.path.abspath, P.prefix_lib_dirs)
self.site_cfg += [
'[openblas]',
'libraries = openblas',
'include_dirs = %s' % os.path.pathsep.join(include_dirs),
'library_dirs = %s' % os.path.pathsep.join(lib_dirs),
'runtime_library_dirs = %s' % os.path.pathsep.join(lib_dirs)
]
| class scipy(PythonWig):
tarball_uri = 'https://github.com/scipy/scipy/releases/download/v$RELEASE_VERSION$/scipy-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v0.18.1'
git_uri = 'https://github.com/scipy/scipy'
dependencies = ['numpy']
| Python | 0.000002 |
374c386a6b2dd1ad1ba75ba70009de6c7ee3c3fc | Add process_request method to Application | restalchemy/api/applications.py | restalchemy/api/applications.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import dec
from restalchemy.api import resources
from restalchemy.api import routes
DEFAULT_CONTENT_TYPE = 'application/json'
class WSGIApp(object):
    """WSGI application that dispatches requests through a RestAlchemy route."""
    def __init__(self, route_class):
        """Build the root route and register the global resource map.

        :param route_class: route class describing the API tree.
        """
        super(WSGIApp, self).__init__()
        self._main_route = routes.route(route_class)
        resources.ResourceMap.set_resource_map(
            routes.Route.build_resource_map(route_class))
    def process_request(self, req):
        # Dispatch a single webob request through the root route.
        return self._main_route(req).do()
    @dec.wsgify
    def __call__(self, req):
        # webob's wsgify adapts (environ, start_response) to a Request.
        return self.process_request(req)
# Backwards-compatible alias for the application class.
Application = WSGIApp
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import dec
from restalchemy.api import resources
from restalchemy.api import routes
DEFAULT_CONTENT_TYPE = 'application/json'
class WSGIApp(object):
def __init__(self, route_class):
super(WSGIApp, self).__init__()
self._main_route = routes.route(route_class)
resources.ResourceMap.set_resource_map(
routes.Route.build_resource_map(route_class))
@dec.wsgify
def __call__(self, req):
return self._main_route(req).do()
Application = WSGIApp
| Python | 0.000002 |
b7e8af6ef92c0244bd5121c528e3e85441b0d835 | Disable test/mac/gyptest-objc-gc.py when using Xcode 5.1 | test/mac/gyptest-objc-gc.py | test/mac/gyptest-objc-gc.py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that GC objc settings are handled correctly.
"""
import TestGyp
import TestMac
import sys
if sys.platform == 'darwin':
  # Objective-C garbage collection only ever existed on macOS.
  # set |match| to ignore build stderr output.
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
                         match = lambda a, b: True)
  # Xcode 5.1 removed support for garbage-collection:
  # error: garbage collection is no longer supported
  # NOTE(review): string comparison against '0510' presumably relies on
  # TestMac returning zero-padded version strings -- confirm.
  if TestMac.Xcode.Version() < '0510':
    CHDIR = 'objc-gc'
    test.run_gyp('test.gyp', chdir=CHDIR)
    build_error_code = {
      'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
      'make': 2,
      'ninja': 1,
    }[test.format]
    # GC-required targets must fail to build against non-GC code, and
    # vice versa; the compatible combinations must build and run.
    test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
    test.build(
        'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
    test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
    test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
    test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
    test.run_built_executable(
        'gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
    test.build('test.gyp', 'gc_exe', chdir=CHDIR)
    test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
    test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
    test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
  # On Xcode >= 5.1 the suite passes vacuously.
  test.pass_test()
| #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that GC objc settings are handled correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
CHDIR = 'objc-gc'
test.run_gyp('test.gyp', chdir=CHDIR)
build_error_code = {
'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
'make': 2,
'ninja': 1,
}[test.format]
test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
test.build(
'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
test.run_built_executable('gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe', chdir=CHDIR)
test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
test.pass_test()
| Python | 0.00004 |
da557b0b26d144095988a8809a97b83791077f20 | fix number | biblioteca/views.py | biblioteca/views.py | from django.shortcuts import render
from .models import Temas, Biblioteca
from django.shortcuts import get_object_or_404
from django.db.models import Q
# Create your views here.
def index(request,template='biblioteca/index.html',slug=None):
    """Render the home page: every topic plus the 12 newest guides."""
    temas = Temas.objects.all()
    # tipo_documento=1 presumably selects "guide" documents -- confirm model.
    ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:12]
    # locals() exposes every local (request, template, slug included) as the
    # template context, so variable names here are part of the contract.
    return render(request, template, locals())
def buscar_guia(request, template='biblioteca/lista_guias.html'):
    """List guides whose title or description contains the ?q= search term."""
    buscar_palabra = request.GET.get('q')
    # NOTE(review): a missing ?q= yields None in the icontains lookups --
    # confirm callers always supply the parameter.
    resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra))
    return render(request, template, locals())
def buscar_tema(request, template='biblioteca/lista_guias.html', id=None):
    """List every library item belonging to the topic with the given id."""
    temas = Temas.objects.all()
    # 404s when the requested topic does not exist.
    buscar_palabra = get_object_or_404(Temas,id=id)
    resultado = Biblioteca.objects.filter(tema=buscar_palabra)
    return render(request, template, locals())
def detalle_guia(request,template='biblioteca/detalle.html',slug=None):
    """Detail page for a single guide looked up by slug (404 if absent)."""
    temas = Temas.objects.all()
    la_guia = get_object_or_404(Biblioteca, slug=slug)
    return render(request, template, locals())
| from django.shortcuts import render
from .models import Temas, Biblioteca
from django.shortcuts import get_object_or_404
from django.db.models import Q
# Create your views here.
def index(request,template='biblioteca/index.html',slug=None):
temas = Temas.objects.all()
ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:6]
return render(request, template, locals())
def buscar_guia(request, template='biblioteca/lista_guias.html'):
buscar_palabra = request.GET.get('q')
resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra))
return render(request, template, locals())
def buscar_tema(request, template='biblioteca/lista_guias.html', id=None):
temas = Temas.objects.all()
buscar_palabra = get_object_or_404(Temas,id=id)
resultado = Biblioteca.objects.filter(tema=buscar_palabra)
return render(request, template, locals())
def detalle_guia(request,template='biblioteca/detalle.html',slug=None):
temas = Temas.objects.all()
la_guia = get_object_or_404(Biblioteca, slug=slug)
return render(request, template, locals())
| Python | 0.000047 |
1412c1a15f4b8b09beb4b7eb4b3245eaeb343a14 | Bump sleep time for Github API reader | src/api_readers/github_daemon.py | src/api_readers/github_daemon.py | from api_reader_daemon import APIReaderDaemon
import datetime
import time
from models import GithubRepo
from models import GithubRepoEvent
from github import Github
class GithubReaderDaemon(APIReaderDaemon):
    """Daemon that polls GitHub for push events on registered repositories."""
    def __init__(self, **kwargs):
        # neh. don't need it.
        # NOTE(review): the base __init__ is skipped, yet start() reads
        # self.session -- presumably APIReaderDaemon provides it at class
        # level; confirm.
        pass
    def start(self):
        """Poll forever, recording the newest push event of each watched repo."""
        while True:
            a_minute_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60)
            repos_to_read = self.session.query(GithubRepo).all()
            for repo in repos_to_read:
                try:
                    gh = Github()
                    e_repo = gh.get_repo(repo.gh_username + '/' + repo.gh_repo)
                    events = e_repo.get_events()
                    # Only record pushes newer than one minute ago.
                    # NOTE(review): the loop sleeps 120s but the window is
                    # 60s, so events in between can be missed -- confirm.
                    if events[0].created_at > a_minute_ago and events[0].type == 'PushEvent':
                        author = events[0].actor
                        commit = events[0].payload['commits'][0]['message']
                        new_event = GithubRepoEvent(repo.id, author.name,
                                author.avatar_url, commit)
                        self.session.add(new_event)
                except:
                    # Best effort: skip repos that 404, rate-limit, etc.
                    continue
            self.session.commit()
            time.sleep(120)
    def stop(self):
        # or whatever
        pass
if __name__ == '__main__':
GithubReaderDaemon().start()
| from api_reader_daemon import APIReaderDaemon
import datetime
import time
from models import GithubRepo
from models import GithubRepoEvent
from github import Github
class GithubReaderDaemon(APIReaderDaemon):
def __init__(self, **kwargs):
# neh. don't need it.
pass
def start(self):
while True:
a_minute_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60)
repos_to_read = self.session.query(GithubRepo).all()
for repo in repos_to_read:
try:
gh = Github()
e_repo = gh.get_repo(repo.gh_username + '/' + repo.gh_repo)
events = e_repo.get_events()
if events[0].created_at > a_minute_ago and events[0].type == 'PushEvent':
author = events[0].actor
commit = events[0].payload['commits'][0]['message']
new_event = GithubRepoEvent(repo.id, author.name,
author.avatar_url, commit)
self.session.add(new_event)
except:
continue
self.session.commit()
time.sleep(60)
def stop(self):
# or whatever
pass
if __name__ == '__main__':
GithubReaderDaemon().start()
| Python | 0 |
d01430e40d923fdced0d753822a1f62fe69a916e | add analytics folder to path | bigbang/__init__.py | bigbang/__init__.py | from . import analysis
| Python | 0.000001 | |
17147f02abdb50f6df6398c8c3c750d858c1c758 | fix docs | doc/ext/nova_autodoc.py | doc/ext/nova_autodoc.py | import gettext
import os
gettext.install('nova')
from nova import utils
def setup(app):
    """Sphinx extension hook: generate the autodoc index before building."""
    rootdir = os.path.abspath(app.srcdir + '/..')
    print "**Autodocumenting from %s" % rootdir
    # chdir so generate_autodoc_index.sh runs relative to the project root.
    os.chdir(rootdir)
    rv = utils.execute('./generate_autodoc_index.sh')
    print rv[0]
| import gettext
import os
gettext.install('nova')
from nova import utils
def setup(app):
rootdir = os.path.abspath(app.srcdir + '/..')
print "**Autodocumenting from %s" % rootdir
rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
print rv[0]
| Python | 0.000008 |
d72a1dde759e4993f7c75764fd36668192b387e5 | Clean up middleware code | middleware.py | middleware.py | from mixcloud.speedbar.modules.base import RequestTrace
from django.utils.encoding import smart_unicode, smart_str
from django.utils.html import escapejs
from django.core.urlresolvers import reverse
from gargoyle import gargoyle
import re
HTML_TYPES = ('text/html', 'application/xhtml+xml')
METRIC_PLACEHOLDER_RE = re.compile('<span data-module="(?P<module>[^"]+)" data-metric="(?P<metric>[^"]+)"></span>')
class SpeedbarMiddleware(object):
    """Django middleware that attaches Speedbar metrics to every response."""
    def process_request(self, request):
        # Label the root trace span with the request line for later display.
        RequestTrace.instance().stacktracer.root.label = '%s %s' % (request.method, request.path)
    def process_response(self, request, response):
        """Add metric headers; for staff, inject the speedbar panel into HTML."""
        request_trace = RequestTrace.instance()
        metrics = dict((key, module.get_metrics()) for key, module in request_trace.modules.items())
        self.add_response_headers(response, metrics)
        if hasattr(request, 'user') and request.user.is_staff:
            # Only rewrite uncompressed HTML responses.
            if 'gzip' not in response.get('Content-Encoding', '') and response.get('Content-Type', '').split(';')[0] in HTML_TYPES:
                # Force render of response (from lazy TemplateResponses) before speedbar is injected
                if hasattr(response, 'render'):
                    response.render()
                content = smart_unicode(response.content)
                content = self.replace_templatetag_placeholders(content, metrics)
                # Note: The URLs returned here do not exist at this point. The relevant data is added to the cache by a signal handler
                # once all page processing is finally done. This means it is possible summary values displayed and the detailed
                # break down won't quite correspond.
                if gargoyle.is_active('speedbar:panel', request):
                    panel_url = reverse('speedbar_panel', args=[request_trace.id])
                    content = content.replace(
                        u'<script data-speedbar-panel-url-placeholder></script>',
                        u'<script>var _speedbar_panel_url = "%s";</script>' % (escapejs(panel_url),))
                if gargoyle.is_active('speedbar:trace', request):
                    response['X-TraceUrl'] = reverse('speedbar_trace', args=[request_trace.id])
                response.content = smart_str(content)
                if response.get('Content-Length', None):
                    # The body was rewritten; keep the header consistent.
                    response['Content-Length'] = len(response.content)
        return response
    def add_response_headers(self, response, metrics):
        """
        Adds all summary metrics to the response headers, so they can be stored in nginx logs if desired.
        """
        def sanitize(string):
            return string.title().replace(' ','-')
        for module, module_values in metrics.items():
            for key, value in module_values.items():
                response['X-Mixcloud-%s-%s' % (sanitize(module), sanitize(key))] = value
    def replace_templatetag_placeholders(self, content, metrics):
        """
        The templatetags defined in this module add placeholder values which we replace with true values here. They
        cannot just insert the values directly as not all processing may have happened by that point.
        """
        def replace_placeholder(match):
            module = match.group('module')
            metric = match.group('metric')
            return unicode(metrics[module][metric])
        return METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)
| from mixcloud.speedbar.modules.base import RequestTrace
from django.utils.encoding import smart_unicode, smart_str
from django.utils.html import escapejs
from django.core.urlresolvers import reverse
from gargoyle import gargoyle
import re
HTML_TYPES = ('text/html', 'application/xhtml+xml')
METRIC_PLACEHOLDER_RE = re.compile('<span data-module="(?P<module>[^"]+)" data-metric="(?P<metric>[^"]+)"></span>')
class SpeedbarMiddleware(object):
def process_request(self, request):
RequestTrace.instance().stacktracer.root.label = '%s %s' % (request.method, request.path)
def process_response(self, request, response):
request_trace = RequestTrace.instance()
def sanitize(string):
return string.title().replace(' ','-')
metrics = dict((key, module.get_metrics()) for key, module in request_trace.modules.items())
for module, module_values in metrics.items():
for key, value in module_values.items():
response['X-Mixcloud-%s-%s' % (sanitize(module), sanitize(key))] = value
if hasattr(request, 'user') and request.user.is_staff:
if 'gzip' not in response.get('Content-Encoding', '') and response.get('Content-Type', '').split(';')[0] in HTML_TYPES:
# Force render of response (from lazy TemplateResponses) before speedbar is injected
if hasattr(response, 'render'):
response.render()
content = smart_unicode(response.content)
def replace_placeholder(match):
module = match.group('module')
metric = match.group('metric')
return unicode(metrics[module][metric])
content = METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)
if gargoyle.is_active('speedbar:panel', request):
panel_url = reverse('speedbar_panel', args=[request_trace.id])
content = content.replace(
u'<script data-speedbar-panel-url-placeholder></script>',
u'<script>var _speedbar_panel_url = "%s";</script>' % (escapejs(panel_url),))
if gargoyle.is_active('speedbar:trace', request):
response['X-TraceUrl'] = reverse('speedbar_trace', args=[request_trace.id])
response.content = smart_str(content)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response
| Python | 0.001611 |
14a0293224e78875e74bfc6491017d6059aa07f5 | Enhance PEP8 | bin/tftpy_server.py | bin/tftpy_server.py | #!/usr/bin/env python
# vim: ts=4 sw=4 et ai:
# -*- coding: utf8 -*-
import logging
import sys
from optparse import OptionParser
import tftpy
log = logging.getLogger('tftpy')
log.setLevel(logging.INFO)
# console handler
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
default_formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler.setFormatter(default_formatter)
log.addHandler(handler)
def main():
    """Parse the command-line options and run a blocking TFTP server.

    Exits with status 1 when no document root is given or the server
    fails to start; a Ctrl-C shuts the server down silently.
    """
    parser = OptionParser(usage="")
    # (flags, keyword arguments) table, applied in order below.
    option_specs = [
        (('-i', '--ip'),
         dict(type='string', default="",
              help='ip address to bind to (default: INADDR_ANY)')),
        (('-p', '--port'),
         dict(type='int', default=69,
              help='local port to use (default: 69)')),
        (('-r', '--root'),
         dict(type='string', default=None,
              help='path to serve from')),
        (('-q', '--quiet'),
         dict(action='store_true', default=False,
              help="Do not log unless it is critical")),
        (('-d', '--debug'),
         dict(action='store_true', default=False,
              help='upgrade logging from info to debug')),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    opts, _positional = parser.parse_args()
    if opts.debug:
        # increase the verbosity of the formatter
        log.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter(
            '[%(asctime)s%(msecs)03d] %(levelname)s [%(name)s:%(lineno)s] %(message)s'))
    elif opts.quiet:
        log.setLevel(logging.WARNING)
    if not opts.root:
        # A document root is mandatory; show usage and bail out.
        parser.print_help()
        sys.exit(1)
    server = tftpy.TftpServer(opts.root)
    try:
        server.listen(opts.ip, opts.port)
    except tftpy.TftpException as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
# vim: ts=4 sw=4 et ai:
# -*- coding: utf8 -*-
import logging
import sys
from optparse import OptionParser
import tftpy
# Module-level logger/handler; main() adjusts level and formatter from
# the command-line flags.
log = logging.getLogger('tftpy')
log.setLevel(logging.INFO)
# console handler
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
default_formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler.setFormatter(default_formatter)
log.addHandler(handler)
def main():
    """Parse the command-line options and run a blocking TFTP server.

    Exits with status 1 when no document root is given or the server
    fails to start; a Ctrl-C shuts the server down silently.
    """
    usage=""
    parser = OptionParser(usage=usage)
    parser.add_option('-i',
                      '--ip',
                      type='string',
                      help='ip address to bind to (default: INADDR_ANY)',
                      default="")
    parser.add_option('-p',
                      '--port',
                      type='int',
                      help='local port to use (default: 69)',
                      default=69)
    parser.add_option('-r',
                      '--root',
                      type='string',
                      help='path to serve from',
                      default=None)
    parser.add_option('-q',
                      '--quiet',
                      action='store_true',
                      default=False,
                      help="Do not log unless it is critical")
    parser.add_option('-d',
                      '--debug',
                      action='store_true',
                      default=False,
                      help='upgrade logging from info to debug')
    options, args = parser.parse_args()
    if options.debug:
        log.setLevel(logging.DEBUG)
        # increase the verbosity of the formatter
        debug_formatter = logging.Formatter('[%(asctime)s%(msecs)03d] %(levelname)s [%(name)s:%(lineno)s] %(message)s')
        handler.setFormatter(debug_formatter)
    elif options.quiet:
        log.setLevel(logging.WARNING)
    if not options.root:
        # A document root is mandatory; show usage and bail out.
        parser.print_help()
        sys.exit(1)
    server = tftpy.TftpServer(options.root)
    try:
        server.listen(options.ip, options.port)
    except tftpy.TftpException as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    main()
| Python | 0 |
dabbf0b5796a4d16bdd588e9d8c541c1f3c8559b | Support for building multiple images at once | src/ddocker/app/build.py | src/ddocker/app/build.py | """
"""
import logging
import pesos.scheduler
import os
import threading
import time
from pesos.vendor.mesos import mesos_pb2
from ddocker.app import subcommand
from ddocker.app.scheduler import Scheduler
from Queue import Queue
logger = logging.getLogger("ddocker.build")
def args(parser):
    """Attach the ``build`` sub-command arguments to *parser*.

    Adds the positional Dockerfile list plus the tag/executor flags, and
    two option groups covering build isolation limits and the staging
    filesystem credentials.
    """
    parser.add_argument(
        "dockerfile", nargs="+")
    parser.add_argument(
        "--tag", dest="tags", action="append", default=[],
        help="Multiple tags to apply to the image once built")
    parser.add_argument(
        "--executor-uri", dest="executor", required=True,
        help="URI to the ddocker executor for mesos")
    # Resource limits applied while the image builds.
    isolation = parser.add_argument_group("isolation")
    isolation.add_argument(
        "--cpu-limit", default=1.0,
        help="CPU allocated to building the image")
    isolation.add_argument(
        "--mem-limit", default=256,
        help="Memory allocated to building the image (mb)")
    # Where intermediate build artifacts are staged; the S3 credentials
    # default to the usual AWS environment variables.
    fs = parser.add_argument_group("fs")
    fs.add_argument(
        "--staging-uri", default="/tmp/ddocker",
        help="The URI to use as a base directory for staging files.")
    fs.add_argument(
        "--aws-access-key-id", default=os.environ.get("AWS_ACCESS_KEY_ID"),
        help="Access key for using the S3 filesystem")
    fs.add_argument(
        "--aws-secret-access-key", default=os.environ.get("AWS_SECRET_ACCESS_KEY"),
        help="Secret key for using the S3 filesystem")
@subcommand("build", callback=args)
def main(args):
    """Run the ``build`` sub-command: schedule one build task per
    Dockerfile on a Mesos cluster and block until the driver stops.

    ``args`` is the parsed namespace produced by ``args()`` above, plus
    global options such as ``mesos_master`` and ``framework_id``.
    """
    logger.info("Building docker image from %s", args.dockerfile)
    task_queue = Queue()
    # Launch the mesos framework
    framework = mesos_pb2.FrameworkInfo()
    framework.user = "" # Let mesos fill this in
    framework.name = "ddocker"
    if args.framework_id:
        framework.id.value = args.framework_id
    # Kick off the scheduler driver
    scheduler = Scheduler(
        task_queue,
        args.executor,
        args.cpu_limit,
        args.mem_limit,
        args
    )
    driver = pesos.scheduler.MesosSchedulerDriver(
        scheduler, framework, args.mesos_master
    )
    # Queue one build task (dockerfile, tags) per requested Dockerfile.
    for dockerfile in args.dockerfile:
        task_queue.put((dockerfile, args.tags))
    # Run the driver on a daemon thread so Ctrl-C still interrupts us.
    thread = threading.Thread(target=driver.run)
    thread.setDaemon(True)
    thread.start()
    # Wait here until the tasks are done
    while thread.isAlive():
        time.sleep(0.5)
| """
"""
import logging
import pesos.scheduler
import os
import threading
import time
from pesos.vendor.mesos import mesos_pb2
from ddocker.app import subcommand
from ddocker.app.scheduler import Scheduler
from Queue import Queue
logger = logging.getLogger("ddocker.build")
def args(parser):
    """Attach the ``build`` sub-command arguments to *parser*: a single
    positional Dockerfile, the tag/executor flags, and option groups for
    isolation limits and the staging filesystem credentials."""
    parser.add_argument("dockerfile")
    parser.add_argument("--tag", action="append", default=[], dest="tags",
                        help="Multiple tags to apply to the image once built")
    parser.add_argument("--executor-uri", dest="executor", required=True,
                        help="URI to the ddocker executor for mesos")
    # Isolation
    group = parser.add_argument_group("isolation")
    group.add_argument("--cpu-limit", default=1.0,
                       help="CPU allocated to building the image")
    group.add_argument("--mem-limit", default=256,
                       help="Memory allocated to building the image (mb)")
    # Arguments for the staging filesystem
    group = parser.add_argument_group("fs")
    group.add_argument("--staging-uri", default="/tmp/ddocker",
                       help="The URI to use as a base directory for staging files.")
    group.add_argument("--aws-access-key-id", default=os.environ.get("AWS_ACCESS_KEY_ID"),
                       help="Access key for using the S3 filesystem")
    group.add_argument("--aws-secret-access-key", default=os.environ.get("AWS_SECRET_ACCESS_KEY"),
                       help="Secret key for using the S3 filesystem")
@subcommand("build", callback=args)
def main(args):
    """Run the ``build`` sub-command: schedule a single build task on a
    Mesos cluster and block until the driver stops."""
    logger.info("Building docker image from %s", args.dockerfile)
    task_queue = Queue()
    # Launch the mesos framework
    framework = mesos_pb2.FrameworkInfo()
    framework.user = "" # Let mesos fill this in
    framework.name = "ddocker"
    if args.framework_id:
        framework.id.value = args.framework_id
    # Kick off the scheduler driver
    scheduler = Scheduler(
        task_queue,
        args.executor,
        args.cpu_limit,
        args.mem_limit,
        args
    )
    driver = pesos.scheduler.MesosSchedulerDriver(
        scheduler, framework, args.mesos_master
    )
    # Put the task onto the queue
    task_queue.put((args.dockerfile, args.tags))
    # Run the driver on a daemon thread so Ctrl-C still interrupts us.
    thread = threading.Thread(target=driver.run)
    thread.setDaemon(True)
    thread.start()
    # Wait here until the tasks are done
    while thread.isAlive():
        time.sleep(0.5)
| Python | 0 |
70445bd32ba08b9bd88726a7551345f71ae4e630 | Improve logging, refactoring | executor/opensubmit/executor/execution.py | executor/opensubmit/executor/execution.py | '''
Functions related to command execution on the local host.
'''
from .result import Result, PassResult, FailResult
import logging
logger = logging.getLogger('opensubmit.executor')
import os, sys, platform, subprocess, signal
from threading import Timer
def kill_longrunning(config):
    '''
    Terminate everything under the current user account that has run too long.
    This is a final safeguard if the subprocess timeout stuff is not working.
    You better have no production servers running also under the current user account ...

    config -- configparser-style object; the limit is read from
              [Execution] timeout (seconds).
    '''
    import psutil
    # BUGFIX: `time` was used below but never imported anywhere in this
    # module, so the loop body raised NameError on the first match.
    import time
    ourpid = os.getpid()
    # NOTE(review): on psutil >= 2.0, `username`/`create_time` are methods,
    # not attributes -- confirm the pinned psutil version.
    username = psutil.Process(ourpid).username
    # check for other processes running under this account
    timeout = config.getint("Execution", "timeout")
    for proc in psutil.process_iter():
        if proc.username == username and proc.pid != ourpid:
            runtime = time.time() - proc.create_time
            logger.debug("This user already runs %u for %u seconds." % (proc.pid, runtime))
            if runtime > timeout:
                logger.debug("Killing %u due to exceeded runtime." % proc.pid)
                try:
                    proc.kill()
                except Exception:
                    # Best effort: the process may be gone or unkillable.
                    logger.error("ERROR killing process %d." % proc.pid)
def shell_execution(cmdline, working_dir, timeout=999999):
    '''
    Run given shell command in the given working directory with the given timeout.
    Return according result object.

    cmdline     -- argv-style list handed to subprocess.Popen.
    working_dir -- directory the command runs in; also set as
                   LD_LIBRARY_PATH so submitted code can load its own libs.
    timeout     -- seconds before the child is killed.

    Returns a PassResult on exit code 0, otherwise a FailResult; either
    way the result carries the child's exit code in ``error_code``.
    '''
    got_timeout = False
    # Allow code to load its own libraries
    os.environ["LD_LIBRARY_PATH"] = working_dir
    try:
        if platform.system() == "Windows":
            proc = subprocess.Popen(cmdline,
                                    cwd=working_dir,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
                                    universal_newlines=True)
        else:
            # New session so a timeout can kill the whole process group.
            proc = subprocess.Popen(cmdline,
                                    cwd=working_dir,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    preexec_fn=os.setsid,
                                    universal_newlines=True)
        output = None
        try:
            output, stderr = proc.communicate(timeout=timeout)
            logger.debug("Process regulary finished.")
        except subprocess.TimeoutExpired as e:
            got_timeout = True
            logger.debug("Process killed by timeout: " + str(e))
            # BUGFIX: communicate() does not terminate the child on timeout;
            # previously it kept running and returncode stayed None.  Kill
            # it (whole group on POSIX), reap it and grab its output so far.
            try:
                if platform.system() == "Windows":
                    proc.kill()
                else:
                    os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
                output, stderr = proc.communicate()
            except Exception:
                logger.warning("Could not clean up timed-out process %s." % proc.pid)
        if output is None:
            output = ""
    except Exception:
        # Popen itself failed (missing binary, bad cwd, ...).
        details = str(sys.exc_info())
        logger.info("Exception on process execution: " + details)
        return FailResult("Internal error on execution: " + details)
    logger.info("Executed {0} with error code {1}.".format(cmdline, proc.returncode))
    if proc.returncode != 0:
        logger.debug("Output of the failed execution:\n" + output)
    dircontent = os.listdir(working_dir)
    logger.debug("Working directory after execution: " + str(dircontent))
    if got_timeout:
        res = FailResult("Execution was terminated because it took too long (%u seconds). Output so far:\n\n%s" % (timeout, output))
    else:
        text = 'Execution of "{0}" ended with error code {1}.\n{2}\nDirectory content as I see it:\n{3}'.format(
            ' '.join(cmdline),
            proc.returncode,
            output,
            str(dircontent))
        if proc.returncode == 0:
            res = PassResult(text)
        else:
            res = FailResult(text)
    res.error_code = proc.returncode
    return res
| '''
Functions related to command execution on the local host.
'''
from .submission import Submission
from .result import Result, PassResult, FailResult
import logging
logger = logging.getLogger('opensubmit.executor')
import os, sys, platform, subprocess, signal
from threading import Timer
def kill_longrunning(config):
    '''
    Terminate everything under the current user account that has run too long.
    This is a final safeguard if the subprocess timeout stuff is not working.
    You better have no production servers running also under the current user account ...
    '''
    import psutil
    # NOTE(review): `time` is used below but never imported in this module
    # (module imports are os, sys, platform, subprocess, signal only) --
    # the loop body raises NameError as soon as a process matches.
    ourpid = os.getpid()
    # NOTE(review): on psutil >= 2.0, `username`/`create_time` are methods,
    # not attributes -- confirm the pinned psutil version.
    username = psutil.Process(ourpid).username
    # check for other processes running under this account
    timeout = config.getint("Execution","timeout")
    for proc in psutil.process_iter():
        if proc.username == username and proc.pid != ourpid:
            runtime = time.time() - proc.create_time
            logger.debug("This user already runs %u for %u seconds." % (proc.pid, runtime))
            if runtime > timeout:
                logger.debug("Killing %u due to exceeded runtime." % proc.pid)
                try:
                    proc.kill()
                except Exception as e:
                    logger.error("ERROR killing process %d." % proc.pid)
def shell_execution(cmdline, working_dir, timeout=999999):
    '''
    Run given shell command in the given working directory with the given timeout.
    Return according result object (PassResult / FailResult).
    '''
    got_timeout = False
    # Allow code to load its own libraries
    os.environ["LD_LIBRARY_PATH"]=working_dir
    try:
        if platform.system() == "Windows":
            proc = subprocess.Popen(cmdline,
                                    cwd=working_dir,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
                                    universal_newlines=True)
        else:
            # New session so the child gets its own process group.
            proc = subprocess.Popen(cmdline,
                                    cwd=working_dir,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    preexec_fn=os.setsid,
                                    universal_newlines=True)
        output = None
        try:
            output, stderr = proc.communicate(timeout=timeout)
            logger.debug("Process regulary finished.")
        except subprocess.TimeoutExpired as e:
            # NOTE(review): the child is not killed/reaped here; it keeps
            # running past the timeout and returncode stays None.
            got_timeout = True
            logger.debug("Process killed by timeout: " + str(e))
        if output == None:
            output = ""
    except Exception:
        # Popen itself failed (missing binary, bad cwd, ...).
        details = str(sys.exc_info())
        logger.info("Exception on process execution: " + details)
        return FailResult("Internal error on execution: "+details)
    logger.info("Executed with error code {0}.".format(proc.returncode))
    if proc.returncode!=0:
        logger.debug("Output of the failed execution:\n"+output)
    dircontent = os.listdir(working_dir)
    logger.debug("Working directory after execution: " + str(dircontent))
    if got_timeout:
        # NOTE(review): unlike the branch below, this result carries no
        # error_code/stdout attributes -- consumers must not rely on them.
        res=FailResult("Execution was terminated because it took too long (%u seconds). Output so far:\n\n%s"%(timeout,output))
    else:
        if proc.returncode == 0:
            res = PassResult()
        else:
            res = FailResult()
        res.error_code=proc.returncode
        res.stdout=output+"\n\nDirectory content as I see it:\n\n" + str(dircontent)
    return res
| Python | 0.000002 |
f2eb527e7602472856f981726b91bb23bbf22a9e | Add URL decorator to dashboard app | stores/dashboard/app.py | stores/dashboard/app.py | from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from oscar.core.application import Application
from oscar.apps.dashboard.nav import register, Node
from oscar.views.decorators import staff_member_required
from stores.dashboard import views
# Register a "Store Manager" entry (with Stores / Store Groups children)
# in the Oscar dashboard navigation, at position 100.
node = Node(_('Store Manager'))
node.add_child(Node(_('Stores'), 'stores-dashboard:store-list'))
node.add_child(Node(_('Store Groups'), 'stores-dashboard:store-group-list'))
register(node, 100)
class StoresDashboardApplication(Application):
    """Dashboard application exposing CRUD URLs for stores and store
    groups.  Every URL is wrapped by ``staff_member_required`` via
    ``get_url_decorator``, so the whole dashboard is staff-only.
    """
    name = 'stores-dashboard'
    # CRUD views for individual stores.
    store_list_view = views.StoreListView
    store_create_view = views.StoreCreateView
    store_update_view = views.StoreUpdateView
    store_delete_view = views.StoreDeleteView
    # CRUD views for store groups.
    store_group_list_view = views.StoreGroupListView
    store_group_create_view = views.StoreGroupCreateView
    store_group_update_view = views.StoreGroupUpdateView
    store_group_delete_view = views.StoreGroupDeleteView
    def get_urls(self):
        # Store URLs live at the app root; group URLs under groups/.
        urlpatterns = patterns('',
            url(r'^$', self.store_list_view.as_view(), name='store-list'),
            url(
                r'^create/$',
                self.store_create_view.as_view(),
                name='store-create'
            ),
            url(
                r'^update/(?P<pk>[\d]+)/$',
                self.store_update_view.as_view(),
                name='store-update'
            ),
            url(
                r'^delete/(?P<pk>[\d]+)/$',
                self.store_delete_view.as_view(),
                name='store-delete'
            ),
            url(
                r'^groups/$',
                self.store_group_list_view.as_view(),
                name='store-group-list'
            ),
            url(
                r'^groups/create/$',
                self.store_group_create_view.as_view(),
                name='store-group-create'
            ),
            url(
                r'^groups/update/(?P<pk>[\d]+)/$',
                self.store_group_update_view.as_view(),
                name='store-group-update'
            ),
            url(
                r'^groups/delete/(?P<pk>[\d]+)/$',
                self.store_group_delete_view.as_view(),
                name='store-group-delete'
            ),
        )
        return self.post_process_urls(urlpatterns)
    def get_url_decorator(self, url_name):
        # Apply the same staff-only protection to every dashboard URL.
        return staff_member_required
application = StoresDashboardApplication()
| from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from oscar.core.application import Application
from oscar.apps.dashboard.nav import register, Node
from stores.dashboard import views
# Register a "Store Manager" entry (with Stores / Store Groups children)
# in the Oscar dashboard navigation, at position 100.
node = Node(_('Store Manager'))
node.add_child(Node(_('Stores'), 'stores-dashboard:store-list'))
node.add_child(Node(_('Store Groups'), 'stores-dashboard:store-group-list'))
register(node, 100)
class StoresDashboardApplication(Application):
    """Dashboard application exposing CRUD URLs for stores and store
    groups.

    NOTE(review): no ``get_url_decorator`` is defined here, so these
    views rely on whatever protection the ``Application`` base applies
    by default -- confirm they are not publicly reachable.
    """
    name = 'stores-dashboard'
    # CRUD views for individual stores.
    store_list_view = views.StoreListView
    store_create_view = views.StoreCreateView
    store_update_view = views.StoreUpdateView
    store_delete_view = views.StoreDeleteView
    # CRUD views for store groups.
    store_group_list_view = views.StoreGroupListView
    store_group_create_view = views.StoreGroupCreateView
    store_group_update_view = views.StoreGroupUpdateView
    store_group_delete_view = views.StoreGroupDeleteView
    def get_urls(self):
        # Store URLs live at the app root; group URLs under groups/.
        urlpatterns = patterns('',
            url(r'^$', self.store_list_view.as_view(), name='store-list'),
            url(
                r'^create/$',
                self.store_create_view.as_view(),
                name='store-create'
            ),
            url(
                r'^update/(?P<pk>[\d]+)/$',
                self.store_update_view.as_view(),
                name='store-update'
            ),
            url(
                r'^delete/(?P<pk>[\d]+)/$',
                self.store_delete_view.as_view(),
                name='store-delete'
            ),
            url(
                r'^groups/$',
                self.store_group_list_view.as_view(),
                name='store-group-list'
            ),
            url(
                r'^groups/create/$',
                self.store_group_create_view.as_view(),
                name='store-group-create'
            ),
            url(
                r'^groups/update/(?P<pk>[\d]+)/$',
                self.store_group_update_view.as_view(),
                name='store-group-update'
            ),
            url(
                r'^groups/delete/(?P<pk>[\d]+)/$',
                self.store_group_delete_view.as_view(),
                name='store-group-delete'
            ),
        )
        return self.post_process_urls(urlpatterns)
application = StoresDashboardApplication()
| Python | 0 |
fd2b4f1d536aec9e92a8b793eb2294c0a935bc35 | add cb and co for pusher | btspusher/pusher.py | btspusher/pusher.py | # -*- coding: utf-8 -*-
import asyncio
from autobahn.asyncio.wamp import ApplicationSession
from autobahn.wamp import auth
from btspusher.wamp import ApplicationRunner
class PusherComponent(ApplicationSession):
    """WAMP session singleton used by ``Pusher`` below.

    Connection state lives in class attributes so the Pusher facade can
    reach the live session without holding a reference to it.
    """
    future = None  # resolved once the session joins; Pusher.__init__ waits on it
    instance = None  # the currently connected session, or None
    login_info = None  # optional dict with "user"/"password" for wampcra
    cb = None  # plain callback invoked with the session after join
    co = None  # coroutine invoked with the session after join
    @staticmethod
    def login(login_info):
        # Stash credentials for the next connection attempt.
        PusherComponent.login_info = login_info
    @asyncio.coroutine
    def onJoin(self, details):
        print("join")
        PusherComponent.instance = self
        if self.future:
            # Unblock Pusher.__init__(), which waits on this future.
            self.future.set_result(1)
            self.future = None
        # Fire the optional post-join hooks.
        if self.cb:
            self.cb(self)
        if self.co:
            yield from self.co(self)
    def onConnect(self):
        print("connected")
        if self.login_info:
            # Authenticated join via WAMP-CRA.
            self.join(self.config.realm, [u"wampcra"], self.login_info["user"])
        else:
            self.join(self.config.realm)
    def onChallenge(self, challenge):
        # Answer the wampcra challenge with the stored password.
        key = self.login_info["password"].encode('utf8')
        signature = auth.compute_wcs(
            key, challenge.extra['challenge'].encode('utf8'))
        return signature.decode('ascii')
    def onLeave(self, details):
        print("session left")
    def onDisconnect(self):
        # Drop the singleton so callers stop publishing into the void.
        PusherComponent.instance = None
        print("lost connect")
class Pusher(object):
    """Thin synchronous facade over the PusherComponent WAMP session."""
    def __init__(
            self, loop, login_info=None, co=None, cb=None):
        # Connect (optionally authenticated), install the post-join hooks
        # and block for up to 10 seconds until the session has joined.
        url = u"wss://pusher.btsbots.com/ws"
        realm = u"realm1"
        try:
            if login_info:
                PusherComponent.login(login_info)
            PusherComponent.future = asyncio.Future()
            PusherComponent.co = co
            PusherComponent.cb = cb
            runner = ApplicationRunner(url, realm)
            runner.run(PusherComponent)
            loop.run_until_complete(
                asyncio.wait_for(PusherComponent.future, 10))
        except Exception:
            print("can't connect to pusher.btsbots.com")
    def publish(self, *args, **kwargs):
        # Mirror the topic (first positional) into the payload as "__t".
        kwargs["__t"] = args[0]
        if PusherComponent.instance:
            PusherComponent.instance.publish(*args, **kwargs)
    def sync_subscribe(self, *args, **kwargs):
        if PusherComponent.instance:
            # NOTE(review): asyncio.wait(...) only builds a coroutine; it is
            # never scheduled on a loop here, so the subscribe may never run.
            asyncio.wait(PusherComponent.instance.subscribe(*args, **kwargs))
    @asyncio.coroutine
    def subscribe(self, *args, **kwargs):
        if PusherComponent.instance:
            yield from PusherComponent.instance.subscribe(*args, **kwargs)
    def sync_call(self, *args, **kwargs):
        if PusherComponent.instance:
            # NOTE(review): same un-awaited asyncio.wait() concern as above.
            asyncio.wait(PusherComponent.instance.call(*args, **kwargs))
    @asyncio.coroutine
    def call(self, *args, **kwargs):
        if PusherComponent.instance:
            yield from PusherComponent.instance.call(*args, **kwargs)
if __name__ == '__main__':
    # Ad-hoc smoke test: connect anonymously and publish one event.
    loop = asyncio.get_event_loop()
    bts_pusher = Pusher(loop)
    def on_event(i):
        print("Got event: {}".format(i))
    # bts_pusher.sync_subscribe(on_event, "public.test")
    bts_pusher.publish("public.test", "hello", a="bb")
    loop.run_forever()
    loop.close()
| # -*- coding: utf-8 -*-
import asyncio
from autobahn.asyncio.wamp import ApplicationSession
from autobahn.wamp import auth
from btspusher.wamp import ApplicationRunner
class PusherComponent(ApplicationSession):
    """WAMP session singleton used by ``Pusher`` below.

    Connection state lives in class attributes so the Pusher facade can
    reach the live session without holding a reference to it.
    """
    future = None  # resolved once the session joins; Pusher.__init__ waits on it
    instance = None  # the currently connected session, or None
    login_info = None  # optional dict with "user"/"password" for wampcra
    @staticmethod
    def login(login_info):
        # Stash credentials for the next connection attempt.
        PusherComponent.login_info = login_info
    @asyncio.coroutine
    def onJoin(self, details):
        print("join")
        if self.future:
            # Unblock Pusher.__init__(), which waits on this future.
            self.future.set_result(1)
            self.future = None
        PusherComponent.instance = self
    def onConnect(self):
        print("connected")
        if self.login_info:
            # Authenticated join via WAMP-CRA.
            self.join(self.config.realm, [u"wampcra"], self.login_info["user"])
        else:
            self.join(self.config.realm)
    def onChallenge(self, challenge):
        # Answer the wampcra challenge with the stored password.
        key = self.login_info["password"].encode('utf8')
        signature = auth.compute_wcs(
            key, challenge.extra['challenge'].encode('utf8'))
        return signature.decode('ascii')
    def onLeave(self, details):
        print("session left")
    def onDisconnect(self):
        # Drop the singleton so callers stop publishing into the void.
        PusherComponent.instance = None
        print("lost connect")
class Pusher(object):
    """Thin synchronous facade over the PusherComponent WAMP session."""
    def __init__(self, loop, login_info=None):
        # Connect (optionally authenticated) and block for up to 10
        # seconds until the session has joined the realm.
        url = u"wss://pusher.btsbots.com/ws"
        realm = u"realm1"
        try:
            if login_info:
                PusherComponent.login(login_info)
            PusherComponent.future = asyncio.Future()
            runner = ApplicationRunner(url, realm)
            runner.run(PusherComponent)
            loop.run_until_complete(
                asyncio.wait_for(PusherComponent.future, 10))
        except Exception:
            print("can't connect to pusher.btsbots.com")
    def publish(self, *args, **kwargs):
        # Mirror the topic (first positional) into the payload as "__t".
        kwargs["__t"] = args[0]
        if PusherComponent.instance:
            PusherComponent.instance.publish(*args, **kwargs)
    def sync_subscribe(self, *args, **kwargs):
        if PusherComponent.instance:
            # NOTE(review): asyncio.wait(...) only builds a coroutine; it is
            # never scheduled on a loop here, so the subscribe may never run.
            asyncio.wait(PusherComponent.instance.subscribe(*args, **kwargs))
    @asyncio.coroutine
    def subscribe(self, *args, **kwargs):
        if PusherComponent.instance:
            yield from PusherComponent.instance.subscribe(*args, **kwargs)
    def sync_call(self, *args, **kwargs):
        if PusherComponent.instance:
            # NOTE(review): same un-awaited asyncio.wait() concern as above.
            asyncio.wait(PusherComponent.instance.call(*args, **kwargs))
    @asyncio.coroutine
    def call(self, *args, **kwargs):
        if PusherComponent.instance:
            yield from PusherComponent.instance.call(*args, **kwargs)
if __name__ == '__main__':
    # Ad-hoc smoke test: connect anonymously and publish one event.
    loop = asyncio.get_event_loop()
    bts_pusher = Pusher(loop)
    def on_event(i):
        print("Got event: {}".format(i))
    # bts_pusher.sync_subscribe(on_event, "public.test")
    bts_pusher.publish("public.test", "hello", a="bb")
    loop.run_forever()
    loop.close()
| Python | 0 |
4fe36d96d3810b39fcd15dee87318763d0d277a9 | remove time | streamteam/io/nbody6.py | streamteam/io/nbody6.py | # coding: utf-8
""" Class for reading data from NBODY6 simulations """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import logging
import re
# Third-party
import numpy as np
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
# Project
from .core import NBodyReader
# Create logger
logger = logging.getLogger(__name__)
__all__ = ["NBODY6Reader"]
class NBODY6Reader(NBodyReader):
    def _read_units(self):
        """Return the simulation unit system: lengths in pc, speeds in
        km/s, plus a dimensionless entry for unitless columns."""
        units = dict(length=u.pc,
                     speed=u.km/u.s,
                     dimensionless=u.dimensionless_unscaled)
        return units
    def read_snapshot(self, filename, units=None):
        """ Given a filename, read and return the data. By default,
            returns data in simulation units, but this can be changed with
            the `units` kwarg.
        Parameters
        ----------
        filename : str
            The name of the shapshot file to read.
        units : dict (optional)
            A unit system to transform the data to. If None, will return
            the data in simulation units.
        Returns
        -------
        tbl : astropy.table.Table
            One row per particle, with unit-tagged columns.
        """
        fullpath = os.path.join(self.path, filename)
        # column names for SNAP file, in simulation units
        colnames = "id x y z vx vy vz".split()
        coltypes = "dimensionless length length length speed speed speed".split()
        colunits = [self.sim_units[x] for x in coltypes]
        # skiprows=1: the first line is a header, not particle data
        # (presumably particle count / timestep -- TODO confirm format).
        data = np.genfromtxt(fullpath, skiprows=1, names=colnames)
        if units is not None:
            # Convert each column into the requested unit system.
            new_colunits = []
            for colname,colunit in zip(colnames,colunits):
                newdata = (data[colname]*colunit).decompose(units)
                data[colname] = newdata.value
                new_colunits.append(newdata.unit)
            colunits = new_colunits
        tbl = Table(data)
        for colname,colunit in zip(colnames,colunits):
            tbl[colname].unit = colunit
        return tbl
| # coding: utf-8
""" Class for reading data from NBODY6 simulations """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import logging
import re
# Third-party
import numpy as np
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
# Project
from .core import NBodyReader
# Create logger
logger = logging.getLogger(__name__)
__all__ = ["NBODY6Reader"]
class NBODY6Reader(NBodyReader):
    """Reader for NBODY6 snapshot files (one particle per row)."""

    def _read_units(self):
        """Return the simulation unit system: lengths in pc, speeds in
        km/s, plus a dimensionless entry for unitless columns."""
        units = dict(length=u.pc,
                     speed=u.km/u.s,
                     dimensionless=u.dimensionless_unscaled)
        return units

    def read_snapshot(self, filename, units=None):
        """ Given a filename, read and return the data. By default,
        returns data in simulation units, but this can be changed with
        the `units` kwarg.

        Parameters
        ----------
        filename : str
            The name of the shapshot file to read.
        units : dict (optional)
            A unit system to transform the data to. If None, will return
            the data in simulation units.

        Returns
        -------
        tbl : astropy.table.Table
            One row per particle, with unit-tagged columns.
        """
        fullpath = os.path.join(self.path, filename)

        # column names for SNAP file, in simulation units
        colnames = "id x y z vx vy vz".split()
        coltypes = "dimensionless length length length speed speed speed".split()
        colunits = [self.sim_units[x] for x in coltypes]

        # skiprows=1: the first line is a header, not particle data.
        data = np.genfromtxt(fullpath, skiprows=1, names=colnames)
        if units is not None:
            # Convert each column into the requested unit system.
            new_colunits = []
            for colname, colunit in zip(colnames, colunits):
                newdata = (data[colname] * colunit).decompose(units)
                data[colname] = newdata.value
                new_colunits.append(newdata.unit)
            colunits = new_colunits

        # BUGFIX: the previous code did ``time = time.decompose(units)`` and
        # stored ``meta=dict(time=time.value)``, but ``time`` was never
        # defined (and no ``time`` module is imported), so every call raised
        # UnboundLocalError/NameError.  No snapshot time is available here,
        # so the table is returned without time metadata.
        tbl = Table(data)
        for colname, colunit in zip(colnames, colunits):
            tbl[colname].unit = colunit

        return tbl
| Python | 0.99805 |
73fbfd435c849c0690121b0a3fc8545057247c8a | Fix command options issues | mistral_actions/client/shell.py | mistral_actions/client/shell.py | import sys
from mistral_actions.client import actions as actions_cli
import mistral_actions.utils as utils
def do_clear(args):
    """Remove every registered action from Mistral."""
    actions_cli.unregister_all()
    message = "All actions are removed from Mistral successfully."
    print(message)
@utils.arg(
    '--override',
    dest='override',
    action="store_true",
    default=False,
    help="Set true will override all actions exist in Mistral.")
def do_register(args):
    """Register all discovered actions to Mistral.

    With ``--override``, actions that already exist on the server are
    unregistered first and then re-registered; otherwise only actions
    not yet known to Mistral are submitted.
    """
    override = args.override
    try:
        # Strip the flag so later positional handling does not trip on it.
        sys.argv.remove("--override")
    except ValueError:
        # BUGFIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; list.remove only raises ValueError.
        pass
    registered_actions = actions_cli.get_all_registered()
    discovered_actions = actions_cli.discover()
    registered_action_names = [a['name'] for a in registered_actions]
    discovered_action_names = [a['name'] for a in discovered_actions]
    # Actions that exist both locally and on the server.
    intersection = set(registered_action_names) & set(discovered_action_names)
    if override:
        # Drop the server-side copies so they get re-registered below.
        for name in intersection:
            actions_cli.unregister(name)
    else:
        # Keep existing registrations; only submit the brand-new actions.
        # (List comprehension instead of filter() so len()/truthiness below
        # also works on Python 3.)
        discovered_actions = [a for a in discovered_actions
                              if a['name'] not in registered_action_names]
    if discovered_actions:
        try:
            actions_cli.register_all(discovered_actions)
            print("Follow actions have been registered: ")
            for action in discovered_actions:
                print(action['name'])
        except Exception as ex:
            print("Fail to register actions: %s" % ex)
    else:
        print("No action need to register.")
def do_discover(args):
    """Discover every action defined in this project and print it."""
    found = actions_cli.discover()
    columns = ['name', 'description', 'input_str']
    print("Follow actions discovered: ")
    utils.print_list(found, columns, sortby_index=0)
@utils.arg('name', metavar='<name>', help='Name of action.')
def do_unregister(args):
    """Unregister an action from Mistral."""
    name = args.name
    try:
        # Strip the positional the same way do_register strips --override.
        sys.argv.remove(name)
    except ValueError:
        # CONSISTENCY FIX: the name may not literally appear in sys.argv;
        # do_register guards the equivalent removal, this one did not.
        pass
    actions_cli.unregister(name)
def do_markdown_dump(args):
    """Render every discovered action as a markdown table on stdout."""
    by_name = sorted(actions_cli.discover(), key=lambda action: action['name'])
    columns = ['name', 'description', 'input_str']
    utils.dump_as_markdown_table(by_name, columns)
def do_action_list(args):
    """List every action currently registered in Mistral."""
    registered = actions_cli.get_all_registered()
    columns = ['name', 'description', 'input_str']
    utils.print_list(registered, columns, sortby_index=0)
| import sys
from mistral_actions.client import actions as actions_cli
import mistral_actions.utils as utils
def do_clear(args):
    """Unregister all actions from Mistral."""
    actions_cli.unregister_all()
    print("All actions are removed from Mistral successfully.")
@utils.arg(
    '--override',
    dest='override',
    action="store_true",
    default=False,
    help="Set true will override all actions exist in Mistral.")
def do_register(args):
    """Register all discovered actions to Mistral.

    With ``--override``, actions that already exist on the server are
    unregistered first; otherwise only the brand-new actions are kept.
    """
    registered_actions = actions_cli.get_all_registered()
    discovered_actions = actions_cli.discover()
    registered_action_names = [a['name'] for a in registered_actions]
    discovered_action_names = [a['name'] for a in discovered_actions]
    # Actions that exist both locally and on the server.
    intersection = set(registered_action_names) & set(discovered_action_names)
    if args.override:
        # Drop the server-side copies so they get re-registered below.
        for name in intersection:
            actions_cli.unregister(name)
    else:
        # Keep existing registrations; only submit the new actions.
        discovered_actions = filter(
            lambda a: a['name'] not in registered_action_names,
            discovered_actions)
    actions_cli.register_all(discovered_actions)
def do_discover(args):
    """Discover all actions from this project."""
    discovered_actions = actions_cli.discover()
    # NOTE(review): "fileds" is a typo for "fields" (harmless, local name).
    fileds = ['name', 'description', 'input_str']
    print("Follow actions discovered: ")
    utils.print_list(discovered_actions, fileds, sortby_index=0)
@utils.arg('name', metavar='<name>', help='Name of action.')
def do_unregister(args):
    """Unregister an action from Mistral."""
    name = args.name
    # NOTE(review): raises ValueError if `name` is not literally present
    # in sys.argv -- confirm the CLI framework always leaves it there.
    sys.argv.remove(name)
    actions_cli.unregister(name)
def do_md_dump(args):
    """Dump all discovered actions to stdout."""
    sorted_actions = sorted(actions_cli.discover(), key=lambda a: a['name'])
    fileds = ['name', 'description', 'input_str']
    utils.dump_as_markdown_table(sorted_actions, fileds)
def do_action_list(args):
    """List all actions registered in Mistral."""
    actions = actions_cli.get_all_registered()
    fileds = ['name', 'description', 'input_str']
    utils.print_list(actions, fileds, sortby_index=0)
| Python | 0.000017 |
f846f58891e1389941f008e3f53c95ffd1b6558d | Update to add email functionality based on threshold checking. | dbtracker/__init__.py | dbtracker/__init__.py | import logging
from dbtracker.cli import Cli
import argparse
def main(argv=None):
    """Entry point: parse the CLI flags, configure logging and hand the
    parsed namespace to dbtracker's Cli driver."""
    parser = argparse.ArgumentParser(
        description="Queries MySQL and PostgreSQL for stats")
    # (flags, keyword arguments) table, applied in order below.
    flag_specs = [
        (("-S", "--save"),
         dict(action="store_true",
              help="generate and save database stats")),
        (("-g", "--growth"),
         dict(type=str,
              help="display a graph of the growth. Arguments in the form of run number ranges e.g. 3-4 or 4")),
        (("-H", "--history"),
         dict(type=int,
              help="List the datetime stamps of the last n saved runs")),
        (("-c", "--count"),
         dict(action="store_true",
              help="Gets database row counts but does not save")),
        (("-d", "--dates"),
         dict(type=str,
              help="compares two datetime stamps e.g. 2015-04-24 16:18:57.166095-07:00 - 2015-04-22 17:00:50.746688-07:00")),
        (("-s", "--silent"),
         dict(action="store_true",
              help="turns logging levels down to ERROR only")),
        (("--min",),
         dict(type=int,
              help="Minimum threshold of a database row change, before a notification is sent.")),
        (("--max",),
         dict(type=int,
              help="Maximum threshold of a database row change, before a notification is sent.")),
        (("-C", "--config"),
         dict(type=str,
              help="use a custom configuration file path")),
    ]
    for flags, options in flag_specs:
        parser.add_argument(*flags, **options)
    parsed = parser.parse_args(argv)
    # --silent trims console output to errors only.
    level = logging.ERROR if parsed.silent else logging.INFO
    logging.basicConfig(level=level)
    Cli(parsed).main()
| import logging
from dbtracker.cli import Cli
import argparse
def main(argv=None):
parser = argparse.ArgumentParser(
description="Queries MySQL and PostgreSQL for stats")
parser.add_argument(
"-S", "--save",
action="store_true",
help="generate and save database stats")
parser.add_argument(
"-g", "--growth",
help="display a graph of the growth. Arguments in the form of run number ranges e.g. 3-4 or 4",
type=str)
parser.add_argument(
"-H", "--history",
help="List the datetime stamps of the last n saved runs",
type=int)
parser.add_argument(
"-c", "--count",
action="store_true",
help="Gets database row counts but does not save")
parser.add_argument(
"-d", "--dates",
type=str,
help="compares two datetime stamps e.g. 2015-04-24 16:18:57.166095-07:00 - 2015-04-22 17:00:50.746688-07:00")
parser.add_argument(
"-s", "--silent",
action="store_true",
help="turns logging levels down to ERROR only")
parser.add_argument(
"-C", "--config",
type=str,
help="use a custom configuration file path")
args = parser.parse_args(argv)
if args.silent:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
cli = Cli(args)
cli.main()
| Python | 0 |
516844b2d34da22a4ad567ba25f900e1f747327c | exclude unversioned protos (#2672) | Logging/synth.py | Logging/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
library = gapic.php_library(
service='logging',
version='v2',
config_path='/google/logging/artman_logging.yaml',
artman_output_name='google-cloud-logging-v2')
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(
library / 'proto/src/Google/Cloud/Logging',
'src/',
[library / 'proto/src/Google/Cloud/Logging/Type'])
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(
library / 'proto/src/GPBMetadata/Google/Logging',
'metadata/',
[library / 'proto/src/GPBMetadata/Google/Logging/Type'])
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
s.replace(
'**/Gapic/*GapicClient.php',
r'Copyright \d{4}',
r'Copyright 2016')
for client in ['ConfigServiceV2', 'LoggingServiceV2', 'MetricsServiceV2']:
s.replace(
f'**/V2/{client}Client.php',
r'Copyright \d{4}',
'Copyright 2016')
s.replace(
'tests/**/V2/*Test.php',
r'Copyright \d{4}',
r'Copyright 2018')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
| # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
library = gapic.php_library(
service='logging',
version='v2',
config_path='/google/logging/artman_logging.yaml',
artman_output_name='google-cloud-logging-v2')
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(library / 'proto/src/Google/Cloud/Logging', 'src/')
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(library / 'proto/src/GPBMetadata/Google/Logging', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
s.replace(
'**/Gapic/*GapicClient.php',
r'Copyright \d{4}',
r'Copyright 2016')
for client in ['ConfigServiceV2', 'LoggingServiceV2', 'MetricsServiceV2']:
s.replace(
f'**/V2/{client}Client.php',
r'Copyright \d{4}',
'Copyright 2016')
s.replace(
'tests/**/V2/*Test.php',
r'Copyright \d{4}',
r'Copyright 2018')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
| Python | 0 |
838d8c8952f63464dfafaaeba3b16b681317c15e | add plot | tests/test_annotate.py | tests/test_annotate.py | import matplotlib.pyplot as plt
import numpy as np
def plot():
fig = plt.figure(1, figsize=(8, 5))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3))
t = np.arange(0.0, 5.0, 0.2)
s = np.cos(2 * np.pi * t)
ax.plot(t, s, color="blue")
ax.annotate(
"text",
xy=(4.0, 1.0),
xycoords="data",
xytext=(4.5, 1.5),
textcoords="data",
arrowprops=dict(arrowstyle="->", ec="r"),
)
ax.annotate(
"arrowstyle",
xy=(0, 1),
xycoords="data",
xytext=(-50, 30),
textcoords="offset points",
arrowprops=dict(arrowstyle="->"),
)
ax.annotate(
"no arrow",
xy=(0, 1),
xycoords="data",
xytext=(50, -30),
textcoords="offset pixels",
)
return fig
def test():
from .helpers import assert_equality
assert_equality(plot, __file__[:-3] + "_reference.tex")
if __name__ == "__main__":
plot()
plt.show()
| import matplotlib.pyplot as plt
import numpy as np
def plot():
fig = plt.figure(1, figsize=(8, 5))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3))
t = np.arange(0.0, 5.0, 0.2)
s = np.cos(2 * np.pi * t)
ax.plot(t, s, color="blue")
ax.annotate(
"text",
xy=(4.0, 1.0),
xycoords="data",
xytext=(4.5, 1.5),
textcoords="data",
arrowprops=dict(arrowstyle="->", ec="r"),
)
ax.annotate(
"arrowstyle",
xy=(0, 1),
xycoords="data",
xytext=(-50, 30),
textcoords="offset points",
arrowprops=dict(arrowstyle="->"),
)
ax.annotate(
"no arrow",
xy=(0, 1),
xycoords="data",
xytext=(50, -30),
textcoords="offset pixels",
)
return fig
def test():
from .helpers import assert_equality
assert_equality(plot, __file__[:-3] + "_reference.tex")
| Python | 0.000094 |
8e10a62052f252c21c3898f70fc10d23c7261af0 | Update urls.py | submify/submify/urls.py | submify/submify/urls.py | """submify URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples::
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'^student/', include('student.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| """submify URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'^student/', include('student.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| Python | 0.000002 |
cfb68d7e1146241b9783d82d09f7f813e658d4aa | fix doctests | tests/test_doctests.py | tests/test_doctests.py | # encoding: utf8
from quantiphy import Quantity
import pytest
import doctest
import glob
import sys
def test_README():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
rv = doctest.testfile('../README.rst', optionflags=doctest.ELLIPSIS)
assert rv.failed == 0
assert rv.attempted == 29
def test_quantiphy():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
rv = doctest.testfile('../quantiphy.py', optionflags=doctest.ELLIPSIS)
assert rv.failed == 0
assert rv.attempted == 100
# this target should be undated when the number of doctests change
def test_manual():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
expected_test_count = {
'../doc/index.rst': 31,
'../doc/user.rst': 368,
'../doc/api.rst': 0,
'../doc/examples.rst': 36,
'../doc/accessories.rst': 12,
'../doc/releases.rst': 0,
}
found = glob.glob('../doc/*.rst')
for f in found:
assert f in expected_test_count, f
for path, tests in expected_test_count.items():
rv = doctest.testfile(path, optionflags=doctest.ELLIPSIS)
assert rv.failed == 0, path
assert rv.attempted == tests, path
if __name__ == '__main__':
# As a debugging aid allow the tests to be run on their own, outside pytest.
# This makes it easier to see and interpret and textual output.
defined = dict(globals())
for k, v in defined.items():
if callable(v) and k.startswith('test_'):
print()
print('Calling:', k)
print((len(k)+9)*'=')
v()
| # encoding: utf8
from quantiphy import Quantity
import pytest
import doctest
import glob
import sys
def test_README():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
rv = doctest.testfile('../README.rst', optionflags=doctest.ELLIPSIS)
assert rv.failed == 0
assert rv.attempted == 29
def test_quantiphy():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
rv = doctest.testfile('../quantiphy.py', optionflags=doctest.ELLIPSIS)
assert rv.failed == 0
assert rv.attempted == 100
# this target should be undated when the number of doctests change
def test_manual():
if sys.version_info < (3, 6):
# code used in doctests assumes python3.6
return
Quantity.reset_prefs()
expected_test_count = {
'../doc/index.rst': 29,
'../doc/user.rst': 368,
'../doc/api.rst': 0,
'../doc/examples.rst': 36,
'../doc/accessories.rst': 12,
'../doc/releases.rst': 0,
}
found = glob.glob('../doc/*.rst')
for f in found:
assert f in expected_test_count, f
for path, tests in expected_test_count.items():
rv = doctest.testfile(path, optionflags=doctest.ELLIPSIS)
assert rv.failed == 0, path
assert rv.attempted == tests, path
if __name__ == '__main__':
# As a debugging aid allow the tests to be run on their own, outside pytest.
# This makes it easier to see and interpret and textual output.
defined = dict(globals())
for k, v in defined.items():
if callable(v) and k.startswith('test_'):
print()
print('Calling:', k)
print((len(k)+9)*'=')
v()
| Python | 0.000001 |
72f7162b2a307297798dbeb866d54de5acfdeffb | correct input dimension comment | models/alexnet_14/alexNet_14.py | models/alexnet_14/alexNet_14.py | # The Model of DeepVO
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K #enable tensorflow functions
#AlexNet with batch normalization in Keras
#input image is 128x128
def create_model():
"""
This model is designed to take in multiple inputs and give multiple outputs.
Here is what the network was designed for:
Inputs:
two 128x128 RGB images stacked (RGBRGB)
Outputs:
Rotation between images in quaternion form
Translation between two images
"""
main_input = Convolution2D(96, 11, 11, border_mode='same', input_shape=(128, 128, 6), name='main_input')
x = BatchNormalization()(main_input)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(11, 11), strides=(1, 1), border_mode='same')(x)
x = Convolution2D(384, 3, 3, border_mode='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same')(x)
x = Flatten()(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Delta rotation in quaternion form
quaternion_rotation = Dense(4, activation='tanh', name='quaternion_rotation')(x)
quaternion_rotation = Lambda(normalize_quaternion)(quaternion_rotation)
# Delta Translation output
translation = Dense(3, activation='linear', name='translation')
model = Model(input=main_input, output=[translation, quaternion_rotation])
return model
def normalize_quaternion(x):
"use tensorflow normalize function on this layer to ensure valid quaternion rotation"
x = K.l2_normalize(x, dim=1)
return x
def run_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
"Note: y should be [[translation],[quat rotation]]
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])
history = model.fit(Xtr, Ytr, batch_size=8, nb_epoch=30, verbose=1).history
score = model.evaluate(Xte, Yte, verbose=1)
if (save_path != None):
model.save(save_path)
return score, history
| # The Model of DeepVO
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K #enable tensorflow functions
#AlexNet with batch normalization in Keras
#input image is 224x224
def create_model():
"""
This model is designed to take in multiple inputs and give multiple outputs.
Here is what the network was designed for:
Inputs:
two 128x128 RGB images stacked (RGBRGB)
Outputs:
Rotation between images in quaternion form
Translation between two images
"""
main_input = Convolution2D(96, 11, 11, border_mode='same', input_shape=(128, 128, 6), name='main_input')
x = BatchNormalization()(main_input)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(11, 11), strides=(1, 1), border_mode='same')(x)
x = Convolution2D(384, 3, 3, border_mode='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same')(x)
x = Flatten()(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Delta rotation in quaternion form
quaternion_rotation = Dense(4, activation='tanh', name='quaternion_rotation')(x)
quaternion_rotation = Lambda(normalize_quaternion)(quaternion_rotation)
# Delta Translation output
translation = Dense(3, activation='linear', name='translation')
model = Model(input=main_input, output=[translation, quaternion_rotation])
return model
def normalize_quaternion(x):
"use tensorflow normalize function on this layer to ensure valid quaternion rotation"
x = K.l2_normalize(x, dim=1)
return x
def run_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
"Note: y should be a 2d list of quaternion rotations and translations.""
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])
history = model.fit(Xtr, Ytr, batch_size=8, nb_epoch=30, verbose=1).history
score = model.evaluate(Xte, Yte, verbose=1)
if (save_path != None):
model.save(save_path)
return score, history
| Python | 0.000004 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.