commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
2804024fbee6b825dec512ff13d7b28a1fee5b25 | Add root Api object. | routeros_api/api.py | routeros_api/api.py | import hashlib
import binascii
from routeros_api import api_communicator
from routeros_api import api_socket
from routeros_api import base_api
def connect(host, username='admin', password='', port=8728):
socket = api_socket.get_socket(host, port)
base = base_api.Connection(socket)
communicator = api_communicator.ApiCommunicator(base)
login(communicator, username, password)
return RouterOsApi(communicator)
def login(communicator, login, password):
communicator.send_command('/', 'login')
response = communicator.receive_single_response()
token = binascii.unhexlify(response.attributes['ret'])
hasher = hashlib.md5()
hasher.update(b'\x00')
hasher.update(password.encode())
hasher.update(token)
hashed = b'00' + hasher.hexdigest().encode('ascii')
communicator.call('/', 'login', {'name': login, 'response': hashed})
class RouterOsApi(object):
def __init__(self, communicator):
self.communicator = communicator
def get_resource(self, path):
return RouterOsResource(self.communicator, path)
def get_binary_resource(self, path):
return RouterOsResource(self.communicator, path, binary=True)
class RouterOsResource(object):
def __init__(self, communicator, path, binary=False):
self.communicator = communicator
self.path = path
self.binary = binary
def get(self, **kwargs):
return self.call('print', {}, kwargs)
def get_async(self, **kwargs):
return self.call_async('print', {}, kwargs)
def detailed_get(self, **kwargs):
return self.call('print', {'detail': ''}, kwargs)
def detailed_get_async(self, **kwargs):
return self.call_async('print', {'detail': ''}, kwargs)
def set(self, **kwargs):
return self.call('set', kwargs)
def set_async(self, **kwargs):
return self.call('set', kwargs)
def add(self, **kwargs):
return self.call('add', kwargs)
def add_async(self, **kwargs):
return self.call_async('add', kwargs)
def remove(self, **kwargs):
return self.call('remove', kwargs)
def remove_async(self, **kwargs):
return self.call_async('remove', kwargs)
def call(self, command, arguments=None, queries=None,
additional_queries=()):
return self.communicator.call(
self.path, command, arguments=arguments, queries=queries,
additional_queries=additional_queries, binary=self.binary)
def call_async(self, command, arguments=None, queries=None,
additional_queries=()):
return self.communicator.call_async(
self.path, command, arguments=arguments, queries=queries,
additional_queries=additional_queries, binary=self.binary)
| Python | 0 | |
52f715af4b1cf6dd964e71cafdf807d1133fe717 | add a basic script that tests nvlist_in and nvlist_out functionality | tests/test_nvlist.py | tests/test_nvlist.py | import json
import math
from libzfs_core.nvlist import *
from libzfs_core.nvlist import _lib
props_in = {
"key1": "str",
"key2": 10,
"key3": {
"skey1": True,
"skey2": None,
"skey3": [
True,
False,
True
]
},
"key4": [
"ab",
"bc"
],
"key5": [
int(math.pow(2, 62)),
1,
2,
3
],
"key6": [
uint32_t(10),
uint32_t(11)
],
"key7": [
{
"skey71": "a",
"skey72": "b",
},
{
"skey71": "c",
"skey72": "d",
},
{
"skey71": "e",
"skey72": "f",
}
]
}
props_out = {}
with nvlist_in(props_in) as x:
print "Dumping a C nvlist_t produced from a python dictionary:"
_lib.dump_nvlist(x, 2)
with nvlist_out(props_out) as y:
_lib.nvlist_dup(x, y, 0)
print "\n\n"
print "Dumping a dictionary reconstructed from the nvlist_t:"
print json.dumps(props_out, sort_keys=True, indent=4)
| Python | 0.000001 | |
5348379759caa9576c3194ae0795e2fcc6ed3308 | add unit tests | tests/test_region.py | tests/test_region.py | # -*- coding: utf-8 -*-
from cooler.region import *
import nose
def test_bool_ops():
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 10, 20))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 6, 10))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 5, 10))
assert comes_before(a, b) == False
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 6))
assert comes_before(a, b) == False
assert comes_after(a, b) == True
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 5))
assert comes_before(a, b) == False
assert comes_after(a, b) == True
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 15))
assert comes_before(a, b) == False
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
def test_set_ops():
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert intersection(a, b) == Region('chr1', 10, 15)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert union(a, b) == Region('chr1', 5, 20)
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
assert hull(a, b) == Region('chr1', 5, 20)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert diff(a, b) == Region('chr1', 5, 10)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
x, y, z = partition(a, b)
assert x == Region('chr1', 5, 10)
assert y == Region('chr1', 10, 15)
assert z == Region('chr1', 15, 20)
| Python | 0.000001 | |
c9fc6d4f98ba102d94fa54eedae6a50d38459d71 | add test_invalid_files to test_schema | tests/test_schema.py | tests/test_schema.py | import os
import jsonschema
import json
import pathlib
import copy
def get_example_json(filebase):
rootdir = pathlib.Path(__file__).resolve().parent.parent
jsonfilepath = str(rootdir / 'examples' / f'{filebase}.json')
with open(jsonfilepath) as f:
js = json.load(f)
return js
def get_json_schema():
this_path = os.path.dirname(os.path.abspath(__file__))
schema_path = os.path.join(os.path.dirname(this_path), 'hescorehpxml', 'schemas', 'hescore_json.schema.json')
with open(schema_path, 'r') as js:
schema = json.loads(js.read())
return schema
def get_error_messages(jsonfile, jsonschema):
errors = []
for error in sorted(jsonschema.iter_errors(jsonfile), key=str):
errors.append(error.message)
return errors
def test_schema_version_validation():
schema = get_json_schema()
error = jsonschema.Draft7Validator.check_schema(schema)
assert error is None
def test_invalid_files():
hpxml_filebase = 'townhouse_walls'
schema = get_json_schema()
js_schema = jsonschema.Draft7Validator(schema)
js = get_example_json(hpxml_filebase)
js1 = copy.deepcopy(js)
del js1['building']['about']['town_house_walls']
errors = get_error_messages(js1, js_schema)
assert "'town_house_walls' is a required property" in errors
js2 = copy.deepcopy(js)
js2_about = copy.deepcopy(js['building']['about'])
del js2['building']['about']
js2['building']['about'] = []
js2['building']['about'].append(js2_about)
js2['building']['about'].append(js2_about)
errors = get_error_messages(js2, js_schema)
assert any(error.startswith("[{'assessment_date': '2014-12-02', 'shape': 'town_house'") and
error.endswith("is not of type 'object'") for error in errors)
js3 = copy.deepcopy(js)
js3_zone = copy.deepcopy(js['building']['zone'])
del js3['building']['zone']
js3['building']['zone'] = []
js3['building']['zone'].append(js3_zone)
js3['building']['zone'].append(js3_zone)
errors = get_error_messages(js3, js_schema)
assert any(error.startswith("[{'zone_roof': [{'roof_name': 'roof1', 'roof_area': 1200.0") and
error.endswith("is not of type 'object'") for error in errors)
# TODO: Add more tests | Python | 0.000001 | |
5e4fd7fb37f9e16d27a7751221f6e3725509f2fc | Prepare to use unittests | tests/testapi.py | tests/testapi.py | #!/usr/bin/python
from fortigateconf import FortiOSConf
import sys
import json
import pprint
import json
from argparse import Namespace
import logging
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger = logging.getLogger('fortinetconflib')
hdlr = logging.FileHandler('/var/tmp/testapi.log')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
logger.debug('often makes a very good meal of %s', 'visiting tourists')
fgt = FortiOSConf()
def json2obj(data):
return json.loads(data, object_hook=lambda d: Namespace(**d))
def main():
# Login to the FGT ip
fgt.debug('on')
fgt.login('192.168.40.8','admin','')
data = {
# "action" : "add",
"seq-num" :"8",
"dst": "10.10.30.0 255.255.255.0",
"device": "port2",
"gateway": "192.168.40.254",
}
pp = pprint.PrettyPrinter(indent=4)
d=json2obj(json.dumps(data))
pp.pprint(fgt.get_name_path_dict( vdom="root"))
# resp = fgt.schema('diagnose__tree__','debug', vdom="root")
# pp.pprint(resp)
resp = fgt.post('diagnose__tree__','debug', vdom="root", mkey="enable")
pp.pprint(resp)
fgt.logout()
if __name__ == '__main__':
main()
| Python | 0 | |
e57a73ac2c1a22d97ce40a8954ecb44e3b92a53c | increase to 100% | lob/api_requestor.py | lob/api_requestor.py | import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else: #pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp)
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
| import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else:
#pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp) # pragma: no cover
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
| Python | 0.000013 |
cd653c3657aa14d3845a253d916e9f0d336910ce | add logger convenience class | loggerglue/logger.py | loggerglue/logger.py | # -*- coding: utf-8 -*-
"""
An rfc5424/rfc5425 syslog server implementation
Copyright © 2011 Evax Software <contact@evax.fr>
"""
import socket,os,sys
from datetime import datetime
from loggerglue.rfc5424 import DEFAULT_PRIVAL,SyslogEntry
from loggerglue.emitter import UNIXSyslogEmitter
class Logger(object):
"""
Convenience class to log RFC5424 messages to the
local syslog daemon.
"""
def __init__(self, emitter=None, hostname=None, app_name=None, procid=None):
"""
Create a new logger object.
Keyword arguments:
emitter -- Emitter object to send syslog messages, default to Unix socket /dev/log
hostname -- Hostname to send with log messages, defaults to current hostname
app_name -- Application name to send with log messages, defaults to application name
procid -- Process ID to send with log messages, default to current process ID
"""
if hostname is None:
# Compute host name to submit to syslog
hostname = socket.gethostname()
if app_name is None:
# Compute default app name from name of executable,
# without extension.
app_name = os.path.basename(sys.argv[0])
(app_name, _, _) = app_name.partition(".")
if procid is None:
procid = os.getpid()
if emitter is None:
emitter = UNIXSyslogEmitter()
self.hostname = hostname
self.app_name = app_name
self.procid = procid
self.emitter = emitter
def log(self, msg=None, msgid=None, structured_data=None, prival=DEFAULT_PRIVAL,
timestamp=None):
"""
Log a message.
Example:
>>> logger.log("test", prival=LOG_DEBUG|LOG_MAIL)
Keyword arguments:
msg -- Human readable message to log
msgid -- Message identifier
structured_data -- Structured data to attach to log message
prival -- Priority and facility of message (defaults to INFO|USER)
timestamp -- UTC time of log message (default to current time)
"""
if timestamp is None:
timestamp = datetime.utcnow()
msg = SyslogEntry(
prival=prival, timestamp=datetime.utcnow(),
hostname=self.hostname, app_name=self.app_name, procid=self.procid, msgid=msgid,
structured_data=structured_data,
msg=msg
)
self.emitter.emit(msg)
def close(self):
"""
Close connection to logger.
"""
self.emitter.close()
| Python | 0 | |
1ac75fafc9c67e0fc1f898f4653593730ed66326 | Create uber.py | modules/uber.py | modules/uber.py | def uber(self):
self.send_chan("Prkl, toimii!")
| Python | 0.000036 | |
7b8d7bf81b094f554f3d820b1e0df5d54917f4c0 | Create getCITask.py | src/main/resources/xlr_xldeploy/getCITask.py | src/main/resources/xlr_xldeploy/getCITask.py | #
# Copyright 2017 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from xlr_xldeploy.XLDeployClientUtil import XLDeployClientUtil
xld_client = XLDeployClientUtil.create_xldeploy_client(xldeployServer, username, password)
test = xld_client.check_ci_exist(ciID)
if throwOnFail and not test:
raise Exception(ciID + " does not exist")
else:
response = xld_client.get_ci(ciID,accept)
| Python | 0.000001 | |
1e291eb5092d232d84be5914429808aa24c9ee9d | Add HTML 5 WebSocket support (see http://bret.appspot.com/entry/web-sockets-in-tornado) | tornado/websocket.py | tornado/websocket.py | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import tornado.escape
import tornado.web
class WebSocketHandler(tornado.web.RequestHandler):
"""A request handler for HTML 5 Web Sockets.
See http://www.w3.org/TR/2009/WD-websockets-20091222/ for details on the
JavaScript interface. We implement the protocol as specified at
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-55.
Here is an example Web Socket handler that echos back all received messages
back to the client:
class EchoWebSocket(websocket.WebSocketHandler):
def open(self):
self.receive_message(self.on_message)
def on_message(self, message):
self.write_message(u"You said: " + message)
Web Sockets are not standard HTTP connections. The "handshake" is HTTP,
but after the handshake, the protocol is message-based. Consequently,
most of the Tornado HTTP facilities are not available in handlers of this
type. The only communication methods available to you are send_message()
and receive_message(). Likewise, your request handler class should
implement open() method rather than get() or post().
If you map the handler above to "/websocket" in your application, you can
invoke it in JavaScript with:
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
"""
def __init__(self, application, request):
tornado.web.RequestHandler.__init__(self, application, request)
self.stream = request.connection.stream
def _execute(self, transforms, *args, **kwargs):
if self.request.headers.get("Upgrade") != "WebSocket" or \
self.request.headers.get("Connection") != "Upgrade" or \
not self.request.headers.get("Origin"):
message = "Expected WebSocket headers"
self.stream.write(
"HTTP/1.1 403 Forbidden\r\nContent-Length: " +
str(len(message)) + "\r\n\r\n" + message)
return
self.stream.write(
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Server: TornadoServer/0.1\r\n"
"WebSocket-Origin: " + self.request.headers["Origin"] + "\r\n"
"WebSocket-Location: ws://" + self.request.host +
self.request.path + "\r\n\r\n")
self.async_callback(self.open)(*args, **kwargs)
def write_message(self, message):
"""Sends the given message to the client of this Web Socket."""
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
if isinstance(message, unicode):
message = message.encode("utf-8")
assert isinstance(message, str)
self.stream.write("\x00" + message + "\xff")
def receive_message(self, callback):
"""Calls callback when the browser calls send() on this Web Socket."""
callback = self.async_callback(callback)
self.stream.read_bytes(
1, functools.partial(self._on_frame_type, callback))
def close(self):
"""Closes this Web Socket.
The browser will receive the onclose event for the open web socket
when this method is called.
"""
self.stream.close()
def async_callback(self, callback, *args, **kwargs):
"""Wrap callbacks with this if they are used on asynchronous requests.
Catches exceptions properly and closes this Web Socket if an exception
is uncaught.
"""
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception, e:
logging.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self.stream.close()
return wrapper
def _on_frame_type(self, callback, byte):
if ord(byte) & 0x80 == 0x80:
raise Exception("Length-encoded format not yet supported")
self.stream.read_until(
"\xff", functools.partial(self._on_end_delimiter, callback))
def _on_end_delimiter(self, callback, frame):
callback(frame[:-1].decode("utf-8", "replace"))
def _not_supported(self, *args, **kwargs):
raise Exception("Method not supported for Web Sockets")
for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method, WebSocketHandler._not_supported)
| Python | 0 | |
116babc38e2e4023eb0b45eabc02050ed433e240 | Include a helpful MOD analyser script | scripts/mod_info.py | scripts/mod_info.py | # mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
| Python | 0 | |
e9576468046fd53195f139f5751c9d45f26c51c4 | handle NER exceptions. | aleph/analyze/polyglot_entity.py | aleph/analyze/polyglot_entity.py | from __future__ import absolute_import
import logging
from collections import defaultdict
from polyglot.text import Text
from aleph.core import db
from aleph.model import Reference, Entity, Collection
from aleph.model.entity_details import EntityIdentifier
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
SCHEMAS = {
'I-PER': '/entity/person.json#',
'I-ORG': '/entity/organization.json#'
}
DEFAULT_SCHEMA = '/entity/entity.json#'
class PolyglotEntityAnalyzer(Analyzer):
origin = 'polyglot'
def prepare(self):
self.disabled = not self.document.source.generate_entities
self.entities = defaultdict(list)
def on_text(self, text):
if text is None or len(text) <= 100:
return
try:
text = Text(text)
if len(self.meta.languages) == 1:
text.hint_language_code = self.meta.languages[0]
for entity in text.entities:
if entity.tag == 'I-LOC':
continue
parts = [t for t in entity if t.lower() != t.upper()]
if len(parts) < 2:
continue
entity_name = ' '.join(parts)
if len(entity_name) < 5 or len(entity_name) > 150:
continue
schema = SCHEMAS.get(entity.tag, DEFAULT_SCHEMA)
self.entities[entity_name].append(schema)
except Exception as ex:
log.warning('NER failed: %r', ex)
def load_collection(self):
if not hasattr(self, '_collection'):
self._collection = Collection.by_foreign_id('polyglot:ner', {
'label': 'Automatically Extracted Persons and Companies',
'public': True
})
return self._collection
def load_entity(self, name, schema):
q = db.session.query(EntityIdentifier)
q = q.order_by(EntityIdentifier.deleted_at.desc().nullsfirst())
q = q.filter(EntityIdentifier.scheme == self.origin)
q = q.filter(EntityIdentifier.identifier == name)
ident = q.first()
if ident is not None:
if ident.deleted_at is None:
return ident.entity_id
if ident.entity.deleted_at is None:
return None
data = {
'name': name,
'$schema': schema,
'state': Entity.STATE_PENDING,
'identifiers': [{
'scheme': self.origin,
'identifier': name
}],
'collections': [self.load_collection()]
}
entity = Entity.save(data)
return entity.id
def finalize(self):
output = []
for entity_name, schemas in self.entities.items():
schema = max(set(schemas), key=schemas.count)
output.append((entity_name, len(schemas), schema))
Reference.delete_document(self.document.id, origin=self.origin)
for name, weight, schema in output:
entity_id = self.load_entity(name, schema)
if entity_id is None:
continue
ref = Reference()
ref.document_id = self.document.id
ref.entity_id = entity_id
ref.origin = self.origin
ref.weight = weight
db.session.add(ref)
log.info('Polyglot extraced %s entities.', len(output))
| from __future__ import absolute_import
import logging
from collections import defaultdict
from polyglot.text import Text
from aleph.core import db
from aleph.model import Reference, Entity, Collection
from aleph.model.entity_details import EntityIdentifier
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
SCHEMAS = {
'I-PER': '/entity/person.json#',
'I-ORG': '/entity/organization.json#'
}
DEFAULT_SCHEMA = '/entity/entity.json#'
class PolyglotEntityAnalyzer(Analyzer):
origin = 'polyglot'
def prepare(self):
self.disabled = not self.document.source.generate_entities
self.entities = defaultdict(list)
def on_text(self, text):
if text is None or len(text) <= 100:
return
text = Text(text)
if len(self.meta.languages) == 1:
text.hint_language_code = self.meta.languages[0]
for entity in text.entities:
if entity.tag == 'I-LOC':
continue
parts = [t for t in entity if t.lower() != t.upper()]
if len(parts) < 2:
continue
entity_name = ' '.join(parts)
if len(entity_name) < 5 or len(entity_name) > 150:
continue
schema = SCHEMAS.get(entity.tag, DEFAULT_SCHEMA)
self.entities[entity_name].append(schema)
def load_collection(self):
if not hasattr(self, '_collection'):
self._collection = Collection.by_foreign_id('polyglot:ner', {
'label': 'Automatically Extracted Persons and Companies',
'public': True
})
return self._collection
def load_entity(self, name, schema):
q = db.session.query(EntityIdentifier)
q = q.order_by(EntityIdentifier.deleted_at.desc().nullsfirst())
q = q.filter(EntityIdentifier.scheme == self.origin)
q = q.filter(EntityIdentifier.identifier == name)
ident = q.first()
if ident is not None:
if ident.deleted_at is None:
return ident.entity_id
if ident.entity.deleted_at is None:
return None
data = {
'name': name,
'$schema': schema,
'state': Entity.STATE_PENDING,
'identifiers': [{
'scheme': self.origin,
'identifier': name
}],
'collections': [self.load_collection()]
}
entity = Entity.save(data)
return entity.id
def finalize(self):
output = []
for entity_name, schemas in self.entities.items():
schema = max(set(schemas), key=schemas.count)
output.append((entity_name, len(schemas), schema))
Reference.delete_document(self.document.id, origin=self.origin)
for name, weight, schema in output:
entity_id = self.load_entity(name, schema)
if entity_id is None:
continue
ref = Reference()
ref.document_id = self.document.id
ref.entity_id = entity_id
ref.origin = self.origin
ref.weight = weight
db.session.add(ref)
log.info('Polyglot extraced %s entities.', len(output))
| Python | 0 |
9e7acd4e7d80cffb0274e3a01aee517fb63d3db9 | Create Josuel_Concordance.py | Josuel_Concordance.py | Josuel_Concordance.py | # Author: Josuel Musambaghani
# library that breaks text into parts
import nltk
import string
with open('c:/Python27/fileIn.txt', 'r') as in_file:
text = in_file.read()
f = nltk.sent_tokenize(text)
# This code deals with the proble of parenthesis
for item in range(len(f)-1):
if '(' in f[item] and ')' in f[item+1]:
f[item] += ' ' + f[item+1]
f.remove(f[item+1])
'''
# This code solve the problem of having punctuations appended to words
# when running. For example 'english:' and 'english' that might be consider
# as different because of the punctuation mark
punctuations = ['.', ':', ':', "'", ',', '...', '?', '!', '~']
g = []
for elt in f:
for mark in punctuations:
if mark in elt:
z = elt.split(mark)
new = z[0] + z[1]
g.append(new)
print g
################################################################
for elt in f:
for let in elt[len(elt)-2:]:
if let in string.punctuation:
elt = elt.replace(let, "")
for elt in f:
for let in elt[:1]:
if let in string.punctuation:
elt = elt.replace(let, "")
print f
'''
# count and display results of counted words
myDict = {}
linenum = -1
for line in f:
line = line.strip()
line = line.lower()
line = line.split()
linenum += 1
for word in line:
###################################################
# Trying to eliminate punctuations that are appended to words
if word in string.punctuation:
line.remove(word)
for elt in word[len(word)-2:]:
if "e.g." in word:
continue
elif elt in string.punctuation:
word = word.replace(elt, "")
for elt in word[:1]:
if elt in string.punctuation:
word = word.replace(elt, "")
###################################################
# the code continues as normal ...
word = word.strip()
word = word.lower()
if not word in myDict:
myDict[word] = []
myDict[word].append(linenum)
print "%-15s %5s %s" %("Word", 'Count', "Line Numbers")
print "%-15s %5s %s" %("====", '=====', "============")
for key in sorted(myDict):
print '%-15s %5d: %s' % (key, len(myDict[key]), myDict[key])
| Python | 0 | |
2032a823b2dad6f7cebb63ee276bcfb6ea02b7a0 | improve notes | notes/code/lolviz.py | notes/code/lolviz.py | import graphviz
def lolviz(table):
"""
Given a list of lists such as:
[ [('a','3')], [], [('b',230), ('c',21)] ]
return the dot/graphviz to display as a two-dimensional
structure.
"""
s = """
digraph G {
nodesep=.05;
rankdir=LR;
node [shape=record,width=.1,height=.1];
"""
# Make outer list as vertical
labels = []
for i in range(len(table)):
bucket = table[i]
if len(bucket)==0: labels.append(str(i))
else: labels.append("<f%d> %d" % (i,i))
s += ' mainlist [color="#444443", fontsize="9", fontcolor="#444443", fontname="Helvetica", style=filled, fillcolor="#D9E6F5", label = "'+'|'.join(labels)+'"];\n'
# define inner lists
for i in range(len(table)):
bucket = table[i]
if not bucket or len(bucket)==0: continue
elements = []
for j, el in enumerate(bucket):
if type(el)==tuple and len(el)==2: els = "%s→%s" % el
else: els = repr(el)
elements.append('<table BORDER="0" CELLBORDER="1" CELLSPACING="0"><tr><td cellspacing="0" bgcolor="#FBFEB0" border="1" sides="b" valign="top"><font color="#444443" point-size="9">%d</font></td></tr><tr><td bgcolor="#FBFEB0" border="0" align="center">%s</td></tr></table>' % (j, els))
s += 'node%d [color="#444443", fontname="Helvetica", margin="0.01", space="0.0", shape=record label=<{%s}>];\n' % (i, '|'.join(elements))
# Do edges
for i in range(len(table)):
bucket = table[i]
if not bucket or len(bucket)==0: continue
s += 'mainlist:f%d -> node%d [arrowsize=.5]\n' % (i,i)
s += "}\n"
print s
return s
x = [ [('a','3')], [], [('b',230), ('c',21)] ]
dot = lolviz(x)
g = graphviz.Source(dot)
g.render(view=True) | Python | 0.000001 | |
70b6fde787018daf5b87f485e60c9a26fa542f2e | add basic affine 3D transforms | lab_3/affine_transform.py | lab_3/affine_transform.py | from util.matrix import Matrix
from math import cos, sin
def translation(x, y, z):
return Matrix([
[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1]
])
# den = (phi ** 2 + psi ** 2) ** .5
# phi /= den
# psi /= den
# return Matrix([
# [phi, -psi, 0],
# [psi, phi, 0],
# [0, 0, 1]
# ])
def rotation_x(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]
])
def rotation_y(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[c, 0, s, 0],
[0, 1, 0, 0],
[-s, 0, c, 0],
[0, 0, 0, 1]
])
def rotation_z(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[c, -s, 0, 0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
def scaling(kx, ky=None, kz=None):
if ky is None and kz is None:
ky = kz = kx
return Matrix([
[kx, 0, 0, 0],
[0, ky, 0, 0],
[0, 0, kz, 0],
[0, 0, 0, 1]
])
mirroring_x = Matrix([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
mirroring_y = Matrix([
[-1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
mirroring_z = Matrix([
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
| Python | 0.000008 | |
786f75be946427024fa96ae8dcd06d8d1ecd49cc | Add the init method to the node model. | model/node.py | model/node.py | class NodeModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "node"
super(NodeModel, self).__init__() | Python | 0 | |
ebc6368c11048a9182d848cff7f47e3dd8532933 | Add files via upload | my_game_04.py | my_game_04.py | import pygame
import os
# ширина и высота игрового экрана
WIDTH = 640
HEIGHT = 480
# частота кадров
FPS = 60
# путь к изображениям
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "images");
# класс для корабля игрока
class PlayerShip(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join(img_folder, "player_ship.png")).convert()
self.image.set_colorkey((0, 0, 0))
self.rect = self.image.get_rect()
self.rect.x = 10
self.rect.centery = HEIGHT / 2
class Meteor(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join(img_folder, "meteor.png")).convert()
self.image.set_colorkey((0, 0, 0))
self.rect = self.image.get_rect()
self.rect.left = x
self.rect.top = y
def update(self):
self.rect.left -= 3
# инициализация библиотеки pygame
pygame.init()
# создание объекта для отслеживания времени
clock = pygame.time.Clock()
# создание игрового экрана
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# смена залоголовка окна
pygame.display.set_caption("My Game")
# все спрайты будут храниться здесь
sprites = pygame.sprite.Group()
sprites.add(PlayerShip())
sprites.add(Meteor(WIDTH - 50, 40))
sprites.add(Meteor(WIDTH - 100, 200))
# цикл событий
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# изменение движения
sprites.update()
# очистка фона и рисование спрайтов
screen.fill((0, 0, 80))
sprites.draw(screen)
# переключение буферов
pygame.display.flip()
# задает частоту запуска цикла
clock.tick(FPS)
# завершение работы библиотеки pygame
pygame.quit()
| Python | 0 | |
07467664b699612e10b51bbeafdce79a9d1e0127 | Write unit test for utility functions | test/test_util.py | test/test_util.py | from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
| Python | 0.000002 | |
fd7c31cf6039f7640fc01c8b76064afa0318cafd | Build update file | update_game_build.py | update_game_build.py | import pygame
import sys
from random import *
from pygame.locals import *
from operator import attrgetter
from variables import *
from Card import *
from Player import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from display_windows import *
"""
Rules :
1) Show property on which house/hotel can be built
- there should be equal number of houses on all property before increasing it by one
2) Player selects the property
3) Update the state of player and card
4) Repeat steps 1-3 till player does not selects cancel
"""
def update_game_build(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects,Mark):
card_list = []
# removing card which have hotels built on them
# removing cards which have greater no of houses than other cards of same color
for color_cards in Players[cur_player].color_cards_owned:
# color_cards = [1,3]
for card in color_cards:
card_list.append(Cards[card])
# card_list(Card[1],Card[3])
min_house_card = min(card_list,key=attrgetter('houses_built'))
if min_house_card.houses_built == 4: # hotel
for crd in color_cards:
if Cards[crd].hotel_built == 1:
if crd in Mark:
Mark.remove(crd)
else: # house
for crd in color_cards:
if Cards[crd].houses_built != min_house_card.houses_built:
if crd in Mark:
Mark.remove(crd)
# get the card index that was clicked
build_card = display_build_window(screen,Players,Cards,cur_player,Mark)
# check if the card is valid
# none implies user clicked cancel
# check if card is in mark
if build_card != None:
if build_card in Mark:
card = Cards[build_card]
screen.fill(BACKGROUND_COLOR)
create_board(screen)
create_game_options(screen)
for player in Players:
player.move_player(screen,player.cur_position)
create_player_info(screen,Players,Cards,cur_player)
# confirmation prompt
# show build infomation
build_prop = display_build_confirm_window(screen,card)
# update card status and player balance
if build_prop == True:
player = Players[cur_player]
# build hotel
if card.houses_built == 4 and card.hotel_built == 0:
if player.cur_balance > card.hotel_cost:
player.cur_balance -= card.hotel_cost
card.hotel_built = 1
else:
player.cur_balance -= card.hotel_cost
card.hotel_built = 1
player.isBankrupt = True
else:
if player.cur_balance > card.house_cost:
player.cur_balance -= card.house_cost
card.houses_built += 1
else:
player.cur_balance -= card.house_cost
card.hotel_built += 1
player.isBankrupt = True
else:
Mark = []
player = Players[cur_player]
for color_list in player.color_cards_owned:
Mark.extend(color_list)
# display build window
if Mark != []:
# adding static part
screen.fill(BACKGROUND_COLOR)
create_board(screen)
create_game_options(screen)
create_player_info(screen,Players,Cards,cur_player)
for player in Players:
player.move_player(screen,player.cur_position)
update_game_build(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects,Mark)
else:
Mark = []
player = Players[cur_player]
for color_list in player.color_cards_owned:
Mark.extend(color_list)
# display build window
if Mark != []:
# adding static part
screen.fill(BACKGROUND_COLOR)
create_board(screen)
create_game_options(screen)
create_player_info(screen,Players,Cards,cur_player)
for player in Players:
player.move_player(screen,player.cur_position)
update_game_build(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects,Mark)
| Python | 0 | |
448f18769d7c701d9dd03ff65489656380513d07 | Add test init. | tests/__init__.py | tests/__init__.py | from flexmock import flexmock
from flask.ext.storage import MockStorage
from flask_uploads import init
created_objects = []
added_objects = []
deleted_objects = []
committed_objects = []
class MockModel(object):
def __init__(self, **kw):
created_objects.append(self)
for key, val in kw.iteritems():
setattr(self, key, val)
db_mock = flexmock(
Column=lambda *a, **kw: ('column', a, kw),
Integer=('integer', [], {}),
Unicode=lambda *a, **kw: ('unicode', a, kw),
Model=MockModel,
session=flexmock(
add=added_objects.append,
commit=lambda: committed_objects.extend(
added_objects + deleted_objects
),
delete=deleted_objects.append,
),
)
class TestCase(object):
def setup_method(self, method, resizer=None):
init(db_mock, MockStorage, resizer)
self.db = db_mock
self.Storage = MockStorage
self.resizer = resizer
| Python | 0 | |
256648ad4effd9811d7c35ed6ef45de67f108926 | Add pytest option for specifying the typing module to use | tests/conftest.py | tests/conftest.py | import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
| Python | 0 | |
08f6d31feb493b24792eaabfa11d08faea68c62b | add textample plug | plugins/textample/textample.py | plugins/textample/textample.py | # coding=utf-8
import gzip
import os
import random
import re
def search(regex, base_dir, file_contains=''):
reg = re.compile(regex, re.IGNORECASE)
for root, _, files in os.walk(base_dir):
for file in files:
if file.endswith('.gz'):
file_path = os.path.join(root, file)
if file_contains not in file_path:
continue
with gzip.open(file_path) as f:
for line in f:
line = line.decode('utf-8')
if reg.search(line):
yield (file_path[len(base_dir) + 1:-3], ' '.join(line.split()))
@yui.threaded
@yui.command('example', 'ex')
def example(argv):
"""Regex search for sentences. Usage: example <regex> [file]"""
if len(argv) < 2:
return
base = os.path.join(os.path.dirname(__file__), 'texts')
if not os.path.isdir(base):
return 'Directory %s does not exist' % base
se = search(argv[1], base, file_contains=argv[2] if len(argv) > 2 else '')
try:
return '%s: %s' % random.choice(list(se))
except IndexError as e:
return 'No matching sentences found'
| Python | 0 | |
da3248f782d83c46b698c31736b29a42d380511c | Add the playground | micro/_playground.py | micro/_playground.py | CODE = '''
out str + 2 3
'''
if __name__ == '__main__':
import lexer
import preparser
import parser
import builtin_functions
import sys
import evaluate
specific_lexer = lexer.Lexer()
specific_preparser = preparser.Preparser(specific_lexer)
preast = specific_preparser.preparse(CODE)
specific_parser = parser.Parser()
ast = specific_parser.parse(preast, builtin_functions.BUILTIN_FUNCTIONS)
errors = specific_lexer.get_errors() + specific_preparser.get_errors() + specific_parser.get_errors()
for some_error in errors:
some_error.detect_position(CODE)
print(some_error)
if errors:
sys.exit()
evaluate.evaluate(ast, builtin_functions.BUILTIN_FUNCTIONS)
| Python | 0.999869 | |
686c0d0c8f2e520375315c84e2320b087b9a3831 | add scan dir test | tests/scan_dir.py | tests/scan_dir.py | import os
import json
import datetime
import random
import hashlib
import signal
import sys
from frontend.cli.irma import _scan_new, _scan_add, _scan_launch, \
_scan_progress, _scan_cancel, IrmaScanStatus, _scan_result
import time
RES_PATH = "."
SRC_PATH = "."
DEBUG = True
SCAN_TIMEOUT_SEC = 300
BEFORE_NEXT_PROGRESS = 5
DEBUG = False
Probelist = [u'ClamAV', u'VirusTotal', u'Kaspersky', u'Sophos',
u'McAfeeVSCL', u'Symantec', u'StaticAnalyzer']
scanner = None
def handler(_, _):
print 'Cancelling...'
if scanner is not None:
scanner.cancel()
sys.exit(0)
class ScannerError(Exception):
pass
class Scanner(object):
def __init__(self):
# test setup
date_str = str(datetime.datetime.now().date())
date_str = date_str.replace('-', '')
self.res_dir = os.path.join(RES_PATH, date_str)
self.scanid = None
try:
if not os.path.exists(self.res_dir):
os.mkdir(self.res_dir)
except OSError:
raise ScannerError("Can't create [{0}]".format(self.res_dir))
def cancel(self):
if self.scanid is not None:
_scan_cancel(self.scanid, DEBUG)
def scan_files(self, files,
force=False,
probe=None,
timeout=SCAN_TIMEOUT_SEC):
self.scanid = _scan_new(DEBUG)
_scan_add(self.scanid, files, DEBUG)
probelist = _scan_launch(self.scanid, force, probe, DEBUG)
scanid = self.scanid
nb = len(files)
probes = " - ".join(sorted(probelist))
print ("launching scan {0}".format(scanid) +
" of {0} files on {1}".format(scanid, nb, probes))
start = time.time()
while True:
time.sleep(BEFORE_NEXT_PROGRESS)
(status, fin, tot, suc) = _scan_progress(self.scanid, DEBUG)
if fin is not None:
# write in place
sys.stdout.write("\r\tjobs {0}({1})/{2}".format(fin, suc, tot))
sys.stdout.flush()
if status == IrmaScanStatus.label[IrmaScanStatus.finished]:
break
now = time.time()
if now > (start + timeout):
_scan_cancel(self.scanid, DEBUG)
raise ScannerError("Results Timeout")
return _scan_result(self.scanid, DEBUG)
def _write_result(self, res):
print "Writing results"
for (sha256, results) in res.items():
res_file = os.path.join(self.res_dir, sha256)
with open(res_file, "w") as dst:
dst.write(json.dumps(results))
return
def _write_timeout_result(self, file_list):
print "Timeout results"
for tf in file_list:
with open(tf) as t:
sha256 = hashlib.sha256(t.read()).hexdigest()
res_file = os.path.join(self.res_dir, sha256)
with open(res_file, "w") as dst:
dst.write("timeout")
def scan_dir(self, dirname, nb_files_per_scan):
if not os.path.exists(dirname):
raise ScannerError("dir to scan does not exits")
# get all files in dir
filenames = []
for _, _, filename in os.walk(dirname):
for f in filename:
filenames.append(os.path.join(dirname, f))
random.shuffle(filenames)
for i in xrange(0, len(filenames), nb_files_per_scan):
file_list = filenames[i:i + nb_files_per_scan]
try:
res = self.scan_files(file_list, force=True)
except ScannerError:
self._write_timeout_result(file_list)
res = _scan_result(self.scanid, DEBUG)
self._write_result(res)
return
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
scanner = Scanner()
scanner.scan_dir("samples", 5)
| Python | 0 | |
682b064f29c7a6cfea0c9866da03703822e70cb3 | Add machinery to slurp dhcpd.leases journal into usable format. | propernoun/leases.py | propernoun/leases.py | from . import parser
from . import watch
def gen_leases(path):
"""
Keep track of currently valid leases for ISC dhcpd.
Yields dictionaries that map ``ip`` to information about the
lease. Will block until new information is available.
"""
g = watch.watch_dhcp_leases(path)
for _ in g:
with file(path) as f:
s = f.read()
leases = {}
for l in parser.parse(s):
assert 'ip' in l
leases[l['ip']] = l
yield leases
| Python | 0 | |
ba6c50d0b2fd973c34f2df3779d78df11f671598 | Create mongo_import_keywords.py | mongo_import_keywords.py | mongo_import_keywords.py | """
Load mongo database with keywords for annie annotation.
The keyword_array pickle is packaged with the GRITS classifier.
"""
import sys
import re
import pickle
from pymongo import MongoClient
def load_keyword_array(file_path):
with open(file_path) as f:
keyword_array = pickle.load(f)
return keyword_array
def insert_set(names_set, collection):
"""Insert a list of names into a collection"""
for name in names_set:
collection.insert({'_id': name})
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mongo_url", default='localhost'
)
parser.add_argument(
"--db_name", default='annotation'
)
args = parser.parse_args()
client = MongoClient(args.mongo_url)
db = client[args.db_name]
category_labels = {
'doid/diseases': 'diseases',
'eha/disease': 'diseases',
'pm/disease': 'diseases',
'hm/disease': 'diseases',
'biocaster/diseases': 'diseases',
'eha/symptom': 'symptoms',
'biocaster/symptoms': 'symptoms',
'doid/has_symptom': 'symptoms',
'pm/symptom': 'symptoms',
'symp/symptoms': 'symptoms',
'wordnet/hosts': 'hosts',
'eha/vector': 'hosts',
'wordnet/pathogens': 'pathogens',
'biocaster/pathogens': 'pathogens',
'pm/mode of transmission': 'modes',
'doid/transmitted_by': 'modes',
'eha/mode of transmission': 'modes'
}
collection_labels = set(category_labels.values())
for collection in collection_labels:
db[collection].drop()
keyword_array = load_keyword_array('current_classifier/keyword_array.p')
for keyword in keyword_array:
if keyword['category'] in category_labels:
collection = category_labels[keyword['category']]
db[collection].insert(
{ '_id': keyword['keyword'],
'source': keyword['category'],
'linked_keywords': keyword['linked_keywords'],
'case_sensitive': keyword['case_sensitive']} )
| Python | 0.00004 | |
a21ed2d12b763d93722b6c8e9f6d6ff39d15938c | add utility to fetch satellites and corresponding TLEs | python-files/get-satellites.py | python-files/get-satellites.py | #!/usr/bin/env python3
"""
Utility to get the station information from a SatNOGS Network server.
Collects the paginated objects into a single JSON list and stores in a file.
"""
import json
import sqlite3
import requests
import orbit
# default expire time is 24 hours
orbit.tle.requests_cache.configure(expire_after=60*60*6)
URL = 'https://db.satnogs.org/api/satellites'
SATELLITES_JSON = 'satellites.json'
TLE_DB = 'tle.db'
# fetch known satellites
r = requests.get(URL)
satellites = r.json()
with open(SATELLITES_JSON, 'w') as fp:
json.dump(satellites, fp)
conn = sqlite3.connect('file:' + TLE_DB, uri=True,
detect_types=sqlite3.PARSE_DECLTYPES)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS tle
(norad integer,
epoch timestamp,
line0 text,
line1 text,
line2 text,
unique(norad, epoch)
);''')
for sat in satellites:
norad = sat['norad_cat_id']
print(norad, end='')
try:
tle = orbit.satellite(norad)
except KeyError:
print(' ** not at CelesTrak')
continue
try:
cur.execute(
'INSERT INTO tle VALUES (?,?,?,?,?);',
(norad, tle.epoch(), tle.tle_raw[0], tle.tle_raw[1], tle.tle_raw[2]))
# 'INSERT OR IGNORE INTO ...' will suppress the exception
except sqlite3.IntegrityError:
pass
else:
print(' TLE updated', end='')
finally:
print()
conn.commit()
conn.close()
| Python | 0 | |
550873226ec0879a86fea2527b56535a329981b1 | Add upcoming_match.py | upcoming_match.py | upcoming_match.py | #! /usr/bin/env python
#
# Tests sending an upcoming_match notification via adb to The Blue Alliance
# Android app.
import test_notification
json_data = {"match_key": "2007cmp_sf1m3",
"event_name": "Championship - Einstein Field",
"team_keys": ["frc173","frc1319","frc1902","frc177","frc987","frc190"],
"scheduled_time":12345,
"predicted_time":122345}
if __name__ == '__main__':
test_notification.upcoming_match_command(json_data)
| Python | 0.000001 | |
1d8cbf94f127571358aee97677a09f7cea3bf3a7 | Add helper functions for to/from bytes/unicode | p23serialize/util.py | p23serialize/util.py | from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
| Python | 0.000001 | |
01f21a16e4bcecccf51a565b51222ab18b79adb4 | Add tests for shell utils. | st2common/tests/unit/test_util_shell.py | st2common/tests/unit/test_util_shell.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
| Python | 0 | |
f56a902f2e7ca45bb4bf1dfa7dacefd3fefff524 | Create config.sample | zhwikt-broken-file-links/config.sample.py | zhwikt-broken-file-links/config.sample.py | # -*- coding: utf-8 -*-
cfg = {
"category": "Category:含有受损文件链接的页面"
}
| Python | 0 | |
076f65b4d67cb44cd48ee5eedc134a83ab01ca4a | Add unit test for md.pair.lj1208 (duplicated from test_pair_lj.py) | hoomd/md/test-py/test_pair_lj1208.py | hoomd/md/test-py/test_pair_lj1208.py | # -*- coding: iso-8859-1 -*-
# Maintainer: unassigned
from hoomd import *
from hoomd import deprecated
from hoomd import md;
context.initialize()
import unittest
import os
# md.pair.lj1208
class pair_lj1208_tests (unittest.TestCase):
def setUp(self):
print
self.s = deprecated.init.create_random(N=100, phi_p=0.05);
self.nl = md.nlist.cell()
context.current.sorter.set_params(grid=8)
# basic test of creation
def test(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0, alpha=1.0, r_cut=2.5, r_on=2.0);
lj1208.update_coeffs();
# test missing coefficients
def test_set_missing_epsilon(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma=1.0, alpha=1.0);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test missing coefficients
def test_set_missing_sigma(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, alpha=1.0);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test missing coefficients
def test_missing_AA(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test set params
def test_set_params(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.set_params(mode="no_shift");
lj1208.set_params(mode="shift");
lj1208.set_params(mode="xplor");
self.assertRaises(RuntimeError, lj1208.set_params, mode="blah");
# test default coefficients
def test_default_coeff(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
# (alpha, r_cut, and r_on are default)
lj1208.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj1208.update_coeffs()
# test max rcut
def test_max_rcut(self):
lj1208 = md.pair.lj1208(r_cut=2.5, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
self.assertAlmostEqual(2.5, lj1208.get_max_rcut());
lj1208.pair_coeff.set('A', 'A', r_cut = 2.0)
self.assertAlmostEqual(2.0, lj1208.get_max_rcut());
# test specific nlist subscription
def test_nlist_subscribe(self):
lj1208 = md.pair.lj1208(r_cut=2.5, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma = 1.0, epsilon=1.0)
self.nl.update_rcut();
self.assertAlmostEqual(2.5, self.nl.r_cut.get_pair('A','A'));
lj1208.pair_coeff.set('A', 'A', r_cut = 2.0)
self.nl.update_rcut();
self.assertAlmostEqual(2.0, self.nl.r_cut.get_pair('A','A'));
# test coeff list
def test_coeff_list(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set(['A', 'B'], ['A', 'C'], epsilon=1.0, sigma=1.0, alpha=1.0, r_cut=2.5, r_on=2.0);
lj1208.update_coeffs();
# test adding types
def test_type_add(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0);
self.s.particles.types.add('B')
self.assertRaises(RuntimeError, lj1208.update_coeffs);
lj1208.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0)
lj1208.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0)
lj1208.update_coeffs();
def tearDown(self):
del self.s, self.nl
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| Python | 0 | |
2a6036bcb7eda9c75b4ade025e757db961781905 | add src/cs_toico.py | src/cs_toico.py | src/cs_toico.py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''cs_toicon
image[.png/.bmp/.ico/etc] to masked .ico
inspired http://d.hatena.ne.jp/rsky/20070919/1190163713
'''
import sys, os
from PIL import Image
from cStringIO import StringIO
from struct import pack, unpack
def toicon(infile, outfile=None, dim=(32, 32), mp=(0, 0)):
width, height = dim
if width < 1 or width > 256 or height < 1 or height > 256:
raise ValueError('invalid dim')
im = Image.open(infile)
sys.stderr.write('original: %s %s %s\n' % (im.format, im.size, im.mode))
# tmp = im.copy() # save original
im = im.convert('P', palette=Image.ADAPTIVE, colors=256,
dither=Image.FLOYDSTEINBERG) # BGRA pal[0] will be ffffff00 when used white
# im.thumbnail(dim, Image.BICUBIC) # (fast) expect palette 256 colors
im.thumbnail(dim, Image.ANTIALIAS) # (slow) colors will be changed ?
width, height = im.size
sys.stderr.write('thumbnail: %s %s %s\n' % (im.format, im.size, im.mode))
# pal = im.getpalette()
# pal[] = ... # adjust palette
# im.putpalette(pal)
# dat = list(im.getdata())
# dat[] = ... # adjust palette number
# im.putdata(dat)
bmp = StringIO()
im.save(bmp, 'BMP')
image_size = bmp.tell() - 14 # 3368
# adjust src bytes per line
wlen = width # 1pixel=8bpp
# adjust mask bytes per line
if False:
blen = ((width + 31) / 32) * 4 # 1pixel=1bit
else:
if False: blen = width * 3 # 1pixel=24bit
else: blen = (width + 7) / 8 # 1pixel=1bit
if blen % 4: blen += 4 - blen % 4 # needs 4bytes alignment
mask_size = blen * height # 384=8x48
sys.stderr.write('%d (%d) (%d) %d\n' % (image_size, wlen, blen, mask_size))
ico = StringIO()
ico.write(pack('<3H', 0, 1, 1)) # (0), (1:ICO, 2:CUR), (N:number of images)
ico.write(pack('<BBBBHHII',
width & 255, # bWidth
height & 255, # bHeight
0, # bColorCount (0:8bpp, N:number of colors)
0, # bReserved (must be 0)
1, # wPlanes
24, # wBitCount
image_size + mask_size, # dwBytesInRes
6 + 16)) # dwImageOffset (6=<3H, 16=<BBBBHHII)
bmp.seek(14) # skip BITMAPFILEHEADER
ico.write(bmp.read(8)) # icHeader part BITMAPINFOHEADER (biSize, biWidth)
ico.write(pack('<I', height * 2)) # combined height of the XOR and AND masks
bmp.seek(4, 1) # part BITMAPINFOHEADER (biHeight)
ico.write(bmp.read(28)) # remain BITMAPINFOHEADER (biPlanes...biClrImportant)
if False:
biClrUsed = 256
else:
bmp.reset()
bf = bmp.read(14) # BITMAPFILEHEADER
bi = bmp.read(8 + 4 + 28) # BITMAPINFOHEADER
bfType, bfSize, bfReserved1, bfReserved2, bfOffBits = unpack('<HIHHI', bf)
biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression, \
biSizeImage, biXPPM, biYPPM, biClrUsed, biClrImportant \
= unpack('<IIIHHIIIIII', bi)
sys.stderr.write('%04x %08x (%04x %04x) %08x\n' % (
bfType, bfSize, bfReserved1, bfReserved2, bfOffBits))
sys.stderr.write('%08x %08x %08x %04x %04x %08x\n' % (
biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression))
sys.stderr.write('%08x %08x %08x %08x %08x\n' % (
biSizeImage, biXPPM, biYPPM, biClrUsed, biClrImportant))
pal = ''
if biClrUsed:
pal = bmp.read(4 * biClrUsed) # palette 256 x 4 BGRA
ico.write(pal) # icColors
for y in range(64):
q = y * 16
sys.stderr.write('%04x:' % q)
for x in range(4):
for z in range(4):
sys.stderr.write(' %02x' % ord(pal[q + x * 4 + z]))
sys.stderr.write(' ')
sys.stderr.write('\n')
img = bmp.read() # len(img)=2304=48x48
ico.write(img) # icXOR
c, r = mp
if c < 0: c = 0
if c >= width: c = width - 1
if r < 0: r = 0
if r >= height: r = height - 1
mpal = ord(img[r * wlen + c])
sys.stderr.write('(%d, %d) mask pal: %02x [%04x]\n' % (c, r, mpal, 4 * mpal))
if len(pal):
for h in range(height):
for w in range(blen): # bytes per line
b = 0
if w * 8 < wlen:
p = h * wlen + w * 8
for m in range(8): # bits
b |= (0x01 << (7 - m)) if ord(img[p + m]) == mpal else 0
ico.write(chr(b)) # icAND
else:
ico.write('\0' * mask_size) # icAND
ico.reset()
if outfile: open(outfile, 'wb').write(ico.read())
else: return ico.read()
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.stderr.write('Usage: %s infile\n' % sys.argv[0])
else:
infile = sys.argv[1]
name, ext = os.path.splitext(os.path.basename(infile))
outfile = '%s.ico' % name
toicon(infile, outfile, (48, 48), (47, 47))
print Image.open(outfile).size
| Python | 0.000001 | |
18d129613c5a576b770a812f18ff05873925fb2c | refactor to a shorter version. | restclients/digitlib/curric.py | restclients/digitlib/curric.py | """
This is the interface for interacting with the UW Libraries Web Service.
"""
import logging
from restclients.digitlib import get_resource
url_prefix = "/php/currics/service.php?code="
sln_prefix = "&sln="
quarter_prefix = "&quarter="
year_prefix = "&year="
logger = logging.getLogger(__name__)
def get_subject_guide(course_code, sln, quarter, year):
"""
:param sln: positive integer
:param year: four digit number
Return the string representing the url of
the Library subject guide page
"""
url = "%s%s%s%s%s%s%s%s" % (url_prefix,
course_code.replace(" ", "%20"),
sln_prefix, sln,
quarter_prefix, quarter,
year_prefix, year)
return _extract_url(get_resource(url))
def _extract_url(data_in_resp):
"""
:param data_in_resp: dict
Return the string representing the url
"""
if data_in_resp is not None:
if "Location" in data_in_resp:
return data_in_resp["Location"]
if "location" in data_in_resp:
return data_in_resp["location"]
logger.warn("Invalid library curric response: %s" % data_in_resp)
return None
| """
This is the interface for interacting with the UW Libraries Web Service.
"""
import logging
from restclients.digitlib import get_resource
url_prefix = "/php/currics/service.php?code="
sln_prefix = "&sln="
quarter_prefix = "&quarter="
year_prefix = "&year="
logger = logging.getLogger(__name__)
def get_subject_guide(course_code, sln, quarter, year):
"""
:param sln: positive integer
:param year: four digit number
Return the string representing the url of
the Library subject guide page
"""
url = "%s%s%s%s%s%s%s%s" % (url_prefix,
course_code.replace(" ", "%20"),
sln_prefix, sln,
quarter_prefix, quarter,
year_prefix, year)
return _extract_url(get_resource(url))
def _extract_url(data_in_resp):
"""
:param data_in_resp: dict
Return the string representing the url
"""
if data_in_resp is not None:
if data_in_resp.get("Location") is not None:
return data_in_resp.get("Location")
if data_in_resp.get("location") is not None:
return data_in_resp.get("location")
logger.warn("Invalid library curric response: %s" % data_in_resp)
return None
| Python | 0.000174 |
f22f833efb45bdfe0458d045cfd300721185dc84 | Revert "bug fix" | sabToSickBeardwithConverter.py | sabToSickBeardwithConverter.py | import os
import sys
import autoProcessTV
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from extensions import valid_input_extensions
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
path = str(sys.argv[1])
for r, d, f in os.walk(path):
for files in f:
if os.path.splitext(files)[1][1:] in valid_input_extensions:
file = os.path.join(r, files)
convert = MkvtoMp4(path, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)
"""Contents of sabToSickbeard.py"""
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
| import os
import sys
import autoProcessTV
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from extensions import valid_input_extensions
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
path = str(sys.argv[1])
for r, d, f in os.walk(path):
for files in f:
if os.path.splitext(files)[1][1:] in valid_input_extensions:
file = os.path.join(r, files)
convert = MkvtoMp4(file, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)
"""Contents of sabToSickbeard.py"""
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
| Python | 0 |
d3a9824ea2f7675e9e0008b5d914f02e63e19d85 | Add new package. (#22639) | var/spack/repos/builtin/packages/liblbfgs/package.py | var/spack/repos/builtin/packages/liblbfgs/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Liblbfgs(AutotoolsPackage):
    """libLBFGS is a C port of the implementation of Limited-memory
    Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) method written by Jorge Nocedal.
    The L-BFGS method solves the unconstrainted minimization problem:
    minimize F(x), x = (x1, x2, ..., xN),
    only if the objective function F(x) and its gradient G(x) are computable."""

    homepage = "http://www.chokkan.org/software/liblbfgs/"
    url = "https://github.com/downloads/chokkan/liblbfgs/liblbfgs-1.10.tar.gz"
    git = "https://github.com/chokkan/liblbfgs.git"

    maintainers = ['RemiLacroix-IDRIS']

    version('master', branch='master')
    version('1.10', sha256='4158ab7402b573e5c69d5f6b03c973047a91e16ca5737d3347e3af9c906868cf')

    # A git checkout (@master) has no pre-generated configure script, so
    # the full autotools chain is needed at build time.
    depends_on('autoconf', type='build', when='@master')
    depends_on('automake', type='build', when='@master')
    depends_on('libtool', type='build', when='@master')
    depends_on('m4', type='build', when='@master')
| Python | 0 | |
a568663ebcf8b45a801df2cf2185dd3e7c969a79 | Fix fragile command description | vint/linting/policy/prohibit_command_rely_on_user.py | vint/linting/policy/prohibit_command_rely_on_user.py | import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_loader import register_policy
PROHIBITED_COMMAND_PATTERN = re.compile(r'norm(al)?\s|'
r's(u(bstitute)?)?/')
@register_policy
class ProhibitCommandRelyOnUser(AbstractPolicy):
    """Warn about Ex commands whose behavior depends on user settings.

    Flags:
      - :normal without a bang (!)
      - :substitute with a separator
    """

    def __init__(self):
        super(ProhibitCommandRelyOnUser, self).__init__()
        self.description = 'Avoid commands that rely on user settings'
        self.reference = get_reference_source('FRAGILE')
        self.level = Level.WARNING

    def listen_node_types(self):
        return [NodeType.EXCMD]

    def is_valid(self, node, lint_context):
        """Return True unless the node's command matches the prohibited set."""
        return PROHIBITED_COMMAND_PATTERN.search(node['str']) is None
| import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_loader import register_policy
PROHIBITED_COMMAND_PATTERN = re.compile(r'norm(al)?\s|'
r's(u(bstitute)?)?/')
@register_policy
class ProhibitCommandRelyOnUser(AbstractPolicy):
def __init__(self):
super(ProhibitCommandRelyOnUser, self).__init__()
self.description = 'Prefer single quoted strings'
self.reference = get_reference_source('FRAGILE')
self.level = Level.WARNING
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibit following commands:
- normal without !
- substitute
"""
command = node['str']
is_command_not_prohibited = PROHIBITED_COMMAND_PATTERN.search(command) is None
return is_command_not_prohibited
| Python | 0.004767 |
b55277497559fad19f790ba8821f02ff2ce20c91 | add a minimal smoke test of multi-run | bluesky/tests/test_multi_runs.py | bluesky/tests/test_multi_runs.py | from bluesky import preprocessors as bpp
from bluesky import plans as bp
from bluesky import plan_stubs as bps
from bluesky.preprocessors import define_run_wrapper as drw
from ophyd.sim import motor, det
from bluesky.tests.utils import DocCollector
def test_multirun_smoke(RE, hw):
    """Interleave three runs ('a', 'b', 'c') and sanity-check the documents."""
    collector = DocCollector()
    RE.subscribe(collector.insert)

    def interlaced_plan(dets, motor):
        readables = (motor, *dets)
        run_ids = list("abc")
        for rid in run_ids:
            yield from drw(bps.open_run(md={rid: rid}), run_id=rid)
        for step in range(5):
            for offset, rid in enumerate(run_ids):
                yield from bps.mov(motor, step + 0.1 * offset)
                yield from drw(bps.trigger_and_read(readables), run_id=rid)
        for rid in run_ids:
            yield from drw(bps.close_run(), run_id=rid)

    RE(interlaced_plan([hw.det], hw.motor))

    # Three starts, one descriptor per run, five events per descriptor.
    assert len(collector.start) == 3
    for start in collector.start:
        (descriptor,) = collector.descriptor[start["uid"]]
        assert len(collector.event[descriptor["uid"]]) == 5
    # Every run opened before any run closed.
    for stop in collector.stop.values():
        for start in collector.start:
            assert start["time"] < stop["time"]
| Python | 0 | |
f31b11b2cf1f6924c4373fbfaf4b911102272876 | add base serializer | cla_backend/apps/complaints/serializers.py | cla_backend/apps/complaints/serializers.py | # -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Category, Complaint
class CategorySerializerBase(serializers.ModelSerializer):
    """Exposes a complaint Category as its ``id`` and ``name`` only."""

    class Meta:
        model = Category
        fields = ('id', 'name')
class ComplaintSerializerBase(serializers.ModelSerializer):
    """Base serializer for Complaint with its category nested.

    NOTE(review): Meta declares neither ``fields`` nor ``exclude``; recent
    DRF releases require one of them -- confirm against the pinned DRF
    version before upgrading.
    """

    category = CategorySerializerBase()

    class Meta:
        model = Complaint
| Python | 0 | |
9d348cba1c800a4de9a0078ded1e03540256f8a6 | Add backwards-compatible registration.urls, but have it warn pending deprecation. | registration/urls.py | registration/urls.py | import warnings
# Backwards-compatibility shim: the URLconf moved to the default backend;
# warn anyone still including this module, then re-export the new one.
warnings.warn(
    "Using include('registration.urls') is deprecated; use "
    "include('registration.backends.default.urls') instead",
    PendingDeprecationWarning,
)
from registration.backends.default.urls import *  # noqa: F401,F403
| Python | 0 | |
d028db776b92c4d968434a64b2c5d7e02867b32e | Create db_init.py | db_init.py | db_init.py | from sqlalchemy import create_engine, Column, Integer, String, Sequence, update
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///passwords.db')
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class Locker(Base):
    """ORM model for one stored credential (site URL, username, password)."""

    __tablename__ = 'locker'

    id = Column(Integer, Sequence('website_id_seq'), primary_key=True)
    url = Column(String(60))
    user = Column(String(60))
    password = Column(String(60))

    def __repr__(self):
        # Bug fix: the original interpolated the bare names url/user/password,
        # which raises NameError at repr time; the values live on the instance.
        return "<Website(url={}, user={}, password={}>".format(
            self.url, self.user, self.password)
Base.metadata.create_all(engine)
| Python | 0.000009 | |
e7fa141bc8fade9c6a34c0bbe95df9a77eb95e0e | Update __init__.py | tendrl/commons/objects/disk/__init__.py | tendrl/commons/objects/disk/__init__.py | from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons import objects
class Disk(objects.BaseObject):
    """In-memory representation of a node's disk as persisted in etcd.

    Every named constructor argument is copied verbatim onto the instance;
    the etcd key template in ``value`` is rendered by ``_DiskEtcd``.
    """

    def __init__(self, disk_id=None, device_name=None, disk_kernel_name=None,
                 parent_id=None, parent_name=None, disk_type=None, fsuuid=None,
                 mount_point=None, model=None, vendor=None, used=None,
                 serial_no=None, rmversion=None, fstype=None, ssd=None,
                 size=None, device_number=None, driver=None, group=None,
                 device=None, bios_id=None, state=None, driver_status=None,
                 label=None, req_queue_size=None,
                 mode=None, owner=None, min_io_size=None,
                 major_to_minor_no=None, device_files=None, sysfs_busid=None,
                 alignment=None, read_only=None, read_ahead=None,
                 removable_device=None, scheduler_name=None, sysfs_id=None,
                 sysfs_device_link=None, geo_bios_edd=None,
                 geo_bios_legacy=None, geo_logical=None, phy_sector_size=None,
                 discard_granularity=None, discard_align_offset=None,
                 discard_max_bytes=None, discard_zeros_data=None,
                 optimal_io_size=None, log_sector_size=None, drive_status=None,
                 driver_modules=None, *args, **kwargs):
        # Snapshot the named parameters before any other locals exist so
        # they can all be copied onto the instance in one sweep.
        attrs = dict(locals())
        for skipped in ('self', 'args', 'kwargs'):
            attrs.pop(skipped, None)
        super(Disk, self).__init__(*args, **kwargs)
        # etcd key template, filled with (node_id, disk_id) on render.
        self.value = 'nodes/%s/Disks/all/%s'
        for attr_name, attr_value in attrs.items():
            setattr(self, attr_name, attr_value)
        self._etcd_cls = _DiskEtcd
class _DiskEtcd(EtcdObj):
    """etcd mapping for Disk; key is nodes/<node_id>/Disks/all/<disk_id>."""

    __name__ = 'nodes/%s/Disks/all/%s'
    _tendrl_cls = Disk

    def render(self):
        # NS is the process-wide tendrl namespace, available at runtime.
        self.__name__ = self.__name__ % (NS.node_context.node_id,
                                         self.disk_id)
        return super(_DiskEtcd, self).render()
| from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons import objects
class Disk(objects.BaseObject):
def __init__(self, disk_id=None, device_name=None, disk_kernel_name=None,
parent_id=None, parent_name=None, disk_type=None, fsuuid=None,
mount_point=None, model=None, vendor=None, used=None,
serial_no=None, rmversion=None, fstype=None, ssd=None,
size=None, device_number=None, driver=None, group=None,
device=None, bios_id=None, state=None, driver_status=None,
label=None, req_queue_size=None,
mode=None, owner=None, min_io_size=None,
major_to_minor_no=None, device_files=None, sysfs_busid=None,
alignment=None, read_only=None, read_ahead=None,
removable_device=None, scheduler_name=None, sysfs_id=None,
sysfs_device_link=None, geo_bios_edd=None,
geo_bios_legacy=None, geo_logical=None, phy_sector_size=None,
discard_granularity=None, discard_align_offset=None,
discard_max_bytes=None, discard_zeros_data=None,
optimal_io_size=None, log_sector_size=None, drive_status=None,
driver_modules=None, *args, **kwargs):
super(Disk, self).__init__(*args, **kwargs)
self.value = 'nodes/%s/Disks/%s'
self.disk_id = disk_id
self.device_name = device_name
self.disk_kernel_name = disk_kernel_name
self.parent_id = parent_id
self.parent_name = parent_name
self.disk_type = disk_type
self.fsuuid = fsuuid
self.mount_point = mount_point
self.model = model
self.vendor = vendor
self.used = used
self.serial_no = serial_no
self.rmversion = rmversion
self.fstype = fstype
self.ssd = ssd
self.size = size
self.device_number = device_number
self.driver = driver
self.drive_status = drive_status
self.group = group
self.device = device
self.bios_id = bios_id
self.state = state
self.driver_status = driver_status
self.label = label
self.req_queue_size = req_queue_size
self.mode = mode
self.owner = owner
self.min_io_size = min_io_size
self.major_to_minor_no = major_to_minor_no
self.device_files = device_files
self.sysfs_busid = sysfs_busid
self.alignment = alignment
self.read_only = read_only
self.read_ahead = read_ahead
self.removable_device = removable_device
self.scheduler_name = scheduler_name
self.sysfs_id = sysfs_id
self.sysfs_device_link = sysfs_device_link
self.geo_bios_edd = geo_bios_edd
self.geo_bios_legacy = geo_bios_legacy
self.geo_logical = geo_logical
self.phy_sector_size = phy_sector_size
self.discard_granularity = discard_granularity
self.discard_align_offset = discard_align_offset
self.discard_max_bytes = discard_max_bytes
self.discard_zeros_data = discard_zeros_data
self.optimal_io_size = optimal_io_size
self.log_sector_size = log_sector_size
self.driver_modules = driver_modules
self._etcd_cls = _DiskEtcd
class _DiskEtcd(EtcdObj):
"""A table of the service, lazily updated
"""
__name__ = 'nodes/%s/Disks/%s'
_tendrl_cls = Disk
def render(self):
self.__name__ = self.__name__ % (
NS.node_context.node_id, self.disk_id
)
return super(_DiskEtcd, self).render()
| Python | 0.000072 |
b40512e834e88f24c20885cddb220188fce11339 | Add verbose names to UserProfile fields. | accounts/migrations/0004_auto_20150227_2347.py | accounts/migrations/0004_auto_20150227_2347.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds German verbose names to the two email-notification flags."""

    dependencies = [
        ('accounts', '0003_auto_20150227_2158'),
    ]

    # Both fields get the same shape of change; build the operations from
    # (field name, verbose name) pairs.
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name=field_name,
            field=models.BooleanField(default=False, verbose_name=label),
            preserve_default=True,
        )
        for field_name, label in (
            ('email_on_comment_answer',
             b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
            ('email_on_message',
             b'Email-Benachrichtigung bei Nachrichten'),
        )
    ]
| Python | 0.000001 | |
18baab37c3f1924b104f4ef86224c1b197ef1dad | add problem 054 | problem_054.py | problem_054.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
'''
import timeit
class Poker:
    """One five-card poker hand built from strings like 'TS' (ten of spades).

    ``numbers`` maps card value (2..14) -> count; ``suits`` maps suit letter
    -> count.  Aces are always high (A-2-3-4-5 wheels are not recognized),
    which matches the Project Euler 54 input.
    """

    def __init__(self, cards):
        self.numbers = {}
        self.suits = {}
        for card in cards:
            value = self._to_number(card[0])
            suit = card[1]
            self.numbers[value] = self.numbers.get(value, 0) + 1
            self.suits[suit] = self.suits.get(suit, 0) + 1

    def hand(self):
        """Return the hand category, 0 (high card) .. 9 (royal straight flush)."""
        n_max, n_min, n_len = max(self.numbers), min(self.numbers), len(self.numbers)
        sames = max(self.numbers.values())
        s_len = len(self.suits)
        n_diff = n_max - n_min
        if n_len == 5:
            # five distinct values: flush / straight family or nothing
            if n_diff > 4:
                if s_len == 1:
                    return 5  # flush
                else:
                    return 0  # high card
            elif s_len > 1:
                return 4  # straight
            elif n_min == 10:
                return 9  # royal straight flush
            else:
                return 8  # straight flush
        elif n_len == 4:
            return 1  # one pair
        elif n_len == 3:
            if sames == 3:
                return 3  # three cards
            else:
                return 2  # two pair
        elif n_len == 2:
            if sames == 4:
                return 7  # four cards
            else:
                return 6  # full house

    def rank(self):
        """Tie-break string: zero-padded values ordered by count, then value."""
        # Bug fix: tuple-unpacking lambdas (`lambda (k, v): ...`) are
        # Python-2-only syntax; index the (key, value) pair instead.
        s = ''
        for k, v in sorted(self.numbers.items(),
                           key=lambda kv: (kv[1], kv[0]), reverse=True):
            s += "{0:0>2}".format(str(k)) * v
        return s

    def _to_number(self, s):
        """Map a card-value character ('2'..'9','T','J','Q','K','A') to 2..14."""
        s = str(s).replace('T', '10').replace('J', '11')\
            .replace('Q', '12').replace('K', '13').replace('A', '14')
        return int(s)
# Project Euler 54: each line of the data file holds ten cards; the first
# five are player 1's hand, the rest player 2's.  Returns wins as the list
# [player1 wins, ties, player2 wins].
def calc():
wins = [0]*3
# NOTE(review): the file handle is never closed.
for line in open('data/problem_054.txt', 'r').readlines():
cards = line.split(' ')
p1 = Poker([card.rstrip() for card in cards[:5]])
p2 = Poker([card.rstrip() for card in cards[5:]])
# Compare hand categories first; fall back to the fixed-width rank
# string, whose lexicographic order matches card-value order.
if p1.hand() > p2.hand(): wins[0] += 1
elif p1.hand() < p2.hand(): wins[2] += 1
else:
if p1.rank() > p2.rank(): wins[0] += 1
elif p1.rank() < p2.rank(): wins[2] += 1
else: wins[1] += 1
return wins
if __name__ == '__main__':
print calc()
# print timeit.Timer('problem_030.calc(5)', 'import problem_030').timeit(1)
| Python | 0.000847 | |
e21d6d88f49dbdeb2dfb96e68f174ba587eaa27a | Add pre-deploy version match | pre-deploy.py | pre-deploy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pre-deploy.py
Created by Stephan Hügel on 2017-06-06
A simple check to ensure that the tag version and the library version coincide
Intended to be called before a Wheel is written using "upload"
"""
import os
import sys
import subprocess
import re
import io
def read(*names, **kwargs):
    """Return the text of the file at *names, joined to this script's dir.

    Accepts an ``encoding`` keyword argument (defaults to utf8).
    """
    target = os.path.join(os.path.dirname(__file__), *names)
    with io.open(target, encoding=kwargs.get("encoding", "utf8")) as handle:
        return handle.read()
def find_version(*file_paths):
    """Return the ``__version__`` string assigned in the given source file.

    ``file_paths`` are path segments relative to this script, handed to
    ``read``.  Raises RuntimeError when no assignment is found.
    """
    source = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def check():
    """Return True when the newest git tag equals 'v' + the library version."""
    tag = subprocess.check_output(
        ["git", "describe", "--abbrev=0", "--tags"]).strip()
    # NOTE: Python-2-era code -- `unicode` is undefined on Python 3.
    library = unicode("v" + find_version("pyzotero/zotero.py")).strip()
    return library == tag
if __name__ == '__main__':
    # Bug fix: the original exited 1 when check() was True, i.e. it failed
    # exactly when the tag and library version coincided, defeating the
    # pre-deploy gate.  Conventional codes: 0 = success, 1 = mismatch.
    if check():
        sys.exit(0)
    else:
        sys.exit(1)
| Python | 0 | |
bc1c65315fe22146b2d9a0955acc6e286b069657 | Add problem 48 | problem_48.py | problem_48.py | '''
Problem 48
@author: Kevin Ji
'''
def self_power_with_mod(number, mod):
product = 1
for _ in range(number):
product *= number
product %= mod
return product
MOD = 10000000000
number = 0
for power in range(1, 1000 + 1):
number += self_power_with_mod(power, MOD)
number %= MOD
print(number)
| Python | 0.02481 | |
07d89159ad53b404d39b56f133ca19209da98eaa | Create mostrans-trolleybus.py | mostrans-trolleybus.py | mostrans-trolleybus.py | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import psycopg2
import time
import config
import argparse
# Fetch the Moscow OSM extract (PBF) via wget (--timestamping skips an
# unchanged file).  NOTE(review): the 'osm' directory is created but the
# wget below does not download into it -- confirm whether that is intended.
def download_osm_dump():
if not os.path.exists('osm'):
os.makedirs('osm')
os.system('wget --timestamping https://s3.amazonaws.com/metro-extracts.mapzen.com/moscow_russia.osm.pbf')
def filter_osm_dump():
    """Filter the Moscow OSM dump down to trolleybus route relations.

    Two osmosis passes: first keep route=trolleybus relations (plus the
    ways/nodes they reference), then keep only type=route relations from
    that subset.  Shells out to osmosis; no return value.

    Cleanup: the original carried unused function-local imports (json,
    pprint) and dead locals (pp, refs); they are removed here.
    """
    print('Filter step 1')
    cmd = '''
~/osmosis/bin/osmosis \
-q \
--read-pbf moscow_russia.osm.pbf \
--tf accept-relations route=trolleybus \
--used-way --used-node \
--write-pbf routes.osm.pbf
'''
    os.system(cmd)
    print('Filter step 3')
    cmd = '''
~/osmosis/bin/osmosis \
-q \
--read-pbf routes.osm.pbf \
--tf accept-relations "type=route" \
--used-way --used-node \
--write-pbf routesFinal.osm.pbf
'''
    os.system(cmd)
def argparser_prepare():
    """Build the CLI parser: one --download / --no-download flag pair
    stored in ``download`` (default False)."""

    class PrettyFormatter(argparse.ArgumentDefaultsHelpFormatter,
                          argparse.RawDescriptionHelpFormatter):
        # Widen the option column in --help output.
        max_help_position = 35

    parser = argparse.ArgumentParser(description='',
                                     formatter_class=PrettyFormatter)
    parser.add_argument('--download', dest='download', action='store_true')
    parser.add_argument('--no-download', dest='download', action='store_false')
    parser.set_defaults(download=False)
    parser.epilog = (
        'Samples:\n'
        '%(prog)s --download\n'
        '%(prog)s --no-download\n'
    ) % {'prog': parser.prog}
    return parser
# Drop every table produced by osm2pgsql / the processing steps so the next
# import starts from a clean schema.  Returns 0 when the connection fails,
# otherwise falls off the end (None).
# NOTE(review): the connection and cursor are never closed here.
def cleardb(host,dbname,user,password):
# libpq-style DSN assembled from the plain-text arguments.
ConnectionString="dbname=" + dbname + " user="+ user + " host=" + host + " password=" + password
try:
conn = psycopg2.connect(ConnectionString)
# NOTE(review): bare except hides the real error, and the fallback prints
# the password as part of the connection string.
except:
print 'I am unable to connect to the database '
print ConnectionString
return 0
cur = conn.cursor()
# Static DDL only -- nothing is interpolated into this statement.
sql ='''
DROP TABLE IF EXISTS planet_osm_line CASCADE;
DROP TABLE IF EXISTS planet_osm_nodes CASCADE;
DROP TABLE IF EXISTS planet_osm_point CASCADE;
DROP TABLE IF EXISTS planet_osm_polygon CASCADE;
DROP TABLE IF EXISTS planet_osm_rels CASCADE;
DROP TABLE IF EXISTS planet_osm_roads CASCADE;
DROP TABLE IF EXISTS planet_osm_ways CASCADE;
DROP TABLE IF EXISTS route_line_labels CASCADE;
--TRUNCATE TABLE routes_with_refs CASCADE;
DROP TABLE IF EXISTS terminals CASCADE;
--TRUNCATE TABLE terminals_export CASCADE;
'''
cur.execute(sql)
conn.commit()
print ('Database wiped')
# Load the filtered PBF into PostGIS via osm2pgsql (EPSG:3857).
# NOTE(review): the `host` and `password` parameters are ignored -- the
# command passes only --database/--username and relies on the environment
# for authentication.
def importdb(host,dbname,user,password):
os.system('''
osm2pgsql --create --slim -E 3857 --cache-strategy sparse --cache 100 --database '''+dbname+''' --username '''+user+''' routesFinal.osm.pbf
''')
# Load the "red zone" polygon into PostGIS, then delete every line/relation
# that touches it, plus routes lacking a ref tag.
# NOTE(review): SQL is assembled by string concatenation.  The osm_id values
# come from the database itself (integers), but parameterized queries would
# be safer.  Indentation was lost in this dump, so whether the per-row
# DELETE below runs inside or after the fetch loop cannot be confirmed here.
def filter_routes(host,dbname,user,password):
ConnectionString="dbname=" + dbname + " user="+ user + " host=" + host + " password=" + password
try:
conn = psycopg2.connect(ConnectionString)
except:
print 'I am unable to connect to the database '
print ConnectionString
return 0
cur = conn.cursor()
# Import the red-zone GeoJSON into table red_zone, reprojected to 3857.
cmd='''
ogr2ogr -overwrite \
"PG:host='''+host+''' dbname='''+dbname+''' user='''+user+''' password='''+password+'''" -nln red_zone \
cfg/mostrans-bus_red_zone.geojson -t_srs EPSG:3857
'''
print cmd
os.system(cmd)
# select the ways that touch the red zone
sql='''
SELECT l.osm_id
FROM planet_osm_line l, red_zone
WHERE ST_Intersects(l.way , red_zone.wkb_geometry);'''
cur.execute(sql)
WaysInRedZone=[]
rows = cur.fetchall()
for row in rows:
WaysInRedZone.append(str(row[0]))
# delete relations that contain ways touching the red zone
sql='''DELETE FROM planet_osm_rels WHERE members::VARCHAR LIKE CONCAT('%w',''' + str(row[0])+''','%') '''
print sql
cur.execute(sql)
conn.commit()
# Delete all lines inside the red zone
sql='''DELETE FROM planet_osm_line l
USING red_zone
WHERE ST_Intersects(l.way , red_zone.wkb_geometry); '''
cur.execute(sql)
conn.commit()
# Delete all routes with an empty ref
sql='''DELETE from planet_osm_rels WHERE tags::VARCHAR NOT LIKE CONCAT('%ref,%') '''
cur.execute(sql)
conn.commit()
# Delete all ways that no route passes through
# Run the osmot post-processing script against the imported data.
# NOTE(review): the `host` parameter is ignored -- the command hard-codes
# `-hs localhost`.
def process(host,dbname,user,password):
cmd='''python osmot/osmot.py -hs localhost -d '''+dbname+''' -u '''+user+''' -p '''+password+'''
'''
print cmd
os.system(cmd)
# Export one PostGIS table to <table>.geojson via ogr2ogr, removing any
# previous export first (ogr2ogr will not overwrite GeoJSON output).
def postgis2geojson(host,dbname,user,password,table):
if os.path.exists(table+'.geojson'):
os.remove(table+'.geojson')
cmd='''
ogr2ogr -f GeoJSON '''+table+'''.geojson \
"PG:host='''+host+''' dbname='''+dbname+''' user='''+user+''' password='''+password+'''" "'''+table+'''"
'''
print cmd
os.system(cmd)
# Entry point: optionally download+filter the OSM dump, then rebuild the
# database, post-process, export GeoJSON and push both layers to NextGIS Web.
if __name__ == '__main__':
host=config.host
dbname=config.dbname
user=config.user
password=config.password
parser = argparser_prepare()
args = parser.parse_args()
# NOTE(review): `time` is already imported at module scope; this re-import
# is redundant.
import time
now = time.strftime("%c")
print ("Current time %s" % now )
is_download = args.download
if is_download == True:
print "downloading"
download_osm_dump()
filter_osm_dump()
# NOTE(review): `export` only affects the short-lived shell spawned by this
# os.system call -- it does not set PGPASS for the commands run later.
os.system('export PGPASS='+password)
cleardb(host,dbname,user,password)
importdb(host,dbname,user,password)
filter_routes(host,dbname,user,password)
process(host,dbname,user,password)
postgis2geojson(host,dbname,user,password,'terminals_export')
postgis2geojson(host,dbname,user,password,'routes_with_refs')
os.system('python update_ngw_from_geojson.py --ngw_url '+config.ngw_url+' --ngw_resource_id 686 --ngw_login '+config.ngw_login+' --ngw_password '+config.ngw_password+' --check_field road_id --filename routes_with_refs.geojson')
os.system('python update_ngw_from_geojson.py --ngw_url '+config.ngw_url+' --ngw_resource_id 688 --ngw_login '+config.ngw_login+' --ngw_password '+config.ngw_password+' --check_field terminal_id --filename terminals_export.geojson')
| Python | 0.0001 | |
3e380228088a096ed20ee80f9928b368707e47ed | test parameters are fetched and placed correctly | tests/StarCatalog/test_HIPfromSimbad.py | tests/StarCatalog/test_HIPfromSimbad.py | import unittest
from tests.TestSupport.Utilities import RedirectStreams
import EXOSIMS.StarCatalog
from EXOSIMS.StarCatalog.HIPfromSimbad import HIPfromSimbad
from EXOSIMS.util.get_module import get_module
import os, sys
import pkgutil
from io import StringIO
import astropy.units as u
from EXOSIMS.util.get_dirs import get_downloads_dir
import shutil
import csv
import numpy as np
from astropy.coordinates import SkyCoord
from astroquery.exceptions import TableParseError
class TestHIPfromSimbad(unittest.TestCase):
    """
    Sonny Rappaport, July 2021, Cornell

    This class tests HIPfromSimbad: the star parameters fetched from Simbad
    are compared against a hand-assembled CSV copy of the same catalog rows.
    """

    def setUp(self):
        """
        Set up HIPfromSimbad modules via both a text file and a list, with the
        particular stars being used arbitrarily.
        """
        # HIP identifiers to query; the text file lists the same stars.
        hip_list = [37279, 97649, 32349]
        self.list_fixture = HIPfromSimbad(catalogpath=hip_list)
        path = 'tests/TestSupport/test-scripts/HIPfromSimbadTestText.txt'
        self.text_fixture = HIPfromSimbad(catalogpath=path)

    def test_init(self):
        """
        Test of initialization and __init__.

        Test method: manually place data from the Simbad database into a CSV
        file (HIPFromSimbadTestCSV, in the test-scripts folder), load it, and
        check that HIPfromSimbad gathered the same values via astroquery.
        Both the list-based and the text-file-based fixtures are verified.
        """
        HIP_list = self.list_fixture
        HIP_text = self.text_fixture

        # Same data as queried, just in CSV form for comparison.
        path = 'tests/TestSupport/test-scripts/HIPFromSimbadTestCSV.csv'
        expected = np.genfromtxt(path, delimiter=',', names=True, dtype=None,
                                 encoding=None)

        expected_names = np.array(expected['name'].astype('str'))
        # Parallax is not compared directly; it only feeds the distances.
        expected_parallax = expected['parallax'].astype('float') * u.mas
        expected_distance = expected_parallax.to('pc',
                                                 equivalencies=u.parallax())
        expected_coord_str = expected['COORDS'].astype('str')
        expected_coords = SkyCoord(expected_coord_str,
                                   unit=(u.hourangle, u.deg, u.arcsec))
        expected_spec = expected['spec'].astype('str')
        # NOTE: 'vmag' here is not Simbad's visual magnitude (Vmag) but the
        # Hipparcos median magnitude (HPmag), matching what HIPfromSimbad
        # stores; taken from the hipparcos_newreduction catalog.
        expected_vmag = expected['vmag'].astype('float')
        expected_hmag = expected['hmag'].astype('float')
        expected_imag = expected['imag'].astype('float')
        expected_bmag = expected['bmag'].astype('float')
        expected_kmag = expected['kmag'].astype('float')
        # B-V also comes from the hipparcos_newreduction catalog.
        expected_BV = expected['BV'].astype('float')
        expected_MV = expected_vmag - 5. * (np.log10(expected_distance.value) - 1.)

        np.testing.assert_equal(expected_names, HIP_list.Name)
        np.testing.assert_equal(expected_names, HIP_text.Name)

        # Flatten the SkyCoord objects into [ra, dec] pairs for comparison.
        exp_coords_array = []
        list_coords_array = []
        text_coords_array = []
        for i in range(len(expected_coords)):
            exp_coords_array.append([expected_coords[i].ra.degree,
                                     expected_coords[i].dec.degree])
            list_coords_array.append([HIP_list.coords[i].ra.degree,
                                      HIP_list.coords[i].dec.degree])
            # Bug fix: this previously appended HIP_list's coordinates, so
            # the text fixture's coordinates were never actually checked.
            text_coords_array.append([HIP_text.coords[i].ra.degree,
                                      HIP_text.coords[i].dec.degree])
        np.testing.assert_allclose(exp_coords_array, list_coords_array)
        np.testing.assert_allclose(exp_coords_array, text_coords_array)

        # Spectral type (ignoring any trailing "C" classification part).
        np.testing.assert_equal(expected_spec, HIP_list.Spec)
        np.testing.assert_equal(expected_spec, HIP_text.Spec)

        np.testing.assert_allclose(expected_distance, HIP_list.dist)
        np.testing.assert_allclose(expected_distance, HIP_text.dist)
        np.testing.assert_allclose(expected_vmag, HIP_list.Vmag)
        np.testing.assert_allclose(expected_vmag, HIP_text.Vmag)
        np.testing.assert_allclose(expected_hmag, HIP_list.Hmag)
        np.testing.assert_allclose(expected_hmag, HIP_text.Hmag)
        np.testing.assert_allclose(expected_imag, HIP_list.Imag)
        np.testing.assert_allclose(expected_imag, HIP_text.Imag)
        np.testing.assert_allclose(expected_bmag, HIP_list.Bmag)
        np.testing.assert_allclose(expected_bmag, HIP_text.Bmag)
        np.testing.assert_allclose(expected_kmag, HIP_list.Kmag)
        np.testing.assert_allclose(expected_kmag, HIP_text.Kmag)
        np.testing.assert_allclose(expected_BV, HIP_list.BV)
        np.testing.assert_allclose(expected_BV, HIP_text.BV)
        np.testing.assert_allclose(expected_MV, HIP_list.MV)
        np.testing.assert_allclose(expected_MV, HIP_text.MV)

        # Luminosity is not populated; it should be an all-NaN array with the
        # same length as the other parameter arrays (assert_equal treats NaNs
        # in matching positions as equal).
        empty_list = np.empty(expected_bmag.size)
        empty_list[:] = np.nan
        np.testing.assert_equal(empty_list, HIP_list.L)
        # Bug fix: the second check previously re-tested HIP_list.
        np.testing.assert_equal(empty_list, HIP_text.L)
| Python | 0 | |
008625fef55f8f58ab80b883d34ae5d40e55c721 | Add initial test for binheap | test_binheap.py | test_binheap.py | import pytest
from binheap import Binheap
def test_init_bh():
    """A fresh Binheap starts empty; an iterable seeds binlist."""
    b = Binheap()
    # Bug fix: `is []` compares identity against a brand-new list literal
    # and is therefore always False; value equality is what is meant here.
    assert b.binlist == []
    c = Binheap([1, 2])
    assert c.binlist == [1, 2]
| Python | 0.000001 | |
d43c67a59dcf6c43667d633df8b6f8a3eb84d611 | add HelloKhalaClient2.py | examples/testClient/HelloKhalaClient2.py | examples/testClient/HelloKhalaClient2.py | #moss's HelloKhala Client
#add time type
import socket
import struct
import json
def login():
    """Payload for a 'login' request."""
    return {'type': 'login'}


def logout():
    """Payload for a 'logout' request."""
    return {'type': 'logout'}


def devType():
    """Payload asking the node for its device type."""
    return {'type': 'dev'}


def isLogin():
    """Payload asking whether this client is logged in."""
    return {'type': 'isLogin'}


def nodeId():
    """Payload asking for the node id."""
    return {'type': 'nodeId'}


def time():
    """Payload for a 'time' request."""
    # Intentionally shadows the stdlib `time` name: the dispatch table
    # below looks these builders up by the typed command name.
    return {'type': 'time'}


def default():
    """Fallback for unknown commands; -1 signals 'unsupported'."""
    return -1
# Interactive client: read a command name, send a length-prefixed JSON
# frame to the Khala server and print the reply (Python-2 script).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 2007))
# Dispatch table: typed command -> payload builder.  `time` here is the
# local builder above, not the stdlib module.
operator = {'login':login,'logout':logout,'devType':devType,'isLogin':isLogin,'nodeId':nodeId,'time':time}
while True:
# NOTE: `input` and `bytes` below shadow builtins; raw_input is py2-only.
input = raw_input('[input cmd]:')
sendStr = operator.get(input,default)()
# default() returns -1 to flag an unknown command.
if sendStr == -1:
print 'err type:',input,'please input again!'
continue
strjson = json.dumps(sendStr)
print '[send msg]:',strjson
# Frame layout: 4-byte big-endian length followed by the JSON body.
inputLen = len(strjson)
pstr = '>I'+ str(inputLen)+'s'
bytes = struct.pack(pstr, inputLen,strjson)
s.send(bytes)
d = s.recv(1024)
# An empty read means the server closed the connection.
if len(d) == 0:
print 'exit'
break
print '[rev msg]:',d
print ''
if d == 'logout success!':
print 'exit'
break
s.close()
| Python | 0.000001 | |
80e80bff7603e852710df6c9de613b1781877b2d | Test case for two classes with the same name in one module. | tests/python/typeinference/same_name.py | tests/python/typeinference/same_name.py | class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
| Python | 0 | |
4887a269a28656c288461165078943f99e2390be | add settings template for ansible later | ansible/crates_settings.py | ansible/crates_settings.py | from base_settings import *
# Django settings overlay for the "crates" deployment, layered on top of
# base_settings.  NOTE(review): a real SECRET_KEY is committed here; it
# should be injected from the deploy environment, not source control.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zhj_+x#q-&vqh7&)7a3it@tcsf50@fh9$3&&j0*4pmt1x=ye+1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '.' looks like a match-everything placeholder for the
# template -- confirm the intended host pattern before deploying.
ALLOWED_HOSTS = ['.']
# where will nginx look for static files for production?
# collect all static files by running ./manage.py collectstatic
STATIC_URL = '/static/'
# '{{crates_dir}}' / '{{cas_dir}}' are presumably Ansible/Jinja2
# placeholders substituted at deploy time -- verify against the playbook.
STATIC_ROOT = '{{crates_dir}}'
CAS_DIRECTORY = abspath('{{cas_dir}}')
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# http://wiki.nginx.org/XSendfile
# Faster serving of CAS files. Backed by nginx using Django to authenticate the
# request.
X_SENDFILE = True
| Python | 0 | |
4143f5381b8ff47a80a550065e831c306551cd77 | solve problem 035 | python/035.py | python/035.py |
def base10_to_base2(n):
    """Return the base-2 digits of non-negative n as a decimal int (6 -> 110).

    Fixes: floor division (`//`) replaces `/`, which under Python 3 yields
    floats and recurses forever; the unused local `base2n` is dropped.
    """
    if n == 0:
        return 0
    return base10_to_base2(n // 2) * 10 + n % 2
def palindromes( s ):
flag = True
str_len = len(s)
half_len = str_len / 2
for i in range( 0, half_len+1 ):
if s[i] != s[str_len-i-1]:
flag = False
break
return flag
def solve_35():
sum = 0
for i in range( 1, 1000001 ):
if palindromes( str(i) ):
#print i
base2n = base10_to_base2( i )
if palindromes( str(base2n) ):
sum = sum + i
print i
print sum
solve_35()
| Python | 0.000285 | |
a5a7d6c3097571a9ef050a75127a2eb24ad2746c | Remove test code. | packs/alertlogic/actions/scan_list_scan_executions.py | packs/alertlogic/actions/scan_list_scan_executions.py | #!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import requests
import json
import os
import yaml
from getpass import getpass
from st2actions.runners.pythonrunner import Action
from lib import GetScanList
from lib import GetScanExecutions
class ListScanExecutions(Action):
def run(self, scan_title, customer_id=None):
"""
The template class for
Returns: An blank Dict.
Raises:
ValueError: On lack of key in config.
"""
# Set up the results
results = {}
# ChatOps is not passing None, so catch 0...
if customer_id == 0:
customer_id = None
scans = GetScanList(self.config, customer_id)
return GetScanExecutions(self.config, scans[scan_title]['id'])
| #!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import requests
import json
import os
import yaml
from getpass import getpass
from st2actions.runners.pythonrunner import Action
from lib import GetScanList
from lib import GetScanExecutions
class ListScanExecutions(Action):
def run(self, scan_title, customer_id=None):
"""
The template class for
Returns: An blank Dict.
Raises:
ValueError: On lack of key in config.
"""
# Set up the results
results = {}
# ChatOps is not passing None, so catch 0...
if customer_id == 0:
customer_id = None
scans = GetScanList(self.config, customer_id)
return GetScanExecutions(self.config, scans[scan_title]['id'])
if __name__ == '__main__':
config_file = "/home/jjm/src/our-configs/alertlogic.yaml"
with open(config_file) as f:
config = yaml.safe_load(f)
action = ListScanExecutions(config)
ScanId = action.run(scan_title="ACI - RDG3 - Martin")
print(json.dumps( ScanId,
sort_keys=True, indent=2))
| Python | 0.000001 |
675b76f2bc36d7ce97d2e7227582597067be16bd | fix list problem. | crawler/git_crawler.py | crawler/git_crawler.py | # -*- coding: utf-8 -*-
from datetime import datetime
import envoy
from allmychanges.utils import cd, get_package_metadata
def git_clone(repo_path, path):
"""Clone git repo from repo_path to local path"""
r = envoy.run('git clone {repo} {path}'.format(repo=repo_path, path=path))
if r.status_code != 0 and r.std_err != '':
return False
return True
def git_log_hash(path):
"""Return list of tuples ('hash', 'date', 'commit message')"""
splitter = '-----======!!!!!!======-----'
ins = '--!!==!!--'
with cd(path):
r = envoy.run('git log --pretty=format:"%H%n{ins}%n%ai%n{ins}%n%B%n{splitter}"'.format(ins=ins, splitter=splitter))
lst = []
for group in r.std_out.split(splitter)[:-1]:
_hash, date, msg = group.strip().split(ins)
lst.append((_hash.strip(), date.strip(), msg.strip()))
return list(reversed(lst))
def git_checkout(path, revision_hash):
with cd(path):
r = envoy.run('git checkout {revision}'.format(revision=revision_hash))
if r.status_code == 0:
return True
return False
def aggregate_git_log(path):
"""Return versions and commits in standard format"""
versions = list()
current_version, current_commits = None, list()
for rev_hash, date, msg in git_log_hash(path):
current_commits.append(msg)
if git_checkout(path=path, revision_hash=rev_hash):
version = get_package_metadata(path=path, field_name='Version')
if version != current_version:
# memorize it
versions.insert(0,
dict(version=version,
date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),
sections=[dict(notes='',
items=list(reversed(current_commits)))]))
current_version, current_commits = version, list()
if current_commits:
versions.insert(0,
dict(version='newest',
date=None,
sections=[dict(notes='',
items=list(reversed(current_commits)))]))
return versions
| # -*- coding: utf-8 -*-
from datetime import datetime
import envoy
from allmychanges.utils import cd, get_package_metadata
def git_clone(repo_path, path):
"""Clone git repo from repo_path to local path"""
r = envoy.run('git clone {repo} {path}'.format(repo=repo_path, path=path))
if r.status_code != 0 and r.std_err != '':
return False
return True
def git_log_hash(path):
"""Return list of tuples ('hash', 'date', 'commit message')"""
splitter = '-----======!!!!!!======-----'
ins = '--!!==!!--'
with cd(path):
r = envoy.run('git log --pretty=format:"%H%n{ins}%n%ai%n{ins}%n%B%n{splitter}"'.format(ins=ins, splitter=splitter))
lst = []
for group in r.std_out.split(splitter)[:-1]:
_hash, date, msg = group.strip().split(ins)
lst.append((_hash.strip(), date.strip(), msg.strip()))
return reversed(lst)
def git_checkout(path, revision_hash):
with cd(path):
r = envoy.run('git checkout {revision}'.format(revision=revision_hash))
if r.status_code == 0:
return True
return False
def aggregate_git_log(path):
"""Return versions and commits in standard format"""
versions = list()
current_version, current_commits = None, list()
for rev_hash, date, msg in git_log_hash(path):
current_commits.append(msg)
if git_checkout(path=path, revision_hash=rev_hash):
version = get_package_metadata(path=path, field_name='Version')
if version != current_version:
# memorize it
versions.insert(0,
dict(version=version,
date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),
sections=dict(notes='',
items=reversed(current_commits))))
current_version, current_commits = version, list()
if current_commits:
versions.insert(0,
dict(version='newest',
date=None,
sections=dict(notes='',
items=reversed(current_commits))))
return versions
| Python | 0.000003 |
7ecec2d2b516d9ae22a3a0f652424045d547d811 | Put object_tools in the correct order in settings | test_settings.py | test_settings.py | DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = '123'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'object_tools',
'django.contrib.admin',
'object_tools.tests'
]
ROOT_URLCONF = 'object_tools.tests.urls'
STATIC_URL = '/static/'
| DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = '123'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'object_tools',
'object_tools.tests',
]
ROOT_URLCONF = 'object_tools.tests.urls'
STATIC_URL = '/static/'
| Python | 0.000037 |
35296b1c87a86a87fbcf317e26a497fc91c287c7 | Update receiver to catch value error | lexos/receivers/kmeans_receiver.py | lexos/receivers/kmeans_receiver.py | from typing import NamedTuple
from lexos.models.filemanager_model import FileManagerModel
from lexos.receivers.base_receiver import BaseReceiver
class KMeansOption(NamedTuple):
"""The typed tuple to hold kmeans options."""
n_init: int # number of iterations with different centroids.
k_value: int # k value-for k-means analysis. (k groups)
max_iter: int # maximum number of iterations.
tolerance: float # relative tolerance, inertia to declare convergence.
init_method: str # method of initialization: "K++" or "random".
class KMeansReceiver(BaseReceiver):
def options_from_front_end(self) -> KMeansOption:
"""Get the K-means option from front end.
:return: a KmeansOption object to hold all the options.
"""
n_init = int(self._front_end_data['n_init'])
max_iter = int(self._front_end_data['max_iter'])
tolerance = float(self._front_end_data['tolerance'])
init_method = self._front_end_data['init']
# Check if no input from front-end, use the default k value.
try:
k_value = int(self._front_end_data['nclusters'])
except ValueError:
k_value = int(len(FileManagerModel().load_file_manager().
get_active_files()) / 2)
return KMeansOption(n_init=n_init,
k_value=k_value,
max_iter=max_iter,
tolerance=tolerance,
init_method=init_method)
| from typing import NamedTuple
from lexos.models.filemanager_model import FileManagerModel
from lexos.receivers.base_receiver import BaseReceiver
class KMeansOption(NamedTuple):
"""The typed tuple to hold kmeans options."""
n_init: int # number of iterations with different centroids.
k_value: int # k value-for k-means analysis. (k groups)
max_iter: int # maximum number of iterations.
tolerance: float # relative tolerance, inertia to declare convergence.
init_method: str # method of initialization: "K++" or "random".
class KMeansReceiver(BaseReceiver):
def options_from_front_end(self) -> KMeansOption:
"""Get the K-means option from front end.
:return: a KmeansOption object to hold all the options.
"""
n_init = int(self._front_end_data['n_init'])
k_value = int(self._front_end_data['nclusters'])
max_iter = int(self._front_end_data['max_iter'])
tolerance = float(self._front_end_data['tolerance'])
init_method = self._front_end_data['init']
# Check if no input, use the default k value.
if k_value == '':
k_value = int(len(FileManagerModel().load_file_manager().
get_active_files()) / 2)
return KMeansOption(n_init=n_init,
k_value=k_value,
max_iter=max_iter,
tolerance=tolerance,
init_method=init_method)
| Python | 0 |
b6f6eb362c8637839cbce4bd133895e73a695cc0 | Fix wrong args | cupy/cuda/compiler.py | cupy/cuda/compiler.py | import hashlib
import os
import re
import tempfile
import filelock
from pynvrtc import compiler
import six
from cupy.cuda import device
from cupy.cuda import function
_nvrtc_version = None
def _get_nvrtc_version():
global _nvrtc_version
if _nvrtc_version is None:
interface = compiler.NVRTCInterface()
_nvrtc_version = interface.nvrtcVersion()
return _nvrtc_version
def _get_arch():
cc = device.Device().compute_capability
return 'compute_%s' % cc
class TemporaryDirectory(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is not None:
return
for name in os.listdir(self.path):
os.unlink(os.path.join(self.path, name))
os.rmdir(self.path)
def nvrtc(source, options=(), arch=None):
if not arch:
arch = _get_arch()
options += ('-arch={}'.format(arch),)
with TemporaryDirectory() as root_dir:
path = os.path.join(root_dir, 'kern')
cu_path = '%s.cu' % path
with open(cu_path, 'w') as cu_file:
cu_file.write(source)
prog = compiler.Program(
six.b(source), six.b(os.path.basename(cu_path)))
ptx = prog.compile([six.b(o) for o in options])
return six.b(ptx)
def preprocess(source, options=()):
pp_src = compiler.Program(six.b(source), six.b('')).compile()
if isinstance(pp_src, six.binary_type):
pp_src = pp_src.decode('utf-8')
return re.sub('(?m)^#.*$', '', pp_src)
_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
def get_cache_dir():
return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
_empty_file_preprocess_cache = {}
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
global _empty_file_preprocess_cache
if cache_dir is None:
cache_dir = get_cache_dir()
if arch is None:
arch = _get_arch()
options += ('-ftz=true',)
env = (arch, options, _get_nvrtc_version())
if '#include' in source:
pp_src = '%s %s' % (env, preprocess(source, options))
else:
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
base = _empty_file_preprocess_cache[env] = preprocess('', options)
pp_src = '%s %s %s' % (env, base, source)
if isinstance(pp_src, six.text_type):
pp_src = pp_src.encode('utf-8')
name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
mod = function.Module()
if not os.path.isdir(cache_dir):
try:
os.makedirs(cache_dir)
except OSError:
if not os.path.isdir(cache_dir):
raise
lock_path = os.path.join(cache_dir, 'lock_file.lock')
path = os.path.join(cache_dir, name)
with filelock.FileLock(lock_path) as lock:
if os.path.exists(path):
with open(path, 'rb') as file:
cubin = file.read()
else:
lock.release()
cubin = nvrtc(source, options, arch)
lock.acquire()
with open(path, 'wb') as cubin_file:
cubin_file.write(cubin)
mod.load(cubin)
return mod
| import hashlib
import os
import re
import tempfile
import filelock
from pynvrtc import compiler
import six
from cupy.cuda import device
from cupy.cuda import function
_nvrtc_version = None
def _get_nvrtc_version():
global _nvrtc_version
if _nvrtc_version is None:
interface = compiler.NVRTCInterface()
_nvrtc_version = interface.nvrtcVersion()
return _nvrtc_version
def _get_arch():
cc = device.Device().compute_capability
return 'compute_%s' % cc
class TemporaryDirectory(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is not None:
return
for name in os.listdir(self.path):
os.unlink(os.path.join(self.path, name))
os.rmdir(self.path)
def nvrtc(source, options=(), arch=None):
if not arch:
arch = _get_arch()
options += ('-arch{}'.format(arch),)
with TemporaryDirectory() as root_dir:
path = os.path.join(root_dir, 'kern')
cu_path = '%s.cu' % path
with open(cu_path, 'w') as cu_file:
cu_file.write(source)
prog = compiler.Program(
six.b(source), six.b(os.path.basename(cu_path)))
ptx = prog.compile([six.b(o) for o in options])
return six.b(ptx)
def preprocess(source, options=()):
pp_src = compiler.Program(six.b(source), six.b('')).compile()
if isinstance(pp_src, six.binary_type):
pp_src = pp_src.decode('utf-8')
return re.sub('(?m)^#.*$', '', pp_src)
_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
def get_cache_dir():
return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
_empty_file_preprocess_cache = {}
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
global _empty_file_preprocess_cache
if cache_dir is None:
cache_dir = get_cache_dir()
if arch is None:
arch = _get_arch()
options += ('-ftz=true',)
env = (arch, options, _get_nvrtc_version())
if '#include' in source:
pp_src = '%s %s' % (env, preprocess(source, options))
else:
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
base = _empty_file_preprocess_cache[env] = preprocess('', options)
pp_src = '%s %s %s' % (env, base, source)
if isinstance(pp_src, six.text_type):
pp_src = pp_src.encode('utf-8')
name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
mod = function.Module()
if not os.path.isdir(cache_dir):
try:
os.makedirs(cache_dir)
except OSError:
if not os.path.isdir(cache_dir):
raise
lock_path = os.path.join(cache_dir, 'lock_file.lock')
path = os.path.join(cache_dir, name)
with filelock.FileLock(lock_path) as lock:
if os.path.exists(path):
with open(path, 'rb') as file:
cubin = file.read()
else:
lock.release()
cubin = nvrtc(source, options, arch)
lock.acquire()
with open(path, 'wb') as cubin_file:
cubin_file.write(cubin)
mod.load(cubin)
return mod
| Python | 0.999193 |
89d83b9ca8c1c52537aae0c5339b0cb5ae64c6c4 | Add additional test for template filters: for filter queries and filter with variable argument | tests/filters.py | tests/filters.py | """Test cases for variable fields
"""
import unittest
from lighty.templates import Template
from lighty.templates.filter import filter_manager
def simple_filter(value):
return str(value).upper()
filter_manager.register(simple_filter)
def argument_filter(value, arg):
return str(value) + ', ' + str(arg)
filter_manager.register(argument_filter)
def multiarg_filter(value, *args):
return ', '.join([str(arg) for arg in (value, ) + args])
filter_manager.register(multiarg_filter)
class TemplateFiltersTestCase(unittest.TestCase):
"""Test case for block template tag
"""
def assertResult(self, result, value):
assert result == value, 'Error template execution: %s' % ' '.join((
result, 'except', value))
def testSimpleFilter(self):
simple_template = Template(name='simple-filter.html')
simple_template.parse("{{ simple_var|simple_filter }}")
result = simple_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'HELLO')
def testArgFilter(self):
argument_template = Template(name='argument-filter.html')
argument_template.parse('{{ simple_var|argument_filter:"world" }}')
result = argument_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, world')
def testMultiargFilter(self):
multiarg_template = Template(name='multiarg-filter.html')
multiarg_template.parse(
'{{ simple_var|multiarg_filter:"John" "Peter" }}')
result = multiarg_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, John, Peter')
def testMultiFilter(self):
multifilter_template = Template(name='multifilter.html')
multifilter_template.parse(
'{{ simple_var|simple_filter|argument_filter:"world" }}')
result = multifilter_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'HELLO, world')
def testVaribaleArgFilter(self):
varargfilter_template = Template(name='vararg-filter.html')
varargfilter_template.parse('{{ simple_var|argument_filter:arg }}')
result = varargfilter_template.execute({
'simple_var': 'Hello',
'arg': 'world'
})
self.assertResult(result, 'Hello, world')
def test():
suite = unittest.TestSuite()
suite.addTest(TemplateFiltersTestCase('testSimpleFilter'))
suite.addTest(TemplateFiltersTestCase('testArgFilter'))
suite.addTest(TemplateFiltersTestCase('testMultiargFilter'))
suite.addTest(TemplateFiltersTestCase('testMultiFilter'))
suite.addTest(TemplateFiltersTestCase('testVaribaleArgFilter'))
return suite
| """Test cases for variable fields
"""
import unittest
from lighty.templates import Template
from lighty.templates.filter import filter_manager
def simple_filter(value):
return str(value).upper()
filter_manager.register(simple_filter)
def argument_filter(value, arg):
return str(value) + ', ' + str(arg)
filter_manager.register(argument_filter)
def multiarg_filter(value, *args):
return ', '.join([str(arg) for arg in (value, ) + args])
filter_manager.register(multiarg_filter)
class TemplateFiltersTestCase(unittest.TestCase):
"""Test case for block template tag
"""
def assertResult(self, result, value):
assert result == value, 'Error emplate execution: %s' % ' '.join((
result, 'except', value))
def testSimpleFilter(self):
simple_template = Template(name='simple-filter.html')
simple_template.parse("{{ simple_var|simple_filter }}")
result = simple_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'HELLO')
def testArgFilter(self):
argument_template = Template(name='argument-filter.html')
argument_template.parse('{{ simple_var|argument_filter:"world" }}')
result = argument_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, world')
def testMultiargFilter(self):
multiarg_template = Template(name='multiarg-filter.html')
multiarg_template.parse(
'{{ simple_var|multiarg_filter:"John" "Peter" }}')
result = multiarg_template.execute({'simple_var': 'Hello'})
self.assertResult(result, 'Hello, John, Peter')
def test():
suite = unittest.TestSuite()
suite.addTest(TemplateFiltersTestCase('testSimpleFilter'))
suite.addTest(TemplateFiltersTestCase('testArgFilter'))
suite.addTest(TemplateFiltersTestCase('testMultiargFilter'))
return suite
| Python | 0 |
c5382580601e25a9fb5b41f42548a6e49929fae0 | Put this four languages in options bring problems. | wagtailcodeblock/blocks.py | wagtailcodeblock/blocks.py | from django.forms import Media
from django.utils.translation import ugettext_lazy as _
# Wagtail 2.0 compatibility - new package paths
try:
from wagtail.core.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
except ImportError:
from wagtail.wagtailcore.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
from .settings import (
get_language_choices,
get_theme,
get_prism_version
)
class CodeBlock(StructBlock):
"""
Code Highlighting Block
"""
WCB_LANGUAGES = get_language_choices()
off_languages = ['html', 'mathml', 'svg', 'xml']
language = ChoiceBlock(choices=WCB_LANGUAGES, help_text=_('Coding language'), label=_('Language'))
code = TextBlock(label=_('Code'))
@property
def media(self):
theme = get_theme()
prism_version = get_prism_version()
if theme:
prism_theme = '-{}'.format(theme)
else:
prism_theme = ""
js_list = [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/prism.min.js".format(
prism_version,
),
]
for lang_code, lang_name in self.WCB_LANGUAGES:
# Review: https://github.com/PrismJS/prism/blob/gh-pages/prism.js#L602
if lang_code not in self.off_languages:
js_list.append(
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/components/prism-{}.min.js".format(
prism_version,
lang_code,
)
)
return Media(
js=js_list,
css={
'all': [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/themes/prism{}.min.css".format(
prism_version, prism_theme
),
]
}
)
class Meta:
icon = 'code'
template = 'wagtailcodeblock/code_block.html'
form_classname = 'code-block struct-block'
form_template = 'wagtailcodeblock/code_block_form.html'
| from django.forms import Media
from django.utils.translation import ugettext_lazy as _
# Wagtail 2.0 compatibility - new package paths
try:
from wagtail.core.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
except ImportError:
from wagtail.wagtailcore.blocks import (
StructBlock,
TextBlock,
ChoiceBlock,
)
from .settings import (
get_language_choices,
get_theme,
get_prism_version
)
class CodeBlock(StructBlock):
"""
Code Highlighting Block
"""
WCB_LANGUAGES = get_language_choices()
language = ChoiceBlock(choices=WCB_LANGUAGES, help_text=_('Coding language'), label=_('Language'))
code = TextBlock(label=_('Code'))
@property
def media(self):
theme = get_theme()
prism_version = get_prism_version()
if theme:
prism_theme = '-{}'.format(theme)
else:
prism_theme = ""
js_list = [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/prism.min.js".format(
prism_version,
),
]
for lang_code, lang_name in self.WCB_LANGUAGES:
js_list.append(
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/components/prism-{}.min.js".format(
prism_version,
lang_code,
)
)
return Media(
js=js_list,
css={
'all': [
"https://cdnjs.cloudflare.com/ajax/libs/prism/{}/themes/prism{}.min.css".format(
prism_version, prism_theme
),
]
}
)
class Meta:
icon = 'code'
template = 'wagtailcodeblock/code_block.html'
form_classname = 'code-block struct-block'
form_template = 'wagtailcodeblock/code_block_form.html'
| Python | 0.000005 |
b32f01154cce6d7b7572b04e7218b04d052661e0 | use apply_detection_link in eval | examples/ssd/eval.py | examples/ssd/eval.py | from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import VOCDetectionDataset
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv.utils import apply_detection_link
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(pretrained_model='voc0712')
elif args.model == 'ssd512':
model = SSD512(pretrained_model='voc0712')
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
model.use_preset('evaluate')
dataset = VOCDetectionDataset(
year='2007', split='test', use_difficult=True, return_difficult=True)
iterator = iterators.SerialIterator(
dataset, args.batchsize, repeat=False, shuffle=False)
start_time = time.time()
processed = 0
def hook(
pred_bboxes, pred_labels, pred_scores, gt_values):
global processed
processed += len(pred_bboxes)
fps = len(processed) / (time.time() - start_time)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
len(processed), len(dataset), fps))
sys.stdout.flush()
pred_bboxes, pred_labels, pred_scores, gt_values = \
apply_detection_link(model, iterator, hook=hook)
gt_bboxes, gt_labels, gt_difficults = gt_values
eval_ = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print()
print('mAP: {:f}'.format(eval_['map']))
for l, name in enumerate(voc_detection_label_names):
if l in eval_:
print('{:s}: {:f}'.format(name, eval_[l]['ap']))
else:
print('{:s}: -'.format(name))
if __name__ == '__main__':
main()
| from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import VOCDetectionDataset
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(pretrained_model='voc0712')
elif args.model == 'ssd512':
model = SSD512(pretrained_model='voc0712')
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
model.use_preset('evaluate')
dataset = VOCDetectionDataset(
year='2007', split='test', use_difficult=True, return_difficult=True)
iterator = iterators.SerialIterator(
dataset, args.batchsize, repeat=False, shuffle=False)
start_time = time.time()
pred_bboxes = list()
pred_labels = list()
pred_scores = list()
gt_bboxes = list()
gt_labels = list()
gt_difficults = list()
while True:
try:
batch = next(iterator)
except StopIteration:
break
imgs, bboxes, labels, difficults = zip(*batch)
gt_bboxes.extend(bboxes)
gt_labels.extend(labels)
gt_difficults.extend(difficults)
bboxes, labels, scores = model.predict(imgs)
pred_bboxes.extend(bboxes)
pred_labels.extend(labels)
pred_scores.extend(scores)
fps = len(gt_bboxes) / (time.time() - start_time)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
len(gt_bboxes), len(dataset), fps))
sys.stdout.flush()
eval_ = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print()
print('mAP: {:f}'.format(eval_['map']))
for l, name in enumerate(voc_detection_label_names):
if l in eval_:
print('{:s}: {:f}'.format(name, eval_[l]['ap']))
else:
print('{:s}: -'.format(name))
if __name__ == '__main__':
main()
| Python | 0 |
8874af7c0db371f63da687c5398db1c7b80f58cd | Fix import of django during install time (for environments like Heroku) (#120) | todo/__init__.py | todo/__init__.py | """
A multi-user, multi-group task management and assignment system for Django.
"""
__version__ = "2.4.10"
__author__ = "Scot Hacker"
__email__ = "shacker@birdhouse.org"
__url__ = "https://github.com/shacker/django-todo"
__license__ = "BSD License"
try:
from . import check
except ModuleNotFoundError:
# this can happen during install time, if django is not installed yet!
pass
| """
A multi-user, multi-group task management and assignment system for Django.
"""
__version__ = "2.4.10"
__author__ = "Scot Hacker"
__email__ = "shacker@birdhouse.org"
__url__ = "https://github.com/shacker/django-todo"
__license__ = "BSD License"
from . import check
| Python | 0.000005 |
e3c51012a36fd85781824bd6b66c7e5e1d6696a9 | Add documentation | app/resources/companies.py | app/resources/companies.py | from app.models import Company, PostalCode, CompanyPostalCode
from flask_restful import Resource, reqparse
class Companies(Resource):
def get(self):
"""
List all restaurants
---
tags:
- Restaurants
definitions:
- schema:
id: Restaurant
properties:
id:
type: integer
description: the restaurant's id
email:
type: string
description: the restaurant's email
name:
type: string
description: the restaurant's name
logo_url:
type: string
description: the restaurant's logo url
address:
type: string
description: the restaurant's address
phone_number:
type: string
description: the restaurant's phone number
country_code:
type: string
description: the restaurant's country code
responses:
200:
description: Lists all restaurants
schema:
title: Restaurants
type: array
items:
$ref: '#/definitions/Restaurant'
"""
parser = reqparse.RequestParser()
parser.add_argument('country')
parser.add_argument('postal_code')
args = parser.parse_args()
country_code = args.get('country')
postal_code = args.get('postal_code')
company_query = Company.query
if country_code is not None:
company_query = company_query.filter(Company.country_code == country_code)
if country_code is not None and postal_code is not None:
postal_code = PostalCode.query.filter(PostalCode.country_code == country_code,
PostalCode.postal_code == postal_code).first()
if postal_code is None:
return 'Country code or postal code not found', 400
company_postal_codes = CompanyPostalCode.query.filter(CompanyPostalCode.postal_code_id == postal_code.id).all()
response = []
for company_postal_code in company_postal_codes:
company = Company.query.get(company_postal_code.company.id)
response.append(company.dictionary())
return response
else:
companies = company_query.all()
companies_array = []
for company in companies:
companies_array.append(company.dictionary())
return companies_array
class SingleCompany(Resource):
def get(self, company_id):
company = Company.query.get(company_id)
if company is None:
return 'Company not found', 400
return company.dictionary()
| from app.models import Company, PostalCode, CompanyPostalCode
from flask_restful import Resource, reqparse
class Companies(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('country')
parser.add_argument('postal_code')
args = parser.parse_args()
country_code = args.get('country')
postal_code = args.get('postal_code')
company_query = Company.query
if country_code is not None:
company_query = company_query.filter(Company.country_code == country_code)
if country_code is not None and postal_code is not None:
postal_code = PostalCode.query.filter(PostalCode.country_code == country_code,
PostalCode.postal_code == postal_code).first()
if postal_code is None:
return 'Country code or postal code not found', 400
company_postal_codes = CompanyPostalCode.query.filter(CompanyPostalCode.postal_code_id == postal_code.id).all()
response = []
for company_postal_code in company_postal_codes:
company = Company.query.get(company_postal_code.company.id)
response.append(company.dictionary())
return response
else:
companies = company_query.all()
companies_array = []
for company in companies:
companies_array.append(company.dictionary())
return companies_array
class SingleCompany(Resource):
def get(self, company_id):
company = Company.query.get(company_id)
if company is None:
return 'Company not found', 400
return company.dictionary()
| Python | 0 |
4d2f3ee1343b9aef24f599b8acd07ed8340f0bff | convert that to a list so we can measure it's len in a template | tndata_backend/notifications/views.py | tndata_backend/notifications/views.py | from collections import defaultdict
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.shortcuts import render, redirect
from . import queue
from .models import GCMMessage
@user_passes_test(lambda u: u.is_staff, login_url='/')
def dashboard(request):
"""A simple dashboard for enqueued GCM notifications."""
jobs = queue.messages() # Get the enqueued messages
ids = [job.args[0] for job, _ in jobs]
message_data = defaultdict(dict)
fields = ['id', 'title', 'user__email', 'message']
messages = GCMMessage.objects.filter(pk__in=ids).values_list(*fields)
for msg in messages:
mid, title, email, message = msg
message_data[mid] = {
'id': mid,
'title': title,
'email': email,
'message': message,
}
jobs = [
(job, scheduled_for, message_data[job.args[0]])
for job, scheduled_for in jobs
]
context = {
'jobs': jobs,
'metrics': ['GCM Message Sent', 'GCM Message Scheduled', ]
}
return render(request, "notifications/index.html", context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_job(request):
"""Look for an enqueued job with the given ID and cancel it."""
job_id = request.POST.get('job_id', None)
if request.method == "POST" and job_id:
for job, _ in queue.messages():
if job.id == job_id:
job.cancel()
messages.success(request, "That notification has been cancelled")
break
return redirect("notifications:dashboard")
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_all_jobs(request):
"""Cancels all queued messages."""
if request.method == "POST":
count = 0
for job, _ in queue.messages():
job.cancel()
count += 1
messages.success(request, "Cancelled {} notifications.".format(count))
return redirect("notifications:dashboard")
| from collections import defaultdict
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.shortcuts import render, redirect
from . import queue
from .models import GCMMessage
@user_passes_test(lambda u: u.is_staff, login_url='/')
def dashboard(request):
"""A simple dashboard for enqueued GCM notifications."""
jobs = queue.messages() # Get the enqueued messages
ids = [job.args[0] for job, _ in jobs]
message_data = defaultdict(dict)
fields = ['id', 'title', 'user__email', 'message']
messages = GCMMessage.objects.filter(pk__in=ids).values_list(*fields)
for msg in messages:
mid, title, email, message = msg
message_data[mid] = {
'id': mid,
'title': title,
'email': email,
'message': message,
}
jobs = (
(job, scheduled_for, message_data[job.args[0]])
for job, scheduled_for in jobs
)
context = {
'jobs': jobs,
'metrics': ['GCM Message Sent', 'GCM Message Scheduled', ]
}
return render(request, "notifications/index.html", context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_job(request):
"""Look for an enqueued job with the given ID and cancel it."""
job_id = request.POST.get('job_id', None)
if request.method == "POST" and job_id:
for job, _ in queue.messages():
if job.id == job_id:
job.cancel()
messages.success(request, "That notification has been cancelled")
break
return redirect("notifications:dashboard")
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_all_jobs(request):
"""Cancels all queued messages."""
if request.method == "POST":
count = 0
for job, _ in queue.messages():
job.cancel()
count += 1
messages.success(request, "Cancelled {} notifications.".format(count))
return redirect("notifications:dashboard")
| Python | 0.000116 |
6618ea7c1b67d87acff86338415e2a322a01cc3c | add loopback support | testsniff.py | testsniff.py | #!/usr/bin/env python
import getopt, sys
import dpkt, pcap
def usage():
print >>sys.stderr, 'usage: %s [-i device] [pattern]' % sys.argv[0]
sys.exit(1)
def main():
opts, args = getopt.getopt(sys.argv[1:], 'i:h')
name = None
for o, a in opts:
if o == '-i': name = a
else: usage()
pc = pcap.pcap(name)
pc.setfilter(' '.join(args))
decode = { pcap.DLT_LOOP:dpkt.loopback.Loopback,
pcap.DLT_NULL:dpkt.loopback.Loopback,
pcap.DLT_EN10MB:dpkt.ethernet.Ethernet }[pc.datalink()]
try:
print 'listening on %s: %s' % (pc.name, pc.filter)
for ts, pkt in pc:
print ts, `decode(pkt)`
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import getopt, sys
import pcap
from dpkt.ethernet import Ethernet
def usage():
print >>sys.stderr, 'usage: %s [-i device] [pattern]' % sys.argv[0]
sys.exit(1)
def main():
opts, args = getopt.getopt(sys.argv[1:], 'i:h')
name = None
for o, a in opts:
if o == '-i': name = a
else: usage()
pc = pcap.pcap(name)
pc.setfilter(' '.join(args))
try:
print 'listening on %s: %s' % (pc.name, pc.filter)
for ts, pkt in pc:
print ts, `Ethernet(pkt)`
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
if __name__ == '__main__':
main()
| Python | 0.000001 |
4dc49433ad354b1530207db308f4c7b76f40db70 | Add command | tf2server.py | tf2server.py | import os
import time
import libtmux
class CorruptedTf2ServerInstanceError(Exception):
"""
Raised when an invalid TF2 server instance is found.
"""
class Tf2Server(object):
"""
The Tf2Server class represents a single Team Fortress 2 server.
"""
def __init__(self, name, path):
"""
Create the Tf2Server class instance that uses the given path.
:param name: The TF2 server instance name.
:param path: The absolute path to where the TF2 server is located.
"""
self.name = name
self.path = path
self.tmux_server = None
if not os.path.isdir(os.path.join(path, 'tf')):
raise CorruptedTf2ServerInstanceError()
def _get_tmux_session_name(self):
file_name = os.path.join(self.path, '.tmux-session')
if not os.path.isfile(file_name):
return self.name
else:
with open(file_name, 'r') as f:
content = f.read()
return content.strip()
def _get_log_file_path(self):
return os.path.join(self.path, self.name.join('.log'))
def _has_sourcemod(self):
path = os.path.join(self.path, 'tf/addons/sourcemod/plugins/basechat.smx')
return os.path.isfile(path)
def command(self, command):
"""
Execute a command on the running TF2 server instance.
:param command: str
"""
if not self.is_running():
return
session = self.tmux_server.find_where({'session_name': self._get_tmux_session_name()})
pane = session.attached_pane
print(command)
pane.send_keys(command)
def is_running(self):
"""
Check whether the server is running or not.
:return: True if the instance is running, False otherwise.
"""
session_name = self._get_tmux_session_name()
if not self.tmux_server:
self.tmux_server = libtmux.Server()
return self.tmux_server.has_session(session_name)
def start(self, ip, port=27015, map='cp_badlands', server_cfg_file='server.cfg'):
"""
Starts the server, if it is not yet running.
"""
if self.is_running():
print('Server already running')
else:
session = self.tmux_server.new_session(self._get_tmux_session_name())
pane = session.attached_pane
srcds_location = os.path.join(self.path, 'srcds_run')
command = '{0} -game tf -ip {1} -port {2} +map {3} +maxplayers 24 -secured -timeout 0 +servercfgfile {4}' \
.format(srcds_location, ip, port, map, server_cfg_file)
print(command)
pane.send_keys(command)
def stop(self):
if self.is_running():
msg = 'Server shutting down in 10 seconds!'
print(msg)
if self._has_sourcemod():
self.command('sm_csay "{0}"'.format(msg))
self.command('say "{0}"'.format(msg))
time.sleep(10)
self.command('quit')
time.sleep(5)
self.tmux_server.kill_session(self._get_tmux_session_name())
| import os
import libtmux
class CorruptedTf2ServerInstanceError(Exception):
"""
Raised when an invalid TF2 server instance is found.
"""
class Tf2Server(object):
"""
The Tf2Server class represents a single Team Fortress 2 server.
"""
def __init__(self, name, path):
"""
Creates the Tf2Server class instance that uses the given path.
:param name: The TF2 server instance name.
:param path: The absolute path to where the TF2 server is located.
"""
self.name = name
self.path = path
self.tmux_server = None
if not os.path.isdir(os.path.join(path, 'tf')):
raise CorruptedTf2ServerInstanceError()
def _get_tmux_session_name(self):
file_name = os.path.join(self.path, '.tmux-session')
if not os.path.isfile(file_name):
return self.name
else:
with open(file_name, 'r') as f:
content = f.read()
return content.strip()
def _get_log_file_path(self):
return os.path.join(self.path, self.name.join('.log'))
def is_running(self):
"""
Checks whether the server is running or not.
:return: True if the instance is running, False otherwise.
"""
session_name = self._get_tmux_session_name()
if not self.tmux_server:
self.tmux_server = libtmux.Server()
return self.tmux_server.has_session(session_name)
def start(self, ip, port=27015, map='cp_badlands', server_cfg_file='server.cfg'):
"""
Starts the server, if it is not yet running.
"""
if self.is_running():
print('Server already running')
else:
session = self.tmux_server.new_session(self._get_tmux_session_name())
pane = session.attached_pane
srcds_location = os.path.join(self.path, 'srcds_run')
exec = '{0} -game tf -ip {1} -port {2} +map {3} +maxplayers 24 -secured -timeout 0 +servercfgfile {4}' \
.format(srcds_location, ip, port, map, server_cfg_file)
print(exec)
pane.send_keys(exec)
def stop(self):
if self.is_running():
self.tmux_server.kill_session(self._get_tmux_session_name())
| Python | 0.001952 |
de695899f757df8f320ac560dea90e84b07e1bd3 | Reorganize newer tests a bit | tests/runners.py | tests/runners.py | import sys
from invoke.vendor.six import StringIO
from spec import Spec, trap, eq_, skip
from invoke import Local, Context
from _utils import mock_subprocess, mock_pty
class Local_(Spec):
def _run(self, *args, **kwargs):
return Local(Context()).run(*args, **kwargs)
class stream_control:
@trap
@mock_subprocess(out="sup")
def out_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
self._run("nope")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
self._run("nope")
eq_(sys.stderr.getvalue(), "sup")
@trap
@mock_subprocess(out="sup")
def out_can_be_overridden(self):
"out_stream can be overridden"
out = StringIO()
self._run("nope", out_stream=out)
eq_(out.getvalue(), "sup")
eq_(sys.stdout.getvalue(), "")
@trap
@mock_subprocess(err="sup")
def err_can_be_overridden(self):
"err_stream can be overridden"
err = StringIO()
self._run("nope", err_stream=err)
eq_(err.getvalue(), "sup")
eq_(sys.stderr.getvalue(), "")
@trap
@mock_pty(out="sup")
def pty_defaults_to_sys(self):
self._run("nope", pty=True)
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_pty(out="yo")
def pty_out_can_be_overridden(self):
out = StringIO()
self._run("nope", pty=True, out_stream=out)
eq_(out.getvalue(), "yo")
eq_(sys.stdout.getvalue(), "")
class pty_fallback:
def warning_only_fires_once(self):
# I.e. if implementation checks pty-ness >1 time, only one warning
# is emitted. This is kinda implementation-specific, but...
skip()
@mock_pty(isatty=False)
def can_be_overridden(self):
# Do the stuff
self._run("true", pty=True, fallback=False)
# @mock_pty's asserts will be mad if pty-related os/pty calls
# didn't fire, so we're done.
class encoding:
def defaults_to_local_encoding(self):
skip()
def can_be_overridden(self):
skip()
| import sys
from invoke.vendor.six import StringIO
from spec import Spec, trap, eq_, skip
from invoke import Local, Context
from _utils import mock_subprocess, mock_pty
class Local_(Spec):
def _run(self, *args, **kwargs):
return Local(Context()).run(*args, **kwargs)
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
self._run("nope")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
self._run("nope")
eq_(sys.stderr.getvalue(), "sup")
@trap
@mock_subprocess(out="sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
out = StringIO()
self._run("nope", out_stream=out)
eq_(out.getvalue(), "sup")
eq_(sys.stdout.getvalue(), "")
@trap
@mock_subprocess(err="sup")
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
err = StringIO()
self._run("nope", err_stream=err)
eq_(err.getvalue(), "sup")
eq_(sys.stderr.getvalue(), "")
@trap
@mock_pty(out="sup")
def pty_output_stream_defaults_to_sys(self):
self._run("nope", pty=True)
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_pty(out="yo")
def pty_output_stream_overrides_work(self):
out = StringIO()
self._run("nope", pty=True, out_stream=out)
eq_(out.getvalue(), "yo")
eq_(sys.stdout.getvalue(), "")
def pty_fallback_warnings_only_fire_once(self):
# I.e. if implementation checks pty-ness >1 time, only one warning
# is emitted. This is kinda implementation-specific, but...
skip()
def encoding_defaults_to_local_encoding(self):
skip()
def encoding_can_be_overridden(self):
skip()
@mock_pty(isatty=False)
def fallback_can_be_overridden(self):
# Do the stuff
self._run("true", pty=True, fallback=False)
# @mock_pty's asserts will be mad if pty-related os/pty calls
# didn't fire, so we're done.
| Python | 0.000001 |
a1bda82bd06cbfd12e6074f22cb31d88f2abd96a | update py +x | tools/fuckGFW.py | tools/fuckGFW.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Update hosts for *nix
Author: cloud@txthinking.com
Version: 0.0.1
Date: 2012-10-24 14:35:39
'''
import urllib2
import os
import sys
HOSTS_PATH = "/etc/hosts"
HOSTS_SOURCE = "http://tx.txthinking.com/hosts"
SEARCH_STRING = "#TX-HOSTS"
def GetRemoteHosts(url):
f = urllib2.urlopen(url, timeout=5)
hosts = [line for line in f]
f.close()
return hosts
def main():
try:
hosts = GetRemoteHosts(HOSTS_SOURCE)
except IOError:
print "Could't connect to %s. Try again." % HOSTS_SOURCE
sys.exit(1)
yours = ""
if os.path.isfile(HOSTS_PATH):
f = open(HOSTS_PATH, "r")
for line in f:
if SEARCH_STRING in line:
break
yours += line
f.close()
os.rename(HOSTS_PATH, HOSTS_PATH + ".BAK")
yours += SEARCH_STRING + "\n"
fp = open(HOSTS_PATH, "w")
fp.write(yours)
fp.writelines(hosts)
fp.close()
print "Success"
if __name__ == "__main__":
main()
| '''
Update hosts for *nix
Author: cloud@txthinking.com
Version: 0.0.1
Date: 2012-10-24 14:35:39
'''
import urllib2
import os
import sys
HOSTS_PATH = "/etc/hosts"
HOSTS_SOURCE = "http://tx.txthinking.com/hosts"
SEARCH_STRING = "#TX-HOSTS"
def GetRemoteHosts(url):
f = urllib2.urlopen(url, timeout=5)
hosts = [line for line in f]
f.close()
return hosts
def main():
try:
hosts = GetRemoteHosts(HOSTS_SOURCE)
except IOError:
print "Could't connect to %s. Try again." % HOSTS_SOURCE
sys.exit(1)
yours = ""
if os.path.isfile(HOSTS_PATH):
f = open(HOSTS_PATH, "r")
for line in f:
if SEARCH_STRING in line:
break
yours += line
f.close()
os.rename(HOSTS_PATH, HOSTS_PATH + ".BAK")
yours += SEARCH_STRING + "\n"
fp = open(HOSTS_PATH, "w")
fp.write(yours)
fp.writelines(hosts)
fp.close()
print "Success"
if __name__ == "__main__":
main()
| Python | 0.000001 |
ed8fc99f0867779db8879b3f6cc142985d01fc58 | change RunInstances argument | qingcloud/cli/iaas_client/actions/instance/run_instances.py | qingcloud/cli/iaas_client/actions/instance/run_instances.py | # coding: utf-8
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class RunInstancesAction(BaseAction):
action = 'RunInstances'
command = 'run-instances'
usage = '%(prog)s --image_id <image_id> --instance_type <instance_type> [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-m', '--image_id', dest='image_id',
action='store', type=str, default='',
help='Image ID')
parser.add_argument('-t', '--instance_type', dest='instance_type',
action='store', type=str, default=None,
help='Instance type: small_b, small_c, medium_a, medium_b, medium_c,\
large_a, large_b, large_c')
parser.add_argument('-c', '--count', dest = 'count',
action='store', type=int, default=1,
help='The number of instances to launch, default 1.')
parser.add_argument('-C', '--cpu', dest='cpu',
action='store', type=int, default=0,
help='CPU core: 1, 2, 4, 8, 16')
parser.add_argument('-M', '--memory', dest='memory',
action='store', type=int, default=0,
help='Memory size in MB: 512, 1024, 2048, 4096, 8192, 16384')
parser.add_argument('-N', '--instance_name', dest='instance_name',
action='store', type=str, default='',
help='Instance name')
parser.add_argument('-n', '--vxnets', dest='vxnets',
action='store', type=str, default='',
help='Specifies the IDs of vxnets the instance will join.')
parser.add_argument('-s', '--security_group', dest='security_group',
action='store', type=str, default='',
help='The ID of security group that will be applied to instance')
parser.add_argument('-l', '--login_mode', dest='login_mode',
action='store', type=str, default='',
help='SSH login mode: keypair or passwd')
parser.add_argument('-p', '--login_passwd', dest='login_passwd',
action='store', type=str, default='',
help='Login_passwd, should specified when SSH login mode is "passwd".')
parser.add_argument('-k', '--login_keypair', dest='login_keypair',
action='store', type=str, default='',
help='Login_keypair, should specified when SSH login mode is "keypair".')
return parser
@classmethod
def build_directive(cls, options):
required_params = {
'image_id': options.image_id,
'instance_type': options.instance_type,
}
for param in required_params:
if required_params[param] is None or required_params[param] == '':
print 'error: param [%s] should be specified' % param
return None
return {
'image_id': options.image_id,
'instance_type' : options.instance_type,
'cpu': options.cpu,
'memory': options.memory,
'instance_name' : options.instance_name,
'count' : options.count,
'vxnets': explode_array(options.vxnets),
'security_group': options.security_group,
'login_mode': options.login_mode,
'login_passwd': options.login_passwd,
'login_keypair': options.login_keypair,
}
| # coding: utf-8
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class RunInstancesAction(BaseAction):
action = 'RunInstances'
command = 'run-instances'
usage = '%(prog)s --image_id <image_id> --instance_type <instance_type> [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-i', '--image_id', dest='image_id',
action='store', type=str, default='',
help='Image ID')
parser.add_argument('-t', '--instance_type', dest='instance_type',
action='store', type=str, default=None,
help='Instance type: small_b, small_c, medium_a, medium_b, medium_c,\
large_a, large_b, large_c')
parser.add_argument('-c', '--count', dest = 'count',
action='store', type=int, default=1,
help='The number of instances to launch, default 1.')
parser.add_argument('-C', '--cpu', dest='cpu',
action='store', type=int, default=0,
help='CPU core: 1, 2, 4, 8, 16')
parser.add_argument('-M', '--memory', dest='memory',
action='store', type=int, default=0,
help='Memory size in MB: 512, 1024, 2048, 4096, 8192, 16384')
parser.add_argument('-N', '--instance_name', dest='instance_name',
action='store', type=str, default='',
help='Instance name')
parser.add_argument('-n', '--vxnets', dest='vxnets',
action='store', type=str, default='',
help='Specifies the IDs of vxnets the instance will join.')
parser.add_argument('-s', '--security_group', dest='security_group',
action='store', type=str, default='',
help='The ID of security group that will be applied to instance')
parser.add_argument('-m', '--login_mode', dest='login_mode',
action='store', type=str, default='',
help='SSH login mode: keypair or passwd')
parser.add_argument('-p', '--login_passwd', dest='login_passwd',
action='store', type=str, default='',
help='Login_passwd, should specified when SSH login mode is "passwd".')
parser.add_argument('-k', '--login_keypair', dest='login_keypair',
action='store', type=str, default='',
help='Login_keypair, should specified when SSH login mode is "keypair".')
return parser
@classmethod
def build_directive(cls, options):
required_params = {
'image_id': options.image_id,
'instance_type': options.instance_type,
}
for param in required_params:
if required_params[param] is None or required_params[param] == '':
print 'error: param [%s] should be specified' % param
return None
return {
'image_id': options.image_id,
'instance_type' : options.instance_type,
'cpu': options.cpu,
'memory': options.memory,
'instance_name' : options.instance_name,
'count' : options.count,
'vxnets': explode_array(options.vxnets),
'security_group': options.security_group,
'login_mode': options.login_mode,
'login_passwd': options.login_passwd,
'login_keypair': options.login_keypair,
}
| Python | 0 |
3d5fc893cee6b7ab1596acedb052366ce86005b7 | Use requests module rather than mozsvc.http_helpers | tokenserver/assignment/sqlnode.py | tokenserver/assignment/sqlnode.py | """ SQL Mappers
"""
import json
import sys
from zope.interface import implements
import time
import requests
from mozsvc.util import dnslookup
from tokenserver.assignment import INodeAssignment
from tokenserver.util import get_logger
# try to have this changed upstream:
# XXX being able to set autocommit=1;
# forcing it for now
from pymysql.connections import Connection, COM_QUERY
def autocommit(self, value):
value = True
try:
self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
self.escape(value))
self.read_packet()
except:
exc, value, __ = sys.exc_info()
self.errorhandler(None, exc, value)
Connection.autocommit = autocommit
from mozsvc.exceptions import BackendError
from wimms.sql import SQLMetadata
from wimms.shardedsql import ShardedSQLMetadata
class SQLNodeAssignment(SQLMetadata):
"""Just a placeholder to mark with a zope interface.
Silly, isn't it ?
"""
implements(INodeAssignment)
def get_patterns(self):
res = super(SQLNodeAssignment, self).get_patterns()
return dict([(pattern.service, pattern.pattern) for pattern in res])
class ShardedSQLNodeAssignment(ShardedSQLMetadata):
"""Like the SQL backend, but with one DB per service
"""
implements(INodeAssignment)
class SecuredShardedSQLNodeAssignment(ShardedSQLMetadata):
"""Like the sharded backend, but proxies all writes to stoken
"""
implements(INodeAssignment)
def __init__(self, proxy_uri, databases, create_tables, **kw):
base = super(SecuredShardedSQLNodeAssignment, self)
base.__init__(databases, create_tables, **kw)
self.proxy_uri = proxy_uri
self.logger = None
self._resolved = None, time.time()
def get_logger(self):
if self.logger is None:
self.logger = get_logger()
return self.logger
def _proxy(self, method, url, data=None, headers=None):
if data is not None:
data = json.dumps(data)
try:
resp = requests.request(method, url, data=data, headers=headers)
except requests.exceptions.RequestException:
self.get_logger().exception("error talking to sreg (%s)" % (url,))
raise BackendError('Error talking to proxy')
body = resp.content
if body:
try:
body = json.loads(body)
except ValueError:
self.get_logger().error("bad json body from sreg (%s): %s" %
(url, body))
raise BackendError('Bad answer from proxy')
return resp.status_code, body
def _dnslookup(self, proxy):
# does a DNS lookup with gethostbyname and cache it in
# memory for one hour.
current, age = self._resolved
if current is None or age + 3600 < time.time():
current = dnslookup(proxy)
self._resolved = current, time.time()
return current
def allocate_node(self, email, service):
"""Calls the proxy to get an allocation"""
proxy_uri = self._dnslookup(self.proxy_uri)
url = '%s/1.0/%s' % (proxy_uri, service)
data = {'email': email}
status, body = self._proxy('POST', url, data)
if status != 200:
msg = 'Could not get an allocation\n'
msg += 'status: %s\n' % status
msg += 'body: %s\n' % str(body)
raise BackendError(msg, backend=url)
return body['uid'], body['node']
| """ SQL Mappers
"""
import json
import sys
from zope.interface import implements
import time
from mozsvc.util import dnslookup
from tokenserver.assignment import INodeAssignment
from tokenserver.util import get_logger
# try to have this changed upstream:
# XXX being able to set autocommit=1;
# forcing it for now
from pymysql.connections import Connection, COM_QUERY
def autocommit(self, value):
value = True
try:
self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
self.escape(value))
self.read_packet()
except:
exc, value, __ = sys.exc_info()
self.errorhandler(None, exc, value)
Connection.autocommit = autocommit
from mozsvc.exceptions import BackendError
from mozsvc.http_helpers import get_url
from wimms.sql import SQLMetadata
from wimms.shardedsql import ShardedSQLMetadata
class SQLNodeAssignment(SQLMetadata):
"""Just a placeholder to mark with a zope interface.
Silly, isn't it ?
"""
implements(INodeAssignment)
def get_patterns(self):
res = super(SQLNodeAssignment, self).get_patterns()
return dict([(pattern.service, pattern.pattern) for pattern in res])
class ShardedSQLNodeAssignment(ShardedSQLMetadata):
"""Like the SQL backend, but with one DB per service
"""
implements(INodeAssignment)
class SecuredShardedSQLNodeAssignment(ShardedSQLMetadata):
"""Like the sharded backend, but proxies all writes to stoken
"""
implements(INodeAssignment)
def __init__(self, proxy_uri, databases, create_tables, **kw):
base = super(SecuredShardedSQLNodeAssignment, self)
base.__init__(databases, create_tables, **kw)
self.proxy_uri = proxy_uri
self.logger = None
self._resolved = None, time.time()
def get_logger(self):
if self.logger is None:
self.logger = get_logger()
return self.logger
def _proxy(self, method, url, data=None, headers=None):
if data is not None:
data = json.dumps(data)
status, headers, body = get_url(url, method, data, headers)
if body:
try:
body = json.loads(body)
except ValueError:
self.get_logger().error("bad json body from sreg (%s): %s" %
(url, body))
raise BackendError('Bad answer from proxy')
return status, body
def _dnslookup(self, proxy):
# does a DNS lookup with gethostbyname and cache it in
# memory for one hour.
current, age = self._resolved
if current is None or age + 3600 < time.time():
current = dnslookup(proxy)
self._resolved = current, time.time()
return current
def allocate_node(self, email, service):
"""Calls the proxy to get an allocation"""
proxy_uri = self._dnslookup(self.proxy_uri)
url = '%s/1.0/%s' % (proxy_uri, service)
data = {'email': email}
status, body = self._proxy('POST', url, data)
if status != 200:
msg = 'Could not get an allocation\n'
msg += 'status: %s\n' % status
msg += 'body: %s\n' % str(body)
raise BackendError(msg, backend=url)
return body['uid'], body['node']
| Python | 0 |
8f22be125cabf38f00b360e0c6d48a5d1650bef0 | Remove wrong dependency | sponsorship_compassion/__openerp__.py | sponsorship_compassion/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester, Emanuel Cino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Compassion Sponsorships',
'version': '1.6',
'category': 'Other',
'author': 'Compassion CH',
'website': 'http://www.compassion.ch',
'depends': ['contract_compassion', 'crm',
'account_cancel', 'partner_compassion',
'web_m2x_options', 'account_invoice_split_invoice'],
'data': [
'view/sponsorship_contract_view.xml',
'view/sponsorship_contract_group_view.xml',
'view/end_sponsorship_wizard_view.xml',
'view/invoice_line_view.xml',
'view/res_partner_view.xml',
'view/generate_gift_view.xml',
'view/account_invoice_split_wizard_view.xml',
'view/child_view.xml',
'data/lang.xml',
'data/sequence.xml',
'data/sponsorship_product.xml',
'data/analytic_accounting.xml',
'workflow/contract_workflow.xml',
],
'demo': [
'demo/sponsorship_compassion_demo.xml'
],
'installable': True,
'auto_install': False,
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester, Emanuel Cino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Compassion Sponsorships',
'version': '1.6',
'category': 'Other',
'author': 'Compassion CH',
'website': 'http://www.compassion.ch',
'depends': ['contract_compassion', 'crm',
'l10n_ch', 'account_cancel', 'partner_compassion',
'web_m2x_options', 'account_invoice_split_invoice'],
'data': [
'view/sponsorship_contract_view.xml',
'view/sponsorship_contract_group_view.xml',
'view/end_sponsorship_wizard_view.xml',
'view/invoice_line_view.xml',
'view/res_partner_view.xml',
'view/generate_gift_view.xml',
'view/account_invoice_split_wizard_view.xml',
'view/child_view.xml',
'data/lang.xml',
'data/sequence.xml',
'data/sponsorship_product.xml',
'data/analytic_accounting.xml',
'workflow/contract_workflow.xml',
],
'demo': [
'demo/sponsorship_compassion_demo.xml'
],
'installable': True,
'auto_install': False,
}
| Python | 0.000086 |
145e9141af1e1abdf0a9ab3c035ed8df6298ba0f | rebase migration dependency. | accelerator/migrations/0015_expert_bio_add_max_length_validation.py | accelerator/migrations/0015_expert_bio_add_max_length_validation.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-25 15:00
from __future__ import unicode_literals
import django.core.validators
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0014_alter_fluent_page_type_managers'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='bio',
field=models.TextField(blank=True, default='', validators=[
django.core.validators.MaxLengthValidator(7500)]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-25 15:00
from __future__ import unicode_literals
import django.core.validators
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_allocator'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='bio',
field=models.TextField(blank=True, default='', validators=[
django.core.validators.MaxLengthValidator(7500)]),
),
]
| Python | 0 |
0992a05b5f199b6ade27f19af9271e5e8556c372 | Clarified an example param | apollo-ws/visualizer-services/GAIA/trunk/VisualizerServiceClient.py | apollo-ws/visualizer-services/GAIA/trunk/VisualizerServiceClient.py | # Copyright 2013 University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
'''
Created on Feb 13, 2013
@author: John Levander
'''
from VisualizerService_services import *
#from SimulatorService_services_types import *
from ApolloFactory import *
import time
#create the service object
service = VisualizerServiceLocator().getVisualizerServiceEI("http://127.0.0.1:8087/gaia")
#create an epidemic model input object
factory = ApolloFactory()
runRequest = runRequest()
runRequest._visualizerConfiguration = factory.new_VisualizerConfiguration()
runRequest._visualizerConfiguration._authentication._requesterId = "fake"
runRequest._visualizerConfiguration._authentication._requesterPassword = "fake"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerDeveloper = "PSC"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerName = "GAIA"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerVersion = "v1.0"
runRequest._visualizerConfiguration._visualizationOptions._runId = "PSC_FRED_v1.0_42"
runRequest._visualizerConfiguration._visualizationOptions._location = "42003/Allegheny County"
runRequest._visualizerConfiguration._visualizationOptions._outputFormat = "mp4"
print 'Calling "run"'
run_response = service.run(runRequest)
print "Run submitted with ID: " + str(run_response._runId)
get_run_status_request = getRunStatusRequest()
get_run_status_request._runId = run_response._runId
run_status_response = service.getRunStatus(get_run_status_request)
print '\nCalling "getRunStatus"'
print "Status Code: " + run_status_response._runStatus._status + " Status Message: " + run_status_response._runStatus._message
| # Copyright 2013 University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
'''
Created on Feb 13, 2013
@author: John Levander
'''
from VisualizerService_services import *
#from SimulatorService_services_types import *
from ApolloFactory import *
import time
#create the service object
service = VisualizerServiceLocator().getVisualizerServiceEI("http://127.0.0.1:8087/gaia")
#create an epidemic model input object
factory = ApolloFactory()
runRequest = runRequest()
runRequest._visualizerConfiguration = factory.new_VisualizerConfiguration()
runRequest._visualizerConfiguration._authentication._requesterId = "fake"
runRequest._visualizerConfiguration._authentication._requesterPassword = "fake"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerDeveloper = "PSC"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerName = "GAIA"
runRequest._visualizerConfiguration._visualizerIdentification._visualizerVersion = "v1.0"
runRequest._visualizerConfiguration._visualizationOptions._runId = "PSC_GAIA_v1.0_42"
runRequest._visualizerConfiguration._visualizationOptions._location = "42003/Allegheny County"
runRequest._visualizerConfiguration._visualizationOptions._outputFormat = "mp4"
print 'Calling "run"'
run_response = service.run(runRequest)
print "Run submitted with ID: " + str(run_response._runId)
get_run_status_request = getRunStatusRequest()
get_run_status_request._runId = run_response._runId
run_status_response = service.getRunStatus(get_run_status_request)
print '\nCalling "getRunStatus"'
print "Status Code: " + run_status_response._runStatus._status + " Status Message: " + run_status_response._runStatus._message
| Python | 0.999483 |
837f05228fac7f6addd28069c6387f798e01ff8c | Add checksum test. | tests/test_fs.py | tests/test_fs.py | from farmfs.fs import normpath as _normalize
from farmfs.fs import userPath2Path as up2p
from farmfs.fs import Path
import pytest
def test_create_path():
p1 = Path("/")
p2 = Path("/a")
p2 = Path("/a/b")
p3 = Path(p1)
p4 = Path("a", p1)
with pytest.raises(AssertionError):
p5 = Path("/a/b", p2)
with pytest.raises(ValueError):
p6 = Path(None)
with pytest.raises(ValueError):
p7 = Path(None, p1)
with pytest.raises(AssertionError):
p8 = Path("a", "b")
def test_normalize_abs():
assert _normalize("/") == "/"
assert _normalize("/a") == "/a"
assert _normalize("/a/") == "/a"
assert _normalize("/a/b") == "/a/b"
assert _normalize("/a/b/") == "/a/b"
assert _normalize("/a//b") == "/a/b"
assert _normalize("/a//b//") == "/a/b"
def test_normalize_relative():
assert _normalize("a") == "a"
assert _normalize("a/") == "a"
assert _normalize("a/b") == "a/b"
assert _normalize("a/b/") == "a/b"
assert _normalize("a//b") == "a/b"
assert _normalize("a//b//") == "a/b"
def test_userPath2Path():
assert up2p("c", Path("/a/b")) == Path("/a/b/c")
assert up2p("/c", Path("/a/b")) == Path("/c")
def test_cmp():
assert Path("/a/b") < Path("/a/c")
assert Path("/a/c") > Path("/a/b")
assert Path("/a/2") < Path("/b/1")
assert Path("/") < Path("/a")
@pytest.mark.skip(reason="bugs not impacting development at moment.")
def test_relative_to():
assert Path("/a/b").relative_to(Path("/")) == "a/b"
assert Path("/a/b").relative_to(Path("/a")) == "b"
assert Path("/a/b/c").relative_to(Path("/a")) == "b/c"
assert Path("/a/b/c").relative_to(Path("/a/b")) == "c"
assert Path("/a/b").relative_to(Path("/c")) == "../a/b"
@pytest.mark.parametrize(
"input,expected",
[
(b'', u"d41d8cd98f00b204e9800998ecf8427e"),
(b'abc', u"900150983cd24fb0d6963f7d28e17f72"),
(b'\xea\x80\x80abcd\xde\xb4', u'b8c6dee81075e87d348522b146c95ae3'),
],)
def test_checksum_empty(tmp_path, input, expected):
tmp = Path(str(tmp_path))
fp = tmp.join("empty.txt")
with fp.open("wb") as fd:
fd.write(input)
assert fp.checksum() == expected
| from farmfs.fs import normpath as _normalize
from farmfs.fs import userPath2Path as up2p
from farmfs.fs import Path
import pytest
def test_create_path():
p1 = Path("/")
p2 = Path("/a")
p2 = Path("/a/b")
p3 = Path(p1)
p4 = Path("a", p1)
with pytest.raises(AssertionError):
p5 = Path("/a/b", p2)
with pytest.raises(ValueError):
p6 = Path(None)
with pytest.raises(ValueError):
p7 = Path(None, p1)
with pytest.raises(AssertionError):
p8 = Path("a", "b")
def test_normalize_abs():
assert _normalize("/") == "/"
assert _normalize("/a") == "/a"
assert _normalize("/a/") == "/a"
assert _normalize("/a/b") == "/a/b"
assert _normalize("/a/b/") == "/a/b"
assert _normalize("/a//b") == "/a/b"
assert _normalize("/a//b//") == "/a/b"
def test_normalize_relative():
assert _normalize("a") == "a"
assert _normalize("a/") == "a"
assert _normalize("a/b") == "a/b"
assert _normalize("a/b/") == "a/b"
assert _normalize("a//b") == "a/b"
assert _normalize("a//b//") == "a/b"
def test_userPath2Path():
assert up2p("c", Path("/a/b")) == Path("/a/b/c")
assert up2p("/c", Path("/a/b")) == Path("/c")
def test_cmp():
assert Path("/a/b") < Path("/a/c")
assert Path("/a/c") > Path("/a/b")
assert Path("/a/2") < Path("/b/1")
assert Path("/") < Path("/a")
| Python | 0.000002 |
9786c5f242f2b70240e7bb23c866c864cb4ed4ca | Add registrations to admin | expeditions/admin.py | expeditions/admin.py | from django.contrib import admin
from expeditions.models import Expedition, Waypoint, Registration
# Register your models here.
class ExpeditionAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'start_date', 'end_date', 'published')
list_display_links = ('id', 'name')
search_fields = ('name', 'start_date')
list_filter = ('published', )
class WaypointAdmin(admin.ModelAdmin):
list_display = ('id', 'expedition', 'name', 'system', 'planet', 'datetime')
list_display_links = ('id', 'name')
list_filter = ('expedition', )
search_fields = ('name', 'expedition__name', 'system', 'planet', 'datetime')
class RegistrationAdmin(admin.ModelAdmin):
list_display = ('id', 'user', 'expedition', 'registration_number')
list_display_links = ('id', 'user')
list_filter = ('expedition', 'user')
search_fields = ('user__username', 'expedition__name')
admin.site.register(Expedition, ExpeditionAdmin)
admin.site.register(Waypoint, WaypointAdmin)
admin.site.register(Registration, RegistrationAdmin) | from django.contrib import admin
from expeditions.models import Expedition, Waypoint
# Register your models here.
class ExpeditionAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'start_date', 'end_date', 'published')
search_fields = ('name', 'start_date')
list_filter = ('published', )
class WaypointAdmin(admin.ModelAdmin):
list_display = ('id', 'expedition', 'name', 'system', 'planet', 'datetime')
list_filter = ('expedition', )
search_fields = ('name', 'expedition__name', 'system', 'planet', 'datetime')
admin.site.register(Expedition, ExpeditionAdmin)
admin.site.register(Waypoint, WaypointAdmin) | Python | 0 |
7184ba85fae4c539e557309f3b15b1b7c64cb8cb | Fix urls. | snapboard/urls.py | snapboard/urls.py | from django.conf.urls.defaults import *
from django.contrib.auth.models import User
from snapboard.feeds import LatestPosts
feeds = {'latest': LatestPosts}
js_info_dict = {
'packages': ('snapboard',),
}
rpc_lookup_dict = {
'queryset':User.objects.all(),
'field':'username',
}
urlpatterns = patterns('snapboard.views',
# Forum
(r'^$', 'category_index', {}, 'snapboard_category_index'),
(r'^threads/$', 'thread_index', {}, 'snapboard_index'),
(r'^post/(?P<post_id>\d+)/$', 'locate_post', {}, 'snapboard_locate_post'),
(r'^edit_post/(?P<post_id>\d+)/$', 'edit_post', {}, 'snapboard_edit_post'),
(r'^private/$', 'private_index', {}, 'snapboard_private_index'),
(r'^favorites/$', 'favorite_index', {}, 'snapboard_favorite_index'),
(r'^settings/$', 'edit_settings', {}, 'snapboard_edit_settings'),
(r'^(?P<slug>[-_\w]+)/new/$', 'new_thread', {}, 'snapboard_new_thread'),
(r'^(?P<cslug>[-_\w]+)/(?P<tslug>[-_\w]+)/$', 'thread', {}, 'snapboard_thread'),
(r'^(?P<slug>[-_\w]+)/$', 'category_thread_index', {}, 'snapboard_category_thread_index'),
# Groups
(r'^groups/(?P<group_id>\d+)/manage/$', 'manage_group', {}, 'snapboard_manage_group'),
(r'^groups/(?P<group_id>\d+)/invite/$', 'invite_user_to_group', {}, 'snapboard_invite_user_to_group'),
(r'^groups/(?P<group_id>\d+)/remuser/$', 'remove_user_from_group', {}, 'snapboard_remove_user_from_group'),
(r'^groups/(?P<group_id>\d+)/grant_admin/$', 'grant_group_admin_rights', {}, 'snapboard_grant_group_admin_rights'),
# Invitations
(r'invitations/(?P<invitation_id>\d+)/discard/$', 'discard_invitation', {}, 'snapboard_discard_invitation'),
(r'invitations/(?P<invitation_id>\d+)/answer/$', 'answer_invitation', {}, 'snapboard_answer_invitation'),
# RPC
(r'^rpc/action/$', 'rpc', {}, 'snapboard_rpc_action'),
)
urlpatterns += patterns('snapboard.rpc',
(r'^rpc/postrev/$', 'rpc_post', {}, 'snapboard_rpc_postrev'),
(r'^rpc/preview/$', 'rpc_preview', {}, 'snapboard_rpc_preview'),
(r'^rpc/user_lookup/$', 'rpc_lookup', rpc_lookup_dict, 'snapboard_rpc_user_lookup'),
)
urlpatterns += patterns('',
# Feeds
(r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}, 'snapboard_feeds'),
# JavaScript
(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict, 'snapboard_js_i18n'),
) | from django.conf.urls.defaults import *
from django.contrib.auth.models import User
from snapboard.feeds import LatestPosts
from snapboard.rpc import rpc_post, rpc_lookup, rpc_preview
from snapboard.views import *
feeds = {'latest': LatestPosts}
js_info_dict = {
'packages': ('snapboard',),
}
urlpatterns = patterns('',
(r'^$', category_index, {}, 'snapboard_category_index'),
(r'^threads/$', thread_index, {}, 'snapboard_index'),
(r'^post/(?P<post_id>\d+)/$', locate_post, {}, 'snapboard_locate_post'),
(r'^edit_post/(?P<post_id>\d+)/$', edit_post, {}, 'snapboard_edit_post'),
(r'^private/$', private_index, {}, 'snapboard_private_index'),
(r'^favorites/$', favorite_index, {}, 'snapboard_favorite_index'),
(r'^settings/$', edit_settings, {}, 'snapboard_edit_settings'),
(r'^(?P<slug>[-_\w]+)/new/$', new_thread, {}, 'snapboard_new_thread'),
(r'^(?P<cslug>[-_\w]+)/(?P<tslug>[-_\w]+)/$', thread, {}, 'snapboard_thread'),
(r'^(?P<slug>[-_\w]+)/$', category_thread_index, {}, 'snapboard_category_thread_index'),
# Groups
(r'^groups/(?P<group_id>\d+)/manage/$', manage_group, {}, 'snapboard_manage_group'),
(r'^groups/(?P<group_id>\d+)/invite/$', invite_user_to_group, {}, 'snapboard_invite_user_to_group'),
(r'^groups/(?P<group_id>\d+)/remuser/$', remove_user_from_group, {}, 'snapboard_remove_user_from_group'),
(r'^groups/(?P<group_id>\d+)/grant_admin/$', grant_group_admin_rights, {}, 'snapboard_grant_group_admin_rights'),
# Invitations
(r'invitations/(?P<invitation_id>\d+)/discard/$', discard_invitation, {}, 'snapboard_discard_invitation'),
(r'invitations/(?P<invitation_id>\d+)/answer/$', answer_invitation, {}, 'snapboard_answer_invitation'),
# RPC
(r'^rpc/action/$', rpc, {}, 'snapboard_rpc_action'),
(r'^rpc/postrev/$', rpc_post, {}, 'snapboard_rpc_postrev'),
(r'^rpc/preview/$', rpc_preview, {}, 'snapboard_rpc_preview'),
(r'^rpc/user_lookup/$', rpc_lookup,
{
'queryset':User.objects.all(),
'field':'username',
}, 'snapboard_rpc_user_lookup'
),
# feeds
(r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}, 'snapboard_feeds'),
# javascript translations
(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict, 'snapboard_js_i18n'),
)
# vim: ai ts=4 sts=4 et sw=4
| Python | 0.000033 |
44c78525ee886a369be66d671523dd3258ba37d5 | Remove hard FPU for Cortex M4 // Resolve #162 | platformio/builder/scripts/basearm.py | platformio/builder/scripts/basearm.py | # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
"""
Base for ARM microcontrollers.
"""
from SCons.Script import Builder, DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rcs"],
ASPPFLAGS=["-x", "assembler-with-cpp"],
CPPFLAGS=[
"-g", # include debugging info (so errors include line numbers)
"-Os", # optimize for size
"-ffunction-sections", # place each function in its own section
"-fdata-sections",
"-Wall",
"-mthumb",
"-mcpu=${BOARD_OPTIONS['build']['cpu']}",
"-nostdlib",
"-MMD" # output dependancy info
],
CXXFLAGS=[
"-fno-rtti",
"-fno-exceptions"
],
CPPDEFINES=[
"F_CPU=$BOARD_F_CPU"
],
LINKFLAGS=[
"-Os",
"-Wl,--gc-sections,--relax",
"-mthumb",
"-mcpu=${BOARD_OPTIONS['build']['cpu']}",
"--specs=nano.specs"
],
LIBS=["c", "gcc", "m"],
SIZEPRINTCMD='"$SIZETOOL" -B -d $SOURCES'
)
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"]),
suffix=".bin"
),
ElfToHex=Builder(
action=" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"]),
suffix=".hex"
)
)
)
| # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
"""
Base for ARM microcontrollers.
"""
from SCons.Script import Builder, DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rcs"],
ASPPFLAGS=["-x", "assembler-with-cpp"],
CPPFLAGS=[
"-g", # include debugging info (so errors include line numbers)
"-Os", # optimize for size
"-ffunction-sections", # place each function in its own section
"-fdata-sections",
"-Wall",
"-mthumb",
"-mcpu=${BOARD_OPTIONS['build']['cpu']}",
"-nostdlib",
"-MMD" # output dependancy info
],
CXXFLAGS=[
"-fno-rtti",
"-fno-exceptions"
],
CPPDEFINES=[
"F_CPU=$BOARD_F_CPU"
],
LINKFLAGS=[
"-Os",
"-Wl,--gc-sections,--relax",
"-mthumb",
"-mcpu=${BOARD_OPTIONS['build']['cpu']}",
"--specs=nano.specs"
],
LIBS=["c", "gcc", "m"],
SIZEPRINTCMD='"$SIZETOOL" -B -d $SOURCES'
)
if env.get("BOARD_OPTIONS", {}).get("build", {}).get(
"cpu", "")[-2:] == "m4" and env.get("BOARD") != "frdm_k20d50m":
env.Append(
CPPFLAGS=[
"-mfloat-abi=hard",
"-mfpu=fpv4-sp-d16",
"-fsingle-precision-constant"
],
LINKFLAGS=[
"-mfloat-abi=hard",
"-mfpu=fpv4-sp-d16",
"-fsingle-precision-constant"
]
)
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"]),
suffix=".bin"
),
ElfToHex=Builder(
action=" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"]),
suffix=".hex"
)
)
)
| Python | 0 |
98bacbc912513fa33775e2e6c2e41363aea7c793 | Remove strange code | stencil/base.py | stencil/base.py | import os
import optparse
import sys
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from .resources import Directory, File, Template
class WrongSource(Exception):
pass
class Stencil(object):
source = None
variables = []
help = None
def __init__(self):
self.resources = {}
self.context = {}
def get_absolute_path(self, source):
module_path = sys.modules[self.__class__.__module__].__file__
source_path = os.path.join(os.path.dirname(module_path), source)
return os.path.abspath(source_path)
def get_source_list(self):
if isinstance(self.source, (list, tuple)):
source_list = list(self.source)
else:
source_list = [self.source]
source_list = [self.get_absolute_path(source) for source in source_list]
return [path for path in source_list if os.path.isdir(path)]
def copy(self, target):
os.makedirs(target, 0755)
for path in sorted(self.resources):
real_path = os.path.join(target, path.format(**self.context))
self.resources[path].copy(real_path, self.context)
def fill_context(self, args):
for variable in self.variables:
value = getattr(args, variable.name, None)
if value is not None:
self.context[variable.name] = value
elif variable.name not in self.context:
if args.use_defaults and variable.default is not None:
self.context[variable.name] = variable.default
else:
self.context[variable.name] = variable.prompt()
def collect_resources(self):
source_list = self.get_source_list()
if not source_list:
raise WrongSource(
'None of the source directories exists: %r' % source_path)
resources = {}
for source in source_list:
for root, dirnames, filenames in os.walk(source):
root = os.path.relpath(root, source)
for dirname in dirnames:
path = os.path.normpath(os.path.join(root, dirname))
real_path = os.path.join(source, path)
resources[path] = Directory(real_path)
for filename in filenames:
path = os.path.normpath(os.path.join(root, filename))
real_path = os.path.join(source, path)
if path.endswith('_tmpl'):
path = path[:-5]
Resource = Template
else:
Resource = File
resources[path] = Resource(real_path)
self.resources = resources
@classmethod
def add_to_subparsers(cls, name, subparsers):
parser = subparsers.add_parser(name, help=cls.help)
for variable in cls.variables:
variable.add_to_parser(parser)
parser.add_argument('target', type=cls.absolute_path,
help='destination directory')
parser.set_defaults(func=cls.run)
@classmethod
def absolute_path(cls, arg):
return os.path.abspath(arg)
@classmethod
def run(cls, args):
stencil = cls()
stencil.fill_context(args)
stencil.collect_resources()
stencil.copy(args.target)
| import os
import optparse
import sys
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from .resources import Directory, File, Template
class WrongSource(Exception):
pass
class Stencil(object):
source = None
variables = []
help = None
def __init__(self):
self.resources = {}
self.context = {}
def get_absolute_path(self, source):
module_path = sys.modules[self.__class__.__module__].__file__
source_path = os.path.join(os.path.dirname(module_path), source)
return os.path.abspath(source_path)
def get_source_list(self):
if isinstance(self.source, (list, tuple)):
source_list = list(self.source)
else:
source_list = [self.source]
source_list = [self.get_absolute_path(source) for source in source_list]
return [path for path in source_list if os.path.isdir(path)]
def copy(self, target):
os.makedirs(target, 0755)
for path in sorted(self.resources):
real_path = os.path.join(target, path.format(**self.context))
self.resources[path].copy(real_path, self.context)
def fill_context(self, args):
for variable in self.variables:
value = getattr(args, variable.name, None)
if value is not None:
self.context[variable.name] = value
elif variable.name not in self.context:
if args.use_defaults and variable.default is not None:
self.context[variable.name] = variable.default
else:
self.context[variable.name] = variable.prompt()
def collect_resources(self):
source_list = self.get_source_list()
if not source_list:
raise WrongSource(
'None of the source directories exists: %r' % source_path)
resources = {}
for source in source_list:
for root, dirnames, filenames in os.walk(source):
root = os.path.relpath(root, source)
for dirname in dirnames:
path = os.path.normpath(os.path.join(root, dirname))
real_path = os.path.join(source, path)
resources[path % self.context] = Directory(real_path)
for filename in filenames:
path = os.path.normpath(os.path.join(root, filename))
real_path = os.path.join(source, path)
if path.endswith('_tmpl'):
path = path[:-5]
Resource = Template
else:
Resource = File
resources[path % self.context] = Resource(real_path)
self.resources = resources
@classmethod
def add_to_subparsers(cls, name, subparsers):
parser = subparsers.add_parser(name, help=cls.help)
for variable in cls.variables:
variable.add_to_parser(parser)
parser.add_argument('target', type=cls.absolute_path,
help='destination directory')
parser.set_defaults(func=cls.run)
@classmethod
def absolute_path(cls, arg):
return os.path.abspath(arg)
@classmethod
def run(cls, args):
stencil = cls()
stencil.fill_context(args)
stencil.collect_resources()
stencil.copy(args.target)
| Python | 0.000538 |
ab23ea60457720d0a7414b1b84191945f529b23c | Update _version.py | fabsetup/_version.py | fabsetup/_version.py | __version__ = "0.7.9" # semantic versioning: https://semver.org
| __version__ = "0.7.9"
| Python | 0.000002 |
28c8d1cc6df216dfe1f3bcfa3eb70bb590204613 | implement post_vote() | pybooru/api_danbooru.py | pybooru/api_danbooru.py | # -*- coding: utf-8 -*-
"""pybooru.api_danbooru
This module contains all API calls of Danbooru for Pybooru.
Classes:
Danbooru -- Contains all API calls.
"""
# __future__ imports
from __future__ import absolute_import
# pybooru imports
from .exceptions import PybooruAPIError
class DanbooruApi(object):
"""Contains all Danbooru API calls.
API Versions: v2.105.0
doc: https://danbooru.donmai.us/wiki_pages/43568
"""
def post_list(self, **params):
"""Get a list of posts.
Parameters:
limit: How many posts you want to retrieve. There is a hard limit
of 100 posts per request.
page: The page number.
tags: The tags to search for. Any tag combination that works on the
web site will work here. This includes all the meta-tags.
raw: When this parameter is set the tags parameter will not be
parsed for aliased tags, metatags or multiple tags, and will
instead be parsed as a single literal tag.
"""
return self._get('posts.json', params)
def post_show(self, id_):
"""Get a post.
Parameters:
id_: REQUIRED Where id_ is the post id.
"""
return self._get('/posts/{0}.json'.format(id_))
def post_update(self, id_, tag_string=None, rating=None, source=None,
parent_id=None):
"""Update a specific post (Requires login).
Parameters:
id_: REQUIRED The id number of the post to update.
tag_string: A space delimited list of tags.
rating: The rating for the post. Can be: safe, questionable, or
explicit.
source: If this is a URL, Danbooru will download the file.
parent_id: The ID of the parent post.
"""
params = {
'post[tag_string]': tag_string,
'post[rating]': rating,
'ost[source]': source,
'post[parent_id]': parent_id
}
return self._get('/posts/{0}.json'.format(id_), params, 'PUT')
def post_revert(self, id_, version_id):
"""Function to reverts a post to a previous version (Requires login).
Parameters:
id_: REQUIRED post id.
version_id: REQUIRED The post version id to revert to.
"""
return self._get('/posts/{0}/revert.json'.format(id_),
{'version_id': version_id}, 'PUT')
def post_copy_notes(self, id_, other_post_id):
"""Function to copy notes (requires login).
Parameters:
id_: REQUIRED Post id.
other_post_id: REQUIRED The id of the post to copy notes to.
"""
return self._get('/posts/{0}/copy_notes.json'.format(id_),
{'other_post_id': other_post_id}, 'PUT')
def post_vote(self, id_, score):
"""Action lets you vote for a post (Requires login).
Danbooru: Post votes/create
Parameters:
id_: REQUIRED Ppost id.
score: REQUIRED Can be: up, down.
"""
return self._get('/posts/{0}/votes.json'.format(id_), {'score': score},
'POST')
| # -*- coding: utf-8 -*-
"""pybooru.api_danbooru
This module contains all API calls of Danbooru for Pybooru.
Classes:
Danbooru -- Contains all API calls.
"""
# __future__ imports
from __future__ import absolute_import
# pybooru imports
from .exceptions import PybooruAPIError
class DanbooruApi(object):
"""Contains all Danbooru API calls.
API Versions: v2.105.0
doc: https://danbooru.donmai.us/wiki_pages/43568
"""
def post_list(self, **params):
"""Get a list of posts.
Parameters:
limit: How many posts you want to retrieve. There is a hard limit
of 100 posts per request.
page: The page number.
tags: The tags to search for. Any tag combination that works on the
web site will work here. This includes all the meta-tags.
raw: When this parameter is set the tags parameter will not be
parsed for aliased tags, metatags or multiple tags, and will
instead be parsed as a single literal tag.
"""
return self._get('posts.json', params)
def post_show(self, id_):
"""Get a post.
Parameters:
id_: where id_ is the post id.
"""
return self._get('/posts/{0}.json'.format(id_))
def post_update(self, id_, tag_string=None, rating=None, source=None,
parent_id=None):
"""Update a specific post (Requires login).
Parameters:
id_: The id number of the post to update.
tag_string: A space delimited list of tags.
rating: The rating for the post. Can be: safe, questionable, or
explicit.
source: If this is a URL, Danbooru will download the file.
parent_id: The ID of the parent post.
"""
params = {
'post[tag_string]': tag_string,
'post[rating]': rating,
'ost[source]': source,
'post[parent_id]': parent_id
}
return self._get('/posts/{0}.json'.format(id_), params, 'PUT')
def post_revert(self, id_, version_id):
"""Function to reverts a post to a previous version (Requires login).
Parameters:
id_: REQUIRED post id.
version_id: REQUIRED The post version id to revert to.
"""
return self._get('/posts/{0}/revert.json'.format(id_),
{'version_id': version_id}, 'PUT')
def post_copy_notes(self, id_, other_post_id):
"""Function to copy notes (requires login).
Parameters:
id_: Post id.
other_post_id: REQUIRED The id of the post to copy notes to.
"""
return self._get('/posts/{0}/copy_notes.json'.format(id_),
{'other_post_id': other_post_id}, 'PUT')
| Python | 0.000009 |
8c4edd4cc8fdd6c7c470e25436b6c6b4c146ad58 | Fix error casting datetime objects | data-analysis/utils.py | data-analysis/utils.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo <dizquierdo@bitergia.com>
# Santiago Dueñas <sduenas@bitergia.com>
#
from __future__ import absolute_import
try:
import configparser
except ImportError:
import ConfigParser as configparser
import elasticsearch
import numpy
try:
import pymysql as mysql
except ImportError:
import MySQLdb as mysql
def read_config_file(filepath):
"""Read configuration file"""
cfg_parser = configparser.SafeConfigParser()
cfg_parser.read(filepath)
config = {}
for section in ['mysql', 'elasticsearch']:
if section not in cfg_parser.sections():
cause = "Section %s not found in the %s file" % (section, filepath)
raise KeyError(cause)
config[section] = dict(cfg_parser.items(section))
return config
def to_dict(row, columns):
"""Translates from tuple to a dict"""
d = {}
for column in columns:
value = row[columns.index(column) + 1]
if isinstance(value, numpy.int64):
value = int(value)
elif isinstance(value, numpy.float64):
value = float(value)
d[column] = value
return d
def create_mysql_connection(user, password, host, db):
"""Connect to a MySQL server"""
db = mysql.connect(host=host, user=user, passwd=password, db=db,
charset='utf8')
return db.cursor()
def execute_mysql_query(conn, query):
"""Execute a MySQL query"""
n = int(conn.execute(query))
results = conn.fetchall() if n else []
return results
def create_elasticsearch_connection(url, user, password):
"""Connect to a ES server"""
conn = elasticsearch.Elasticsearch([url], http_auth=(user, password))
return conn
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo <dizquierdo@bitergia.com>
# Santiago Dueñas <sduenas@bitergia.com>
#
from __future__ import absolute_import
try:
import configparser
except ImportError:
import ConfigParser as configparser
import datetime
import elasticsearch
import numpy
try:
import pymysql as mysql
except ImportError:
import MySQLdb as mysql
def read_config_file(filepath):
"""Read configuration file"""
cfg_parser = configparser.SafeConfigParser()
cfg_parser.read(filepath)
config = {}
for section in ['mysql', 'elasticsearch']:
if section not in cfg_parser.sections():
cause = "Section %s not found in the %s file" % (section, filepath)
raise KeyError(cause)
config[section] = dict(cfg_parser.items(section))
return config
def to_dict(row, columns):
"""Translates from tuple to a dict"""
d = {}
for column in columns:
value = row[columns.index(column) + 1]
if isinstance(value, numpy.int64):
value = int(value)
elif isinstance(value, numpy.float64):
value = float(value)
elif isinstance(value, datetime.datetime):
value = str(value)
else:
value = str(value)
d[column] = value
return d
def create_mysql_connection(user, password, host, db):
"""Connect to a MySQL server"""
db = mysql.connect(host=host, user=user, passwd=password, db=db,
charset='utf8')
return db.cursor()
def execute_mysql_query(conn, query):
"""Execute a MySQL query"""
n = int(conn.execute(query))
results = conn.fetchall() if n else []
return results
def create_elasticsearch_connection(url, user, password):
"""Connect to a ES server"""
conn = elasticsearch.Elasticsearch([url], http_auth=(user, password))
return conn
| Python | 0.000003 |
9f405a3b4e01ee0a42a8530cfc5b509a38067250 | Remove unused import | mugloar/dragon.py | mugloar/dragon.py |
class Dragon:
# By default, stay home.
scaleThickness = 0
clawSharpness = 0
wingStrength = 0
fireBreath = 0
def __init__(self, weather_code):
if weather_code == 'T E':
# Draught requires a 'balanced' dragon, ha ha
self.scaleThickness = 5
self.clawSharpness = 5
self.wingStrength = 5
self.fireBreath = 5
elif weather_code == 'FUNDEFINEDG':
# Fog means we're unseen, no need to fly
self.scaleThickness = 8
self.clawSharpness = 8
self.wingStrength = 0
self.fireBreath = 4
elif weather_code == 'NMR':
self.scaleThickness = 3
self.clawSharpness = 6
self.wingStrength = 5
self.fireBreath = 6
elif weather_code == 'SRO':
# Stay at home if there's a storm.
pass
else:
# Fire is useless in the rain. Additional claw-sharpening is needed to destroy the umbrellaboats
self.scaleThickness = 5
self.clawSharpness = 10
self.wingStrength = 5
self.fireBreath = 0
def get_json(self):
return {"dragon": {
"scaleThickness": self.scaleThickness,
"clawSharpness": self.clawSharpness,
"wingStrength": self.wingStrength,
"fireBreath": self.fireBreath}}
| import json
class Dragon:
# By default, stay home.
scaleThickness = 0
clawSharpness = 0
wingStrength = 0
fireBreath = 0
def __init__(self, weather_code):
if weather_code == 'T E':
# Draught requires a 'balanced' dragon, ha ha
self.scaleThickness = 5
self.clawSharpness = 5
self.wingStrength = 5
self.fireBreath = 5
elif weather_code == 'FUNDEFINEDG':
# Fog means we're unseen, no need to fly
self.scaleThickness = 8
self.clawSharpness = 8
self.wingStrength = 0
self.fireBreath = 4
elif weather_code == 'NMR':
self.scaleThickness = 6
self.clawSharpness = 6
self.wingStrength = 4
self.fireBreath = 4
elif weather_code == 'SRO':
# Stay at home if there's a storm.
pass
else:
# Fire is useless in the rain. Additional claw-sharpening is needed to destroy the umbrellaboats
self.scaleThickness = 5
self.clawSharpness = 10
self.wingStrength = 5
self.fireBreath = 0
def get_json(self):
return {"dragon": {
"scaleThickness": self.scaleThickness,
"clawSharpness": self.clawSharpness,
"wingStrength": self.wingStrength,
"fireBreath": self.fireBreath}}
| Python | 0.000001 |
b8701f04d049101c8c92b468b4fc3dc863f1e292 | Add bulk accept and reject for talks | pygotham/admin/talks.py | pygotham/admin/talks.py | """Admin for talk-related models."""
from flask.ext.admin import actions
from flask.ext.admin.contrib.sqla import ModelView
from pygotham.admin.utils import model_view
from pygotham.core import db
from pygotham.talks import models
__all__ = ('CategoryModelView', 'talk_model_view', 'TalkReviewModelView')
CATEGORY = 'Talks'
class TalkModelView(ModelView, actions.ActionsMixin):
"""Admin view for :class:`~pygotham.models.Talk`."""
column_filters = ('status', 'duration', 'level')
column_list = ('name', 'status', 'duration', 'level', 'type', 'user')
column_searchable_list = ('name',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_actions()
@actions.action(
'accept', 'Accept', 'Are you sure you want to accept selected models?')
def approve(self, talks):
for pk in talks:
talk = models.Talk.query.get(pk)
talk.status = 'accepted'
self.session.commit()
@actions.action(
'reject', 'Reject', 'Are you sure you want to reject selected models?')
def reject(self, talks):
for pk in talks:
talk = models.Talk.query.get(pk)
talk.status = 'rejected'
self.session.commit()
CategoryModelView = model_view(
models.Category,
'Categories',
CATEGORY,
form_columns=('name', 'slug'),
)
talk_model_view = TalkModelView(
models.Talk, db.session, 'Talks', CATEGORY, 'talks')
TalkReviewModelView = model_view(
models.Talk,
'Review',
CATEGORY,
can_create=False,
can_delete=False,
column_list=('name', 'status', 'level', 'type', 'user'),
column_searchable_list=('name',),
edit_template='talks/review.html',
)
| """Admin for talk-related models."""
from pygotham.admin.utils import model_view
from pygotham.talks import models
__all__ = ('CategoryModelView', 'TalkModelView', 'TalkReviewModelView')
CategoryModelView = model_view(
models.Category,
'Categories',
'Talks',
form_columns=('name', 'slug'),
)
TalkModelView = model_view(
models.Talk,
'Talks',
'Talks',
column_filters=('status', 'duration', 'level'),
column_list=('name', 'status', 'duration', 'level', 'type', 'user'),
column_searchable_list=('name',),
)
TalkReviewModelView = model_view(
models.Talk,
'Review',
'Talks',
can_create=False,
can_delete=False,
column_list=('name', 'status', 'level', 'type', 'user'),
column_searchable_list=('name',),
edit_template='talks/review.html',
)
| Python | 0.000038 |
54bb5a2320fb88daf9c24ad7b6a9b6cb0a6ab0cc | add send_image_url | pymessenger/send_api.py | pymessenger/send_api.py | import json
import requests
from requests_toolbelt import MultipartEncoder
DEFAULT_API_VERSION = 2.6
class SendApiClient(object):
def __init__(self, access_token, api_version=DEFAULT_API_VERSION):
self.api_version = api_version
self.access_token = access_token
self.base_url = (
"https://graph.facebook.com"
"/v{0}/me/messages?access_token={1}"
).format(self.api_version, access_token)
def send(self, recipient_id, message_type, **kwargs):
if message_type == 'text':
message_text = kwargs['text']
response = self.send_text_message(recipient_id, message_text)
elif message_type == 'button':
message_text = kwargs['text']
buttons = kwargs['buttons']
response = self.send_button_message(recipient_id, message_text, buttons)
else:
response = "Message type {0} currently unsupported.".format(message_type)
return response
def send_text_message(self, recipient_id, message_text):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
'text': message_text
}
}
return self._send_payload(payload)
def send_message(self, recipient_id, message):
payload = {
'recipient': {
'id': recipient_id
},
'message': message
}
return self._send_payload(payload)
def send_generic_message(self, recipient_id, elements):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements
}
}
}
}
return self._send_payload(payload)
def send_button_message(self, recipient_id, text, buttons):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}
}
return self._send_payload(payload)
def _send_payload(self, payload):
result = requests.post(self.base_url, json=payload).json()
return result
def send_image(self, recipient_id, image_path):
'''
This sends an image to the specified recipient.
Input:
recipient_id: recipient id to send to
image_path: path to image to be sent
Output:
Response from API as <dict>
'''
payload = {
'recipient': json.dumps(
{
'id': recipient_id
}
),
'message': json.dumps(
{
'attachment': {
'type': 'image',
'payload': {}
}
}
),
'filedata': (image_path, open(image_path, 'rb'))
}
multipart_data = MultipartEncoder(payload)
multipart_header = {
'Content-Type': multipart_data.content_type
}
return requests.post(self.base_url, data=multipart_data, headers=multipart_header).json()
def send_image_url(self, recipient_id, image_url):
payload = {
'recipient': json.dumps(
{
'id': recipient_id
}
),
'message': json.dumps(
{
'attachment': {
'type': 'image',
'payload': {
'url': image_url
}
}
}
)
}
return self._send_payload(payload)
| import json
import requests
from requests_toolbelt import MultipartEncoder
DEFAULT_API_VERSION = 2.6
class SendApiClient(object):
def __init__(self, access_token, api_version=DEFAULT_API_VERSION):
self.api_version = api_version
self.access_token = access_token
self.base_url = (
"https://graph.facebook.com"
"/v{0}/me/messages?access_token={1}"
).format(self.api_version, access_token)
def send(self, recipient_id, message_type, **kwargs):
if message_type == 'text':
message_text = kwargs['text']
response = self.send_text_message(recipient_id, message_text)
elif message_type == 'button':
message_text = kwargs['text']
buttons = kwargs['buttons']
response = self.send_button_message(recipient_id, message_text, buttons)
else:
response = "Message type {0} currently unsupported.".format(message_type)
return response
def send_text_message(self, recipient_id, message_text):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
'text': message_text
}
}
return self._send_payload(payload)
def send_message(self, recipient_id, message):
payload = {
'recipient': {
'id': recipient_id
},
'message': message
}
return self._send_payload(payload)
def send_generic_message(self, recipient_id, elements):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements
}
}
}
}
return self._send_payload(payload)
def send_button_message(self, recipient_id, text, buttons):
payload = {
'recipient': {
'id': recipient_id
},
'message': {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}
}
return self._send_payload(payload)
def _send_payload(self, payload):
result = requests.post(self.base_url, json=payload).json()
return result
def send_image(self, recipient_id, image_path):
'''
This sends an image to the specified recipient.
Input:
recipient_id: recipient id to send to
image_path: path to image to be sent
Output:
Response from API as <dict>
'''
payload = {
'recipient': json.dumps(
{
'id': recipient_id
}
),
'message': json.dumps(
{
'attachment': {
'type': 'image',
'payload': {}
}
}
),
'filedata': (image_path, open(image_path, 'rb'))
}
multipart_data = MultipartEncoder(payload)
multipart_header = {
'Content-Type': multipart_data.content_type
}
return requests.post(self.base_url, data=multipart_data, headers=multipart_header).json()
| Python | 0.000002 |
ec261fdaf41bd91558e4df143be8dfd9940bde81 | Rewrite bubble sort. | py/sorting/05_bubbleSort.py | py/sorting/05_bubbleSort.py | def bubbleSort(A):
for k in range(len(A)-1, 0, -1):
for i in range(k):
if A[i] > A[i+1]:
tempValue = A[i]
A[i] = A[i+1]
A[i+1] = tempValue
return A
print(bubbleSort([54,26,93,17,77,31,44,55,20]))
def bubbleSortReverse(A):
for k in range(len(A)-1, 0, -1):
for i in range(k):
if A[i+1] > A[i]:
tempValue = A[i+1]
A[i+1] = A[i]
A[i] = tempValue
return A
print(bubbleSortReverse([54,26,93,17,77,31,44,55,20]))
| def bubbleSort(A):
tempValue = 0
for k in range(1, len(A)):
flag = 0
for i in range(0, len(A) - k):
if A[i+1] > A[i]:
tempValue = A[i+1]
A[i+1] = A[i]
A[i] = tempValue
flag += 1
if flag == 0:
break
return A
print(bubbleSort([1,4,55,3]))
| Python | 0.000001 |
2998a776a702d8d8fbd3e5f54f263fce55ba621c | Correct number of new links printed by first scrape | mutube/mutuber.py | mutube/mutuber.py | """
Script to scrape /bleep/ and post to YouTube playlist
Upcoming improvements include
Management of `live_threads`.
This could then be presented as a combination of a module and a script,
with command line argument parsing, and inserted to mutube
"""
from .exceptions import NoPlaylist, BadVideo
from .playlister import Playlister, encode_tag, HttpError
from .scraper import Scraper
import time
class Mutuber():
""" Scrape from 4chan and post to YouTube playlists. """
def __init__(self, board, subjects, prefix, time_format, client_json,
playlister_pause=1, scraper_pause=None):
""" .
Args:
board ::: (str) abbreviated name of 4chan board to scrape
subjects ::: (list) titles of `board` threads to scrape
prefix, time_format ::: (str) playlist tag specs, see documentation
playlister_pause, scraper_pause ::: (int) minutes to pause between
posting to playlist and between scrape cycles, respectively
client_json ::: (str) path to YouTube OAuth 2.0 client credentials JSON
"""
# Initialise objects
self.scraper = Scraper(board, subjects)
self.playlister = Playlister(prefix, time_format, client_json)
# Initialise options ! should check within acceptable ranges
self.playlister_pause = playlister_pause
self.scraper_pause = scraper_pause
# Get existing id's
#! should not be on init -- let user choose whether to consider all playlists or just current
self.existing_ids = self.get_existing_ids()
self.scraper.yt_ids.update(self.existing_ids)
def run_forever(self):
""" Run continuous scrape-post cycles, with a delay. """
while True:
self.run_once()
time.sleep(self.scraper_pause * 60) # space out scrapes
def run_once(self):
self.playlist = self.get_current_playlist() # get current playlist
self.scrape_and_insert_videos_to_playlist()
# Should be optionable for 'all' or 'current'
def get_existing_ids(self):
""" Return all video_ids posted in playlists tagged as specified. """
playlists = self.playlister.get_tagged_playlists()
existing_ids = set()
for playlist in playlists.values():
existing_ids.update(self.playlister.get_posted_yt_ids(playlist))
return existing_ids
def get_current_playlist(self):
""" Return current tagged playlist, creating one if necessary. """
# Create current tag
tag = encode_tag(self.playlister.prefix, time.localtime(),
self.playlister.time_format)
try: # retrieve existing playlist
playlist = self.playlister.get_playlist(tag)
print("Retrieved playlist for tag: {}".format(tag))
except NoPlaylist: # create new playlist
playlist = self.playlister.create_new_playlist(tag)
print("Created new playlist for tag: {}".format(tag))
return playlist
def scrape_and_insert_videos_to_playlist(self):
""" Scrape videos from 4chan and post to specified playlist. """
# Scrape videos from 4chan
self.scraper.scrape()
# Add scraped videos to playlist
for yt_id in self.scraper.yt_ids - self.existing_ids: # new videos only
try:
response = self.playlister.insert_vid_to_playlist(self.playlist,
yt_id)
self.existing_ids.add(yt_id)
print('Inserted: {}'.format(yt_id))
except BadVideo: # skip dead links
print('Failed to insert: {}'.format(yt_id))
time.sleep(self.playlister_pause * 60) # space out write requests
| """
Script to scrape /bleep/ and post to YouTube playlist
Upcoming improvements include
Management of `live_threads`.
This could then be presented as a combination of a module and a script,
with command line argument parsing, and inserted to mutube
"""
from .exceptions import NoPlaylist, BadVideo
from .playlister import Playlister, encode_tag, HttpError
from .scraper import Scraper
import time
class Mutuber():
""" Scrape from 4chan and post to YouTube playlists. """
def __init__(self, board, subjects, prefix, time_format, client_json,
playlister_pause=1, scraper_pause=None):
""" .
Args:
board ::: (str) abbreviated name of 4chan board to scrape
subjects ::: (list) titles of `board` threads to scrape
prefix, time_format ::: (str) playlist tag specs, see documentation
playlister_pause, scraper_pause ::: (int) minutes to pause between
posting to playlist and between scrape cycles, respectively
client_json ::: (str) path to YouTube OAuth 2.0 client credentials JSON
"""
# Initialise objects
self.scraper = Scraper(board, subjects)
self.playlister = Playlister(prefix, time_format, client_json)
# Initialise options ! should check within acceptable ranges
self.playlister_pause = playlister_pause
self.scraper_pause = scraper_pause
#! should not be on init -- let user choose whether to consider all playlists or just current
self.existing_ids = self.get_existing_ids()
def run_forever(self):
""" Run continuous scrape-post cycles, with a delay. """
while True:
self.run_once()
time.sleep(self.scraper_pause * 60) # space out scrapes
def run_once(self):
self.playlist = self.get_current_playlist() # get current playlist
self.scrape_and_insert_videos_to_playlist()
# Should be optionable for 'all' or 'current'
def get_existing_ids(self):
""" Return all video_ids posted in playlists tagged as specified. """
playlists = self.playlister.get_tagged_playlists()
existing_ids = set()
for playlist in playlists.values():
existing_ids.update(self.playlister.get_posted_yt_ids(playlist))
return existing_ids
def get_current_playlist(self):
""" Return current tagged playlist, creating one if necessary. """
# Create current tag
tag = encode_tag(self.playlister.prefix, time.localtime(),
self.playlister.time_format)
try: # retrieve existing playlist
playlist = self.playlister.get_playlist(tag)
print("Retrieved playlist for tag: {}".format(tag))
except NoPlaylist: # create new playlist
playlist = self.playlister.create_new_playlist(tag)
print("Created new playlist for tag: {}".format(tag))
return playlist
def scrape_and_insert_videos_to_playlist(self):
""" Scrape videos from 4chan and post to specified playlist. """
# Scrape videos from 4chan
self.scraper.scrape()
# Add scraped videos to playlist
for yt_id in self.scraper.yt_ids - self.existing_ids: # new videos only
try:
response = self.playlister.insert_vid_to_playlist(self.playlist,
yt_id)
self.existing_ids.add(yt_id)
print('Inserted: {}'.format(yt_id))
except BadVideo: # skip dead links
print('Failed to insert: {}'.format(yt_id))
time.sleep(self.playlister_pause * 60) # space out write requests
| Python | 0 |
c2a99a33455e3b01ccce3faebd3a541b4a76e579 | Bump version | yamale/__init__.py | yamale/__init__.py | from .yamale import make_schema, make_data, validate
VERSION = (1, 0, 1, 'final', 0)
# Dynamically calculate the version based on VERSION.
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
__version__ = get_version()
| from .yamale import make_schema, make_data, validate
VERSION = (1, 0, 0, 'final', 0)
# Dynamically calculate the version based on VERSION.
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
__version__ = get_version()
| Python | 0 |
33b4c181b2d9a3d74f45ee1ced971b5bca58b35b | remove unused import | treenav/admin.py | treenav/admin.py | from django.contrib import admin
from django.contrib.contenttypes import generic
from treenav import models as treenav
from treenav.forms import MenuItemForm, GenericInlineMenuItemForm
class GenericMenuItemInline(generic.GenericStackedInline):
"""
Add this inline to your admin class to support editing related menu items
from that model's admin page.
"""
extra = 0
max_num = 1
model = treenav.MenuItem
form = GenericInlineMenuItemForm
class SubMenuItemInline(admin.TabularInline):
model = treenav.MenuItem
extra = 1
form = MenuItemForm
prepopulated_fields = {'slug': ('label',)}
exclude = ('new_parent',)
class MenuItemAdmin(admin.ModelAdmin):
list_display = (
'menu_items',
'slug',
'label',
'parent',
'link',
'href_link',
'order',
'is_enabled',
)
list_filter = ('parent', 'is_enabled')
raw_id_fields = ('parent',)
prepopulated_fields = {'slug': ('label',)}
inlines = (SubMenuItemInline, )
fieldsets = (
(None, {
'fields': ('new_parent', 'label', 'slug', 'order', 'is_enabled')
}),
('URL', {
'fields': ('link', ('content_type', 'object_id')),
'description': "The URL for this menu item, which can be a "
"fully qualified URL, an absolute URL, a named "
"URL, a path to a Django view, a regular "
"expression, or a generic relation to a model that "
"supports get_absolute_url()"
}),
)
list_editable = ('label',)
form = MenuItemForm
def menu_items(self, obj):
if obj.level == 0:
return obj.label
return ' '*obj.level + '- %s' % obj.label
menu_items.allow_tags = True
def href_link(self, obj):
return '<a href="%s">%s</a>' % (obj.href, obj.href)
href_link.short_description = 'HREF'
href_link.allow_tags = True
admin.site.register(treenav.MenuItem, MenuItemAdmin)
| from django.contrib import admin
from django import forms
from django.contrib.contenttypes import generic
from treenav import models as treenav
from treenav.forms import MenuItemForm, GenericInlineMenuItemForm
class GenericMenuItemInline(generic.GenericStackedInline):
"""
Add this inline to your admin class to support editing related menu items
from that model's admin page.
"""
extra = 0
max_num = 1
model = treenav.MenuItem
form = GenericInlineMenuItemForm
class SubMenuItemInline(admin.TabularInline):
model = treenav.MenuItem
extra = 1
form = MenuItemForm
prepopulated_fields = {'slug': ('label',)}
exclude = ('new_parent',)
class MenuItemAdmin(admin.ModelAdmin):
list_display = (
'menu_items',
'slug',
'label',
'parent',
'link',
'href_link',
'order',
'is_enabled',
)
list_filter = ('parent', 'is_enabled')
raw_id_fields = ('parent',)
prepopulated_fields = {'slug': ('label',)}
inlines = (SubMenuItemInline, )
fieldsets = (
(None, {
'fields': ('new_parent', 'label', 'slug', 'order', 'is_enabled')
}),
('URL', {
'fields': ('link', ('content_type', 'object_id')),
'description': "The URL for this menu item, which can be a "
"fully qualified URL, an absolute URL, a named "
"URL, a path to a Django view, a regular "
"expression, or a generic relation to a model that "
"supports get_absolute_url()"
}),
)
list_editable = ('label',)
form = MenuItemForm
def menu_items(self, obj):
if obj.level == 0:
return obj.label
return ' '*obj.level + '- %s' % obj.label
menu_items.allow_tags = True
def href_link(self, obj):
return '<a href="%s">%s</a>' % (obj.href, obj.href)
href_link.short_description = 'HREF'
href_link.allow_tags = True
admin.site.register(treenav.MenuItem, MenuItemAdmin)
| Python | 0.000001 |
712989db37532a7810139dd2f7007c66652a0dd7 | Fix documentation | ditto/flickr/management/commands/__init__.py | ditto/flickr/management/commands/__init__.py | from django.core.management.base import CommandError
from ....core.management.commands import DittoBaseCommand
class FetchCommand(DittoBaseCommand):
"""
Parent for all classes that fetch some things from Flickr. Photos,
Photosets, Files, etc.
"""
def add_arguments(self, parser):
"All children will have the --account option."
super().add_arguments(parser)
parser.add_argument(
"--account",
action="store",
default=False,
help=(
"The NSID of the Flickr User associated with the one "
"Account to fetch for."
),
)
class FetchPhotosCommand(FetchCommand):
# What we're fetching:
singular_noun = "Photo"
plural_noun = "Photos"
# Child classes should supply some help text for the --days and --range arguments:
days_help = ""
range_help = ""
def add_arguments(self, parser):
super().add_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument("--days", action="store", default=False, help=self.days_help)
group.add_argument(
"--range", action="store", default=False, help=self.range_help
)
def handle(self, *args, **options):
# We might be fetching for a specific account or all (None).
nsid = options["account"] if options["account"] else None
if options["days"]:
# Will be either 'all' or a number; make the number an int.
if options["days"].isdigit():
options["days"] = int(options["days"])
elif options["days"] != "all":
raise CommandError("--days should be an integer or 'all'.")
results = self.fetch_photos(nsid, options["days"], range=None)
self.output_results(results, options.get("verbosity", 1))
elif options["range"]:
results = self.fetch_photos(nsid, options["days"], options["range"])
self.output_results(results, options.get("verbosity", 1))
elif options["account"]:
raise CommandError("Specify --days as well as --account.")
else:
raise CommandError("Specify --days , eg --days=3 or --days=all.")
def fetch_photos(self, nsid, days, range):
"""Child classes should override this method to call a method that
fetches photos and returns results, eg:
return RecentPhotosMultiAccountFetcher(nsid=nsid).fetch(days=days)
"""
return {}
| from django.core.management.base import CommandError
from ....core.management.commands import DittoBaseCommand
class FetchCommand(DittoBaseCommand):
"""
Parent for all classes that fetch some things from Flickr. Photos,
Photosets, Files, etc.
"""
def add_arguments(self, parser):
"All children will have the --account option."
super().add_arguments(parser)
parser.add_argument(
"--account",
action="store",
default=False,
help=(
"The NSID of the Flickr User associated with the one "
"Account to fetch for."
),
)
class FetchPhotosCommand(FetchCommand):
# What we're fetching:
singular_noun = "Photo"
plural_noun = "Photos"
# Child classes should supply some help text for the --days and --start --end arguments:
days_help = ""
range_help = ""
def add_arguments(self, parser):
super().add_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument("--days", action="store", default=False, help=self.days_help)
group.add_argument(
"--range", action="store", default=False, help=self.range_help
)
def handle(self, *args, **options):
# We might be fetching for a specific account or all (None).
nsid = options["account"] if options["account"] else None
if options["days"]:
# Will be either 'all' or a number; make the number an int.
if options["days"].isdigit():
options["days"] = int(options["days"])
elif options["days"] != "all":
raise CommandError("--days should be an integer or 'all'.")
results = self.fetch_photos(nsid, options["days"], range=None)
self.output_results(results, options.get("verbosity", 1))
elif options["range"]:
results = self.fetch_photos(nsid, options["days"], options["range"])
self.output_results(results, options.get("verbosity", 1))
elif options["account"]:
raise CommandError("Specify --days as well as --account.")
else:
raise CommandError("Specify --days , eg --days=3 or --days=all.")
def fetch_photos(self, nsid, days, range):
"""Child classes should override this method to call a method that
fetches photos and returns results, eg:
return RecentPhotosMultiAccountFetcher(nsid=nsid).fetch(days=days)
"""
return {}
| Python | 0.000001 |
6c8a9edb6d733ac680ea2cbcb1c8d12511aa72be | Update webserver.py | webserver.py | webserver.py | #!/usr/bin/env python
# author: brendan@shellshockcomputer.com.au
import ConfigParser
from bottle import route, install, run, template, static_file, PasteServer
from bottle_sqlite import SQLitePlugin
import json
import urllib
import urllib2
import datetime
config = ConfigParser.RawConfigParser()
config.read('config.ini')
install(SQLitePlugin(dbfile=(config.get("pool", "database"))))
@route('/')
def default():
output = template('default')
return output
@route('/static/:path#.+#', name='static')
def static(path):
return static_file(path, root='static')
@route('/accounts')
def accounts():
poolAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+config.get("pool", "poolaccount")).read())
clean = poolAccount["lessors"]
output = template('accounts', leased=clean)
return output
@route('/blocks')
def blocks(db):
c = db.execute("SELECT timestamp, block, totalfee FROM blocks WHERE totalfee > 0")
result = c.fetchall()
c.close()
payload = {
'requestType': 'getForging',
'secretPhrase': config.get("pool", "poolphrase")
}
opener = urllib2.build_opener(urllib2.HTTPHandler())
data = urllib.urlencode(payload)
forging = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
getdl = forging["deadline"]
deadline = str(datetime.timedelta(seconds=getdl))
output = template('blocks', rows=result, fg=deadline)
return output
@route('/payouts')
def payouts(db):
c = db.execute("SELECT account, percentage, amount, paid, blocktime FROM accounts")
result = c.fetchall()
output = template('payouts', rows=result)
return output
run(server=PasteServer, port=8888, host='0.0.0.0')
| #!/usr/bin/env python
# author: brendan@shellshockcomputer.com.au
import ConfigParser
from bottle import route, install, run, template, static_file, PasteServer
from bottle_sqlite import SQLitePlugin
import json
import urllib
import urllib2
import datetime
config = ConfigParser.RawConfigParser()
config.read('config.ini')
install(SQLitePlugin(dbfile=(config.get("pool", "database"))))
@route('/')
def default():
output = template('default')
return output
@route('/static/:path#.+#', name='static')
def static(path):
return static_file(path, root='static')
@route('/accounts')
def accounts():
poolAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+config.get("pool", "poolaccount")).read())
clean = poolAccount["lessors"]
output = template('accounts', leased=clean)
return output
@route('/blocks')
def blocks(db):
c = db.execute("SELECT timestamp, block, totalfee FROM blocks")
result = c.fetchall()
c.close()
payload = {
'requestType': 'getForging',
'secretPhrase': config.get("pool", "poolphrase")
}
opener = urllib2.build_opener(urllib2.HTTPHandler())
data = urllib.urlencode(payload)
forging = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
getdl = forging["deadline"]
deadline = str(datetime.timedelta(seconds=getdl))
output = template('blocks', rows=result, fg=deadline)
return output
@route('/payouts')
def payouts(db):
c = db.execute("SELECT account, percentage, amount, paid, blocktime FROM accounts")
result = c.fetchall()
output = template('payouts', rows=result)
return output
run(server=PasteServer, port=8888, host='0.0.0.0') | Python | 0 |
5f839240a4223d599ad57393097bbc19502ae213 | add condition not is_retracted | website/discovery/views.py | website/discovery/views.py | import datetime
from website import settings
from website.project import Node
from website.project.utils import recent_public_registrations
from modularodm.query.querydialect import DefaultQueryDialect as Q
from framework.analytics.piwik import PiwikClient
def activity():
popular_public_projects = []
popular_public_registrations = []
hits = {}
# get the date for exactly one week ago
target_date = datetime.date.today() - datetime.timedelta(weeks=1)
if settings.PIWIK_HOST:
client = PiwikClient(
url=settings.PIWIK_HOST,
auth_token=settings.PIWIK_ADMIN_TOKEN,
site_id=settings.PIWIK_SITE_ID,
period='week',
date=target_date.strftime('%Y-%m-%d'),
)
popular_project_ids = [
x for x in client.custom_variables if x.label == 'Project ID'
][0].values
for nid in popular_project_ids:
node = Node.load(nid.value)
if node is None:
continue
if node.is_public and not node.is_registration and not node.is_deleted:
if len(popular_public_projects) < 10:
popular_public_projects.append(node)
elif node.is_public and node.is_registration and not node.is_deleted and not node.is_retracted:
if len(popular_public_registrations) < 10:
popular_public_registrations.append(node)
if len(popular_public_projects) >= 10 and len(popular_public_registrations) >= 10:
break
hits = {
x.value: {
'hits': x.actions,
'visits': x.visits
} for x in popular_project_ids
}
# Projects
recent_query = (
Q('category', 'eq', 'project') &
Q('is_public', 'eq', True) &
Q('is_deleted', 'eq', False)
)
recent_public_projects = Node.find(
recent_query &
Q('is_registration', 'eq', False)
).sort(
'-date_created'
).limit(10)
return {
'recent_public_projects': recent_public_projects,
'recent_public_registrations': recent_public_registrations(),
'popular_public_projects': popular_public_projects,
'popular_public_registrations': popular_public_registrations,
'hits': hits,
}
| import datetime
from website import settings
from website.project import Node
from website.project.utils import recent_public_registrations
from modularodm.query.querydialect import DefaultQueryDialect as Q
from framework.analytics.piwik import PiwikClient
def activity():
popular_public_projects = []
popular_public_registrations = []
hits = {}
# get the date for exactly one week ago
target_date = datetime.date.today() - datetime.timedelta(weeks=1)
if settings.PIWIK_HOST:
client = PiwikClient(
url=settings.PIWIK_HOST,
auth_token=settings.PIWIK_ADMIN_TOKEN,
site_id=settings.PIWIK_SITE_ID,
period='week',
date=target_date.strftime('%Y-%m-%d'),
)
popular_project_ids = [
x for x in client.custom_variables if x.label == 'Project ID'
][0].values
for nid in popular_project_ids:
node = Node.load(nid.value)
if node is None:
continue
if node.is_public and not node.is_registration and not node.is_deleted:
if len(popular_public_projects) < 10:
popular_public_projects.append(node)
elif node.is_public and node.is_registration and not node.is_deleted:
if len(popular_public_registrations) < 10:
popular_public_registrations.append(node)
if len(popular_public_projects) >= 10 and len(popular_public_registrations) >= 10:
break
hits = {
x.value: {
'hits': x.actions,
'visits': x.visits
} for x in popular_project_ids
}
# Projects
recent_query = (
Q('category', 'eq', 'project') &
Q('is_public', 'eq', True) &
Q('is_deleted', 'eq', False)
)
recent_public_projects = Node.find(
recent_query &
Q('is_registration', 'eq', False)
).sort(
'-date_created'
).limit(10)
return {
'recent_public_projects': recent_public_projects,
'recent_public_registrations': recent_public_registrations(),
'popular_public_projects': popular_public_projects,
'popular_public_registrations': popular_public_registrations,
'hits': hits,
}
| Python | 0.001134 |
5820a92c7945657a38eb5b54eef5e47b2ff1ec39 | Fix url coding | src/checker/plugin/links_finder_plugin.py | src/checker/plugin/links_finder_plugin.py | from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
def __init__(self):
self.database = None
self.types = None
self.trie = None
def setDb(self, DB):
self.database = DB
def setTypes(self, types):
self.types = types
self.trie = marisa_trie.Trie(types)
def check(self, transactionId, content):
""" Najde tagy <a>, <link>, vybere atribut href, ulozi jako odkazy,
stahne obsah jako dalsi transakci.
"""
soup = BeautifulSoup(content, 'html.parser')
uri = self.database.getUri(transactionId)
self.make_links_absolute(soup, uri,'a')
links = soup.find_all('a')
self.check_links(links, "Link to ", transactionId, 'href')
self.make_links_absolute(soup, uri, 'link')
links2 = soup.find_all('link')
self.check_links(links2, "Linked resource: ", transactionId, 'href')
self.make_sources_absolute(soup, uri, 'img')
images = soup.find_all('img')
self.check_links(images, "Image: ", transactionId, 'src')
return
def getId(self):
return "linksFinder"
def getLink(self, url, reqId, srcId):
try:
print "Inspecting "+url
r = requests.head(url)
if r.status_code != 200:
self.database.setDefect(srcId, "badlink", 0, url)
if 'content-type' in r.headers.keys():
ct = r.headers['content-type']
else:
ct = ''
if self.getMaxPrefix(ct) in self.types:
print "Downloading "+url
r = requests.get(url)
self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
else: print "Content type not accepted: "+ct
except InvalidSchema:
print "Invalid schema"
except ConnectionError:
print "Connection error"
except MissingSchema:
print "Missing schema"
def make_links_absolute(self, soup, url, tag):
print "Make links absolute: "+url
for tag in soup.findAll(tag, href=True):
if 'href' in tag.attrs:
tag['href'] = urlparse.urljoin(url, tag['href'])
def make_sources_absolute(self, soup, url, tag):
for tag in soup.findAll(tag):
tag['src'] = urlparse.urljoin(url, tag['src'])
def check_links(self, links, logMsg, transactionId, tag):
for link in links:
url = link.get(tag)
if url is not None:
urlNoAnchor = url.split('#')[0]
reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor.encode('utf-8'())
print logMsg+str(url)
if reqId != -1:
self.getLink(url, reqId, transactionId)
def getMaxPrefix(self, ctype):
prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
if len(prefList) > 0:
return prefList[-1]
else: return ctype
| from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
def __init__(self):
self.database = None
self.types = None
self.trie = None
def setDb(self, DB):
self.database = DB
def setTypes(self, types):
self.types = types
self.trie = marisa_trie.Trie(types)
def check(self, transactionId, content):
""" Najde tagy <a>, <link>, vybere atribut href, ulozi jako odkazy,
stahne obsah jako dalsi transakci.
"""
soup = BeautifulSoup(content, 'html.parser')
uri = self.database.getUri(transactionId)
self.make_links_absolute(soup, uri,'a')
links = soup.find_all('a')
self.check_links(links, "Link to ", transactionId, 'href')
self.make_links_absolute(soup, uri, 'link')
links2 = soup.find_all('link')
self.check_links(links2, "Linked resource: ", transactionId, 'href')
self.make_sources_absolute(soup, uri, 'img')
images = soup.find_all('img')
self.check_links(images, "Image: ", transactionId, 'src')
return
def getId(self):
return "linksFinder"
def getLink(self, url, reqId, srcId):
try:
print "Inspecting "+url
r = requests.head(url)
if r.status_code != 200:
self.database.setDefect(srcId, "badlink", 0, url)
if 'content-type' in r.headers.keys():
ct = r.headers['content-type']
else:
ct = ''
if self.getMaxPrefix(ct) in self.types:
print "Downloading "+url
r = requests.get(url)
self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
else: print "Content type not accepted: "+ct
except InvalidSchema:
print "Invalid schema"
except ConnectionError:
print "Connection error"
except MissingSchema:
print "Missing schema"
def make_links_absolute(self, soup, url, tag):
print "Make links absolute: "+url
for tag in soup.findAll(tag, href=True):
if 'href' in tag.attrs:
tag['href'] = urlparse.urljoin(url, tag['href'])
def make_sources_absolute(self, soup, url, tag):
for tag in soup.findAll(tag):
tag['src'] = urlparse.urljoin(url, tag['src'])
def check_links(self, links, logMsg, transactionId, tag):
for link in links:
url = link.get(tag)
if url is not None:
urlNoAnchor = url.split('#')[0]
reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor))
print logMsg+str(url)
if reqId != -1:
self.getLink(url, reqId, transactionId)
def getMaxPrefix(self, ctype):
prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
if len(prefList) > 0:
return prefList[-1]
else: return ctype
| Python | 0.999981 |
6902b88472826f6042dda6acda6f8a22d2fef64f | Change food color. | enactiveagents/model/structure.py | enactiveagents/model/structure.py | """
Module that holds classes that represent structures.
"""
import world
class Structure(world.Entity):
"""
Class representing structures in the world (i.e., static but potentially
interactable with by agents).
"""
def collidable(self):
return True
class Wall(Structure):
"""
Class representing a wall structure.
"""
def __init__(self):
super(Wall, self).__init__()
self.height = 1
self.width = 1
class Block(Structure):
"""
Class representing a block structure.
"""
color = (122, 179, 62, 255)
def collidable(self):
return False
class Food(Structure):
"""
Class representing food.
"""
color = (62, 179, 122, 255)
def collidable(self):
return False
| """
Module that holds classes that represent structures.
"""
import world
class Structure(world.Entity):
"""
Class representing structures in the world (i.e., static but potentially
interactable with by agents).
"""
def collidable(self):
return True
class Wall(Structure):
"""
Class representing a wall structure.
"""
def __init__(self):
super(Wall, self).__init__()
self.height = 1
self.width = 1
class Block(Structure):
"""
Class representing a block structure.
"""
color = (122, 179, 62, 255)
def collidable(self):
return False
class Food(Structure):
"""
Class representing food.
"""
color = (179, 122, 62, 255)
def collidable(self):
return False
| Python | 0.000019 |
738ec72f78847bb31c89305247fcbe2d994117f0 | Optimize case ObjectMixin.setUp | feder/cases/tests.py | feder/cases/tests.py | from django.core.urlresolvers import reverse
from django.test import TestCase
from feder.users.factories import UserFactory
from feder.main.mixins import PermissionStatusMixin
from .factories import CaseFactory
class ObjectMixin(object):
def setUp(self):
self.user = UserFactory(username="john")
self.case = CaseFactory()
self.permission_object = self.case.monitoring
class CaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:list')
class CaseDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:details', kwargs={'slug': self.case.slug})
class CaseCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.add_case', ]
def get_url(self):
return reverse('cases:create', kwargs={'monitoring': str(self.case.monitoring.pk)})
class CaseUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.change_case', ]
def get_url(self):
return reverse('cases:update', kwargs={'slug': self.case.slug})
class CaseDeleteViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.delete_case', ]
def get_url(self):
return reverse('cases:delete', kwargs={'slug': self.case.slug})
| from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase
from feder.monitorings.factories import MonitoringFactory
from feder.cases.models import Case
from feder.users.factories import UserFactory
from feder.institutions.factories import InstitutionFactory
from feder.main.mixins import PermissionStatusMixin
class ObjectMixin(object):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory(username="john")
self.monitoring = self.permission_object = MonitoringFactory(user=self.user)
self.institution = InstitutionFactory()
self.case = Case.objects.create(name="blabla",
monitoring=self.monitoring,
institution=self.institution,
user=self.user)
class CaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:list')
class CaseDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse('cases:details', kwargs={'slug': self.case.slug})
class CaseCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.add_case', ]
def get_url(self):
return reverse('cases:create', kwargs={'monitoring': str(self.monitoring.pk)})
class CaseUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.change_case', ]
def get_url(self):
return reverse('cases:update', kwargs={'slug': self.case.slug})
class CaseDeleteViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ['monitorings.delete_case', ]
def get_url(self):
return reverse('cases:delete', kwargs={'slug': self.case.slug})
| Python | 0.001096 |
8f280cece4d59e36ebfeb5486f25c7ac92718c13 | Clean it up a bit | third_problem.py | third_problem.py | not_vowel = 'bcdfghjklmnpqrtvwxyzBCDFGHJKLMNPQRTVWXYZ'
phrase = input()
output = ''
vowels = ''
# Remove sapces
phrase = phrase.replace(' ', '')
for char in phrase:
if char in not_vowel:
output += char # Add non vowel to output
else:
vowels += char # Add vowels to vowels
print(output)
print(vowels)
| letters = 'bcdfghjklmnpqrtvwxyzBCDFGHJKLMNPQRTVWXYZ'
phrase = input()
output = ''
vowels = ''
phrase = phrase.replace(' ', '')
for char in phrase:
if char in letters:
output += char
else:
vowels += char
print(output)
print(vowels) | Python | 0.000027 |
ac0d1036e56e8c24945abedbc372c717b5d7064a | improve imprort style. | zcode/constants.py | zcode/constants.py | """Common Numerical and Physical Constants.
"""
import numpy as np
import astropy as ap
import astropy.constants
import astropy.cosmology
# from astropy.cosmology import WMAP9 as cosmo
cosmo = astropy.cosmology.WMAP9
# Fundamental Constants
# ---------------------
NWTG = ap.constants.G.cgs.value
SPLC = ap.constants.c.cgs.value
MSOL = ap.constants.M_sun.cgs.value
LSOL = ap.constants.L_sun.cgs.value
RSOL = ap.constants.R_sun.cgs.value
PC = ap.constants.pc.cgs.value
AU = ap.constants.au.cgs.value
YR = ap.units.year.to(ap.units.s)
MELC = ap.constants.m_e.cgs.value
MPRT = ap.constants.m_p.cgs.value
H0 = cosmo.H0.cgs.value # Hubble Constants at z=0.0
HPAR = cosmo.H0.value/100.0
OMEGA_M = cosmo.Om0
OMEGA_B = cosmo.Ob0
OMEGA_DM = cosmo.Odm0
RHO_CRIT = cosmo.critical_density0.cgs.value
# Higher order constants
# ----------------------
# Thomson-Scattering (Electron-Scattering) cross-section
try:
SIGMA_T = ap.constants.sigma_T.cgs.value
except:
SIGMA_T = 6.652458734e-25 # cm^2 (i.e. per electron)
# Electron-Scattering Opacity ($\kappa_{es} = n_e \sigma_T / \rho = \mu_e \sigma_T / m_p$)
# Where $\mu_e$ is the mean-mass per electron, for a total mass-density $\rho$.
KAPPA_ES = SIGMA_T/MPRT
# Derived Constants
# -----------------
PIFT = 4.0*np.pi/3.0 # (4.0/3.0)*Pi
SCHW = 2*NWTG/(SPLC*SPLC) # Schwarzschild Constant (2*G/c^2)
HTAU = 1.0/H0 # Hubble Time - 1/H0 [sec]
MYR = 1.0e6*YR
GYR = 1.0e9*YR
KPC = 1.0e3*PC
MPC = 1.0e6*PC
GPC = 1.0e9*PC
| """Common Numerical and Physical Constants.
"""
import numpy as np
import astropy as ap
import astropy.constants
import astropy.cosmology
from astropy.cosmology import WMAP9 as cosmo
# Fundamental Constants
# ---------------------
NWTG = ap.constants.G.cgs.value
SPLC = ap.constants.c.cgs.value
MSOL = ap.constants.M_sun.cgs.value
LSOL = ap.constants.L_sun.cgs.value
RSOL = ap.constants.R_sun.cgs.value
PC = ap.constants.pc.cgs.value
AU = ap.constants.au.cgs.value
YR = ap.units.year.to(ap.units.s)
MELC = ap.constants.m_e.cgs.value
MPRT = ap.constants.m_p.cgs.value
H0 = cosmo.H0.cgs.value # Hubble Constants at z=0.0
HPAR = cosmo.H0.value/100.0
OMEGA_M = cosmo.Om0
OMEGA_B = cosmo.Ob0
OMEGA_DM = cosmo.Odm0
RHO_CRIT = cosmo.critical_density0.cgs.value
# Higher order constants
# ----------------------
# Thomson-Scattering (Electron-Scattering) cross-section
try:
SIGMA_T = ap.constants.sigma_T.cgs.value
except:
SIGMA_T = 6.652458734e-25 # cm^2 (i.e. per electron)
# Electron-Scattering Opacity ($\kappa_{es} = n_e \sigma_T / \rho = \mu_e \sigma_T / m_p$)
# Where $\mu_e$ is the mean-mass per electron, for a total mass-density $\rho$.
KAPPA_ES = SIGMA_T/MPRT
# Derived Constants
# -----------------
PIFT = 4.0*np.pi/3.0 # (4.0/3.0)*Pi
SCHW = 2*NWTG/(SPLC*SPLC) # Schwarzschild Constant (2*G/c^2)
HTAU = 1.0/H0 # Hubble Time - 1/H0 [sec]
MYR = 1.0e6*YR
GYR = 1.0e9*YR
KPC = 1.0e3*PC
MPC = 1.0e6*PC
GPC = 1.0e9*PC
| Python | 0 |
68206c7f6b396d03470b0499716181f978996364 | implement url_fix() | feedservice/utils.py | feedservice/utils.py | # -*- coding: utf-8 -*-
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import time
from itertools import chain
import urllib
import urlparse
def parse_time(value):
"""
>>> parse_time(10)
10
>>> parse_time('05:10') #5*60+10
310
>>> parse_time('1:05:10') #60*60+5*60+10
3910
"""
if value is None:
raise ValueError('None value in parse_time')
if isinstance(value, int):
# Don't need to parse already-converted time value
return value
if value == '':
raise ValueError('Empty valueing in parse_time')
for format in ('%H:%M:%S', '%M:%S'):
try:
t = time.strptime(value, format)
return t.tm_hour * 60*60 + t.tm_min * 60 + t.tm_sec
except ValueError, e:
continue
return int(value)
# from http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
# this does not increase asymptotical complexity
# but can still waste more time than it saves.
def shortest_of(strings):
return min(strings, key=len)
def longest_substr(strings):
"""
Returns the longest common substring of the given strings
"""
substr = ""
if not strings:
return substr
reference = shortest_of(strings) #strings[0]
length = len(reference)
#find a suitable slice i:j
for i in xrange(length):
#only consider strings long at least len(substr) + 1
for j in xrange(i + len(substr) + 1, length):
candidate = reference[i:j]
if all(candidate in text for text in strings):
substr = candidate
return substr
def flatten(l):
return chain.from_iterable(l)
# http://stackoverflow.com/questions/120951/how-can-i-normalize-a-url-in-python
def url_fix(s, charset='utf-8'):
"""Sometimes you get an URL by a user that just isn't a real
URL because it contains unsafe characters like ' ' and so on. This
function can fix some of the problems in a similar way browsers
handle data entered by the user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
:param charset: The target charset for the URL if the url was
given as unicode string.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
| #
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import time
from itertools import chain
def parse_time(value):
"""
>>> parse_time(10)
10
>>> parse_time('05:10') #5*60+10
310
>>> parse_time('1:05:10') #60*60+5*60+10
3910
"""
if value is None:
raise ValueError('None value in parse_time')
if isinstance(value, int):
# Don't need to parse already-converted time value
return value
if value == '':
raise ValueError('Empty valueing in parse_time')
for format in ('%H:%M:%S', '%M:%S'):
try:
t = time.strptime(value, format)
return t.tm_hour * 60*60 + t.tm_min * 60 + t.tm_sec
except ValueError, e:
continue
return int(value)
# from http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
# this does not increase asymptotical complexity
# but can still waste more time than it saves.
def shortest_of(strings):
return min(strings, key=len)
def longest_substr(strings):
"""
Returns the longest common substring of the given strings
"""
substr = ""
if not strings:
return substr
reference = shortest_of(strings) #strings[0]
length = len(reference)
#find a suitable slice i:j
for i in xrange(length):
#only consider strings long at least len(substr) + 1
for j in xrange(i + len(substr) + 1, length):
candidate = reference[i:j]
if all(candidate in text for text in strings):
substr = candidate
return substr
def flatten(l):
return chain.from_iterable(l)
| Python | 0.000018 |
50e1edf150a715367e46d28f15ac8958bcc18644 | Remove unused import | tinymce/views.py | tinymce/views.py | # coding: utf-8
# License: MIT, see LICENSE.txt
"""
django-tinymce4-lite views
"""
from __future__ import absolute_import
import json
import logging
from django import VERSION
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.utils.html import strip_tags
from django.conf import settings
from jsmin import jsmin
__all__ = ['spell_check', 'css', 'filebrowser']
logging.basicConfig(format='[%(asctime)s] %(module)s: %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@csrf_exempt
def spell_check(request):
"""
Implements the TinyMCE 4 spellchecker protocol
:param request: Django http request with JSON-RPC payload from TinyMCE 4
containing a language code and a text to check for errors.
:type request: django.http.request.HttpRequest
:return: Django http response containing JSON-RPC payload
with spellcheck results for TinyMCE 4
:rtype: django.http.JsonResponse
"""
data = json.loads(request.body.decode('utf-8'))
output = {'id': data['id']}
error = None
try:
import enchant
from enchant.checker import SpellChecker
if data['params']['lang'] not in enchant.list_languages():
error = 'Missing {0} dictionary!'.format(data['params']['lang'])
raise RuntimeError(error)
checker = SpellChecker(data['params']['lang'])
checker.set_text(strip_tags(data['params']['text']))
output['result'] = {checker.word: checker.suggest() for err in checker}
except ImportError:
error = 'The pyenchant package is not installed!'
logger.exception(error)
except RuntimeError:
logger.exception(error)
except Exception:
error = 'Unknown error!'
logger.exception(error)
if error is not None:
output['error'] = error
return JsonResponse(output)
def css(request):
"""
Custom CSS for TinyMCE 4 widget
By default it fixes widget's position in Django Admin
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with CSS file for TinyMCE 4
:rtype: django.http.HttpResponse
"""
if 'grappelli' in settings.INSTALLED_APPS:
margin_left = 0
elif VERSION[0] == 1 and VERSION[1] <= 8:
margin_left = 110 # For old style admin
else:
margin_left = 170 # For Django >= 1.9 style admin
content = render_to_string('tinymce/tinymce4.css',
context={'margin_left': margin_left},
request=request)
response = HttpResponse(content, content_type='text/css')
response['Cache-Control'] = 'no-store'
return response
def filebrowser(request):
"""
JavaScript callback function for `django-filebrowser`_
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with filebrowser JavaScript code for for TinyMCE 4
:rtype: django.http.HttpResponse
.. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
"""
try:
fb_url = request.build_absolute_uri(reverse('fb_browse'))
except:
fb_url = request.build_absolute_uri(reverse('filebrowser:fb_browse'))
content = jsmin(render_to_string('tinymce/filebrowser.js',
context={'fb_url': fb_url},
request=request))
return HttpResponse(content, content_type='application/javascript')
| # coding: utf-8
# License: MIT, see LICENSE.txt
"""
django-tinymce4-lite views
"""
from __future__ import absolute_import
import json
import logging
from django import VERSION
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.utils.html import strip_tags
from django.conf import settings
from jsmin import jsmin
__all__ = ['spell_check', 'css', 'filebrowser']
logging.basicConfig(format='[%(asctime)s] %(module)s: %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@csrf_exempt
def spell_check(request):
"""
Implements the TinyMCE 4 spellchecker protocol
:param request: Django http request with JSON-RPC payload from TinyMCE 4
containing a language code and a text to check for errors.
:type request: django.http.request.HttpRequest
:return: Django http response containing JSON-RPC payload
with spellcheck results for TinyMCE 4
:rtype: django.http.JsonResponse
"""
data = json.loads(request.body.decode('utf-8'))
output = {'id': data['id']}
error = None
try:
import enchant
from enchant.checker import SpellChecker
if data['params']['lang'] not in enchant.list_languages():
error = 'Missing {0} dictionary!'.format(data['params']['lang'])
raise RuntimeError(error)
checker = SpellChecker(data['params']['lang'])
checker.set_text(strip_tags(data['params']['text']))
output['result'] = {checker.word: checker.suggest() for err in checker}
except ImportError:
error = 'The pyenchant package is not installed!'
logger.exception(error)
except RuntimeError:
logger.exception(error)
except Exception:
error = 'Unknown error!'
logger.exception(error)
if error is not None:
output['error'] = error
return JsonResponse(output)
def css(request):
"""
Custom CSS for TinyMCE 4 widget
By default it fixes widget's position in Django Admin
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with CSS file for TinyMCE 4
:rtype: django.http.HttpResponse
"""
if 'grappelli' in settings.INSTALLED_APPS:
margin_left = 0
elif VERSION[0] == 1 and VERSION[1] <= 8:
margin_left = 110 # For old style admin
else:
margin_left = 170 # For Django >= 1.9 style admin
content = render_to_string('tinymce/tinymce4.css',
context={'margin_left': margin_left},
request=request)
response = HttpResponse(content, content_type='text/css')
response['Cache-Control'] = 'no-store'
return response
def filebrowser(request):
"""
JavaScript callback function for `django-filebrowser`_
:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with filebrowser JavaScript code for for TinyMCE 4
:rtype: django.http.HttpResponse
.. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
"""
try:
fb_url = request.build_absolute_uri(reverse('fb_browse'))
except:
fb_url = request.build_absolute_uri(reverse('filebrowser:fb_browse'))
content = jsmin(render_to_string('tinymce/filebrowser.js',
context={'fb_url': fb_url},
request=request))
return HttpResponse(content, content_type='application/javascript')
| Python | 0.000001 |
b64e7714e581cfc0c0a0d0f055b22c5edca27e24 | Raise KeyboardInterrupt to allow the run to handle logout | susumutakuan.py | susumutakuan.py | import discord
import asyncio
import os
import signal
import sys
#Set up Client State
CLIENT_TOKEN=os.environ['TOKEN']
#Create Discord client
client = discord.Client()
#Handle shutdown gracefully
async def sigterm_handler(signum, frame):
print("Logging out...")
raise KeyboardInterrupt
print('Shutting down...')
sys.exit(1)
#Register SIGTERM Handler
signal.signal(signal.SIGTERM, sigterm_handler)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
if message.content.startswith('!test'):
counter = 0
tmp = await client.send_message(message.channel, 'Calculating messages...')
async for log in client.logs_from(message.channel, limit=100):
if log.author == message.author:
counter += 1
await client.edit_message(tmp, 'You have {} messages.'.format(counter))
elif message.content.startswith('!sleep'):
await asyncio.sleep(5)
await client.send_message(message.channel, 'Done sleeping')
client.run(CLIENT_TOKEN) | import discord
import asyncio
import os
import signal
import sys
#Set up Client State
CLIENT_TOKEN=os.environ['TOKEN']
#Create Discord client
client = discord.Client()
#Handle shutdown gracefully
def sigterm_handler(signum, frame):
print("Logging out...")
client.logout()
print('Shutting down...')
sys.exit(1)
#Register SIGTERM Handler
signal.signal(signal.SIGTERM, sigterm_handler)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
if message.content.startswith('!test'):
counter = 0
tmp = await client.send_message(message.channel, 'Calculating messages...')
async for log in client.logs_from(message.channel, limit=100):
if log.author == message.author:
counter += 1
await client.edit_message(tmp, 'You have {} messages.'.format(counter))
elif message.content.startswith('!sleep'):
await asyncio.sleep(5)
await client.send_message(message.channel, 'Done sleeping')
client.run(CLIENT_TOKEN) | Python | 0 |
13e30fe6af93bbb48a4795ee22f4f3ba760adc14 | add get_session_names | tmuxback/tmux.py | tmuxback/tmux.py | # -*- coding:utf-8 -*-
import subprocess
import re
#tmux commands
#list sessions
CMD_LIST_SESSIONS='tmux list-sessions -F#S'
def get_session_names():
""" return a list of tmux session names """
s = subprocess.check_output(CMD_LIST_SESSIONS.split(' '))
s = re.sub('\n$','',s)
return s.split('\n')
#if __name__ == '__main__':
# print get_session_names()
| # -*- coding:utf-8 -*-
def get_session_names():
"""get session names"""
pass
| Python | 0.000002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.