| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
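The `diff` cells below are stored percent-encoded: `%0A` is a newline, `%22` a double quote, `%5B`/`%5D` square brackets. A minimal sketch of turning one cell back into a readable hunk with the standard library; the `row` dict here is a hypothetical record shaped like the rows below, with values copied from the first row:

```python
from urllib.parse import unquote

# Hypothetical record shaped like the rows below; the 'diff' cell keeps its
# percent-encoding, so %0A marks line breaks inside the hunk.
row = {
    "commit": "09f6530a79e744bb5d4267bab0de27bf7aa34598",
    "diff": "@@ -2523,17 +2523,17 @@%0A minutes=%0A-1%0A+7%0A 0)%0A",
}

# unquote() reverses the percent-encoding, recovering the unified-diff hunk.
print(unquote(row["diff"]))
```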
09f6530a79e744bb5d4267bab0de27bf7aa34598
|
Make default cursor start at now-70m instead of now-10m.
|
vt/feed.py
|
vt/feed.py
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-

# Copyright © 2019 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime
from datetime import timedelta

import enum
import io
import json
import time
import asyncio
import bz2

from .error import APIError
from .object import Object


__all__ = [
    'Feed',
    'FeedType']


class FeedType(enum.Enum):
  """Feed types."""
  FILES = 'files'
  URLS = 'urls'


class Feed:
  """Feed represents a stream of objects received from VirusTotal in real-time.

  For more information about VirusTotal Feeds see:
  https://developers.virustotal.com/v3.0/reference#feeds

  In the example below the loop iterates forever, retrieving file objects as
  they are processed by VirusTotal. For a more elaborate example see the file
  examples/file_feed.py in this repository.

  >>> with vt.Client(<apikey>) as client:
  >>>   for file_obj in client.feed(vt.FeedType.FILES):
  >>>     print(file_obj.id)

  Instances of this class are not created directly, you should use the
  :func:`vt.Client.feed` method instead.
  """

  def __init__(self, client, feed_type, cursor=None):
    """Initializes a Feed object.

    This function is not intended to be called directly. Client.feed() is
    the preferred way for creating a feed.
    """
    self._client = client
    self._type = feed_type
    self._batch = None
    self._count = 0

    # This class tolerates a given number of missing batches in the feed;
    # if self._missing_batches_tolerancy is set to 0, there's no tolerance
    # for missing batches and even a single missing batch will cause an error.
    # However, missing batches can occur from time to time.
    self._missing_batches_tolerancy = 1

    if cursor:
      batch_time, _, batch_skip = cursor.partition('-')
      self._batch_time = datetime.strptime(batch_time, '%Y%m%d%H%M')
      self._batch_skip = int(batch_skip) if batch_skip else 0
    else:
      self._batch_time = datetime.utcnow() - timedelta(minutes=10)
      self._batch_skip = 0

    self._next_batch_time = self._batch_time

  async def _get_batch_async(self, batch_time):
    """Retrieves a specific batch from the backend.

    There's one batch per minute, each identified by the date in YYYYMMDDhhmm
    format. The batch_time argument is a datetime object that is converted to
    this format, the seconds in the datetime are ignored.
    """
    while True:
      response = await self._client.get_async('/feeds/{}/{}'.format(
          self._type.value, batch_time.strftime('%Y%m%d%H%M')))
      error = await self._client.get_error_async(response)
      if not error:
        break
      if error.code == 'NotAvailableYet':
        await asyncio.sleep(60)
      else:
        raise error
    return io.BytesIO(bz2.decompress(await response.content.read_async()))

  def _get_batch(self, *args, **kwargs):
    return asyncio.get_event_loop().run_until_complete(
        self._get_batch_async(*args, **kwargs))

  async def _get_next_batch_async(self):
    """Retrieves the next batch from the feed.

    This function tolerates a certain number of missing batches. If some batch
    is missing the next one will be retrieved. If more than
    self._missing_batches_tolerancy consecutive batches are missing, the
    underlying APIError is raised.
    """
    missing_batches = 0
    while True:
      try:
        self._batch_time = self._next_batch_time
        self._next_batch_time += timedelta(seconds=60)
        self._batch = await self._get_batch_async(self._batch_time)
        self._batch_cursor = 0
        break
      except APIError as error:
        # The only acceptable error here is NotFoundError, if such an error
        # occurs we try to get the next batch.
        if error.code != 'NotFoundError':
          raise error
        missing_batches += 1
        if missing_batches > self._missing_batches_tolerancy:
          raise error

  def _get_next_batch(self):
    return asyncio.get_event_loop().run_until_complete(
        self._get_next_batch_async())

  def _skip(self, n):
    for _ in range(n):
      self._batch.readline()
      self._batch_cursor += 1

  def __iter__(self):
    return self

  async def __aiter__(self):
    return self

  def __next__(self):
    if self._batch:
      next_item = self._batch.readline()
    else:
      next_item = None
    if not next_item:
      self._get_next_batch()
      self._skip(self._batch_skip)
      self._batch_skip = 0
      next_item = self._batch.readline()
    self._batch_cursor += 1
    self._count += 1
    return Object.from_dict(json.loads(next_item.decode('utf-8')))

  async def __anext__(self):
    if self._batch:
      next_item = self._batch.readline()
    else:
      next_item = None
    if not next_item:
      await self._get_next_batch_async()
      self._skip(self._batch_skip)
      self._batch_skip = 0
      next_item = self._batch.readline()
    self._batch_cursor += 1
    self._count += 1
    return Object.from_dict(json.loads(next_item.decode('utf-8')))

  @property
  def cursor(self):
    """Returns a cursor indicating the last item retrieved from the feed.

    This cursor can be used for creating a new Feed object that continues where
    a previous one left.
    """
    return self._batch_time.strftime('%Y%m%d%H%M-') + str(self._batch_cursor)
|
Python
| 0.000001
|
@@ -2523,17 +2523,17 @@
minutes=
-1
+7
0)%0A
|
8c4555d3471324b2fc87bd914d3f10992424cecb
|
update ICD url
|
weather.py
|
weather.py
|
#!/usr/bin/env python

import sys
import html5lib
import urllib2
from numpy import median, array
from xml_icd import parseICD
from html5lib import treebuilders


def salt():
    wx = {}
    try:
        tcs = parseICD("http://sgs.salt/xml/salt-tcs-icd.xml")
        time = tcs['tcs xml time info']
        bms = tcs['bms external conditions']
        temps = bms['Temperatures']
        wx["Temp"] = median(array(temps.values()))
        wx["Temp 2m"] = temps["2m"]
        wx["Temp 30m"] = temps["30m"]
        # get time
        wx["SAST"] = time["SAST"].split()[1]
        wx["Date"] = time["SAST"].split()[0]
        # set up other values of interest
        wx["Air Pressure"] = bms["Air pressure"] * 10.0
        wx["Dewpoint"] = bms["Dewpoint"]
        wx["RH"] = bms["Rel Humidity"]
        wx["Wind Speed (30m)"] = bms["Wind mag 30m"] * 3.6
        wx["Wind Speed"] = bms["Wind mag 10m"] * 3.6
        wx["Wind Dir (30m)"] = bms["Wind dir 30m"]
        wx["Wind Dir"] = bms["Wind dir 10m"]
        wx["T - DP"] = wx["Temp 2m"] - bms["Dewpoint"]
        wx["Raining"] = bms["Rain detected"]
        return wx
    except:
        return False


def wasp():
    wx = {}
    try:
        p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
        doc = p.parse(urllib2.urlopen("http://swaspgateway.suth/",
                                      timeout=1).read())
        t = doc.getElementsByTagName("table")[0]
        tds = t.getElementsByTagName("td")
        wx["Temp"] = float(tds[7].firstChild.nodeValue)
        if tds[10].firstChild.nodeValue == "RAIN":
            wx["Sky"] = "Rain"
            wx["Sky Temp"] = wx["Temp"]
        else:
            sky, stemp = tds[10].firstChild.nodeValue.split('(')
            stemp = stemp[0:-1]
            wx["Sky"] = sky
            wx["Sky Temp"] = stemp
        wx["T - DP"] = float(tds[9].firstChild.nodeValue)
        wx["RH"] = float(tds[8].firstChild.nodeValue)
        tds[6].normalize()
        wx["Wind Dir"] = tds[6].firstChild.nodeValue[1:]
        wx["Wind Speed"] = float(tds[5].firstChild.nodeValue)
        rain = tds[4].firstChild.nodeValue
        if rain == "DRY":
            wx["Raining"] = False
        else:
            wx["Raining"] = True
        wx["UT"] = tds[3].firstChild.nodeValue.strip()
        tds[31].normalize()
        wx["Status"] = tds[31].firstChild.nodeValue.strip()
        return wx
    except:
        return False


def grav():
    wx = {}
    p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
    kan11 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan11.htm",
                                    timeout=1).read())
    kan16 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan16.htm",
                                    timeout=1).read())
    kan11_tds = kan11.getElementsByTagName("td")
    kan16_tds = kan16.getElementsByTagName("td")
    wx["Date"], wx["UT"] = kan11_tds[12].firstChild.nodeValue.split()
    kan11_tds[14].normalize()
    kan11_tds[15].normalize()
    wx["Temp"] = float(kan11_tds[14].firstChild.nodeValue)
    wx["RH"] = float(kan11_tds[15].firstChild.nodeValue)
    kan16_tds[13].normalize()
    kan16_tds[14].normalize()
    wx["Wind Dir"] = int(kan16_tds[13].firstChild.nodeValue)
    wx["Wind Speed"] = float(kan16_tds[14].firstChild.nodeValue) * 3.6
    return wx


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print "Usage: weather.py <salt|wasp|grav>"
    else:
        wx = eval("%s()" % sys.argv[1].lower())
        if wx:
            for k, v in sorted(wx.items()):
                print "%20s : \t %s" % (k, v)
        else:
            print "No information received."
|
Python
| 0.000001
|
@@ -224,11 +224,11 @@
p://
-sgs
+icd
.sal
|
26f5a4fd2ed66d4a9b6fcef296b1365e8364aaf7
|
Add messages to assertions
|
web/web.py
|
web/web.py
|
#!/usr/bin/env python3.6
import json
# import webcolors
import re
from flask import Flask, request, render_template, send_file
from flask_cors import CORS
import pygame
import pygame.camera

# file to write to
FILE_PATH = 'data.json'
# regex to match that colors are hexadecimal values
regex_color = '^#[A-Fa-f0-9]{6}$'

APP = Flask(__name__, static_url_path='/static',
            template_folder='templates')
# allow cross origin requests, we need this because of jscolor
CORS(APP)


def get_image():
    """
    Uses the first camera to get an image and saves it to a file.
    """
    pygame.camera.init()
    c = pygame.camera.list_cameras()
    if c is None:
        # no cameras found
        return
    # use the first camera, get the image and save it
    cam = pygame.camera.Camera(c[0], (1280, 720))
    cam.start()
    img = cam.get_image()
    pygame.image.save(img, "capture.jpg")
    cam.stop()


@APP.route('/', methods=['GET'])
def index():
    return render_template('index.html')


# writes the data to the output file
def write(data):
    d = json.dumps(data)
    with open(FILE_PATH, 'w') as f:
        f.write(d)
        f.close()


@APP.route('/state', methods=['GET'])
def get_state():
    """
    GET /state
    Returns the current state of the lights.
    This will just return the current state of the data file, without checking that it is valid json.
    Not sure if there is any point in converting from json back into json again.
    """
    with open(FILE_PATH, 'r') as f:
        return f.readlines()


@APP.route('/image', methods=['GET'])
def get_webcam():
    """
    Gets an image from the webcam
    """
    get_image()
    return send_file('capture.jpg')


def validate_state(data: dict):
    """
    Checks that the provided object contains valid data
    Throws an AssertionError if invalid
    """
    assert isinstance(data, dict)
    # validate colors
    assert data['color1'] is not None
    assert re.match(regex_color, data['color1'])
    assert data['color2'] is not None
    assert re.match(regex_color, data['color2'])
    # validate types for the values that we cast
    # this check is redundant when getting values from the form
    # but is not when we just get json from the user
    assert isinstance(data['random1'], bool)
    assert isinstance(data['random2'], bool)
    assert isinstance(data['pattern'], int)
    assert isinstance(data['length'], int)
    assert isinstance(data['delay'], int)
    # check the bounds for some of the parameters
    assert data['length'] > 0
    assert data['delay'] >= 0
    assert data['pattern'] >= 0


@APP.route('/form', methods=['POST'])
def form_state():
    """
    POST /form
    Request body are url encoded form params
    Updates the state using the form parameters.
    """
    data = {}
    # get the state from request args
    # these can be part of the query string, but we don't really care if it's one or the other
    try:
        data['color1'] = webcolors.hex_to_rgb('#' + request.args['color1'])
        data['color2'] = webcolors.hex_to_rgb('#' + request.args['color2'])
        data['random1'] = bool(request.args['random1'])
        data['random2'] = bool(request.args['random2'])
        data['pattern'] = int(request.args['pattern'])
        data['length'] = int(request.args['length'])
        data['delay'] = int(request.args['delay'])
        validate_state(data)
    except Exception:
        # if we caught something, probably invalid request data
        return 'Bad!', 400
    write(data)
    return data, 200


@APP.route('/state', methods=['POST'])
def post_state():
    """
    POST /state
    Expects the following json from the request body
    {
        "color1": "#00f00f",
        "color2": "#00f00f",
        "random2": true,
        "random1": true,
        "length": 1,
        "delay": 1,
        "pattern": 1
    }
    Updates the current state from the json provided in the request body.
    """
    # from the request body, load some json
    try:
        data = request.get_json(force=True)
        validate_state(data)
    except Exception as e:
        print(e)
        # todo make this exception handling more specific
        # just return bad request, something was likely invalid
        return 'Bad! ' + str(type(e)) + str(e), 400
    # write this data to the state file if valid
    write(data)
    return json.dumps(data), 200


if __name__ == '__main__':
    # host the server on port 80
    # while we shouldn't require using the builtin server in production environment
    # I think that this is just fine for the use case that I require
    # we can just turn off debug mode in production
    APP.run(debug=False, host='0.0.0.0', port=80)
|
Python
| 0.000001
|
@@ -1864,16 +1864,39 @@
a, dict)
+, %22Data was not a dict%22
%0A%0A #
@@ -1940,32 +1940,53 @@
r1'%5D is not None
+, %22Color 1 was none.%22
%0A assert re.m
@@ -2014,24 +2014,47 @@
a%5B'color1'%5D)
+, %22Color 1 was invalid%22
%0A assert
@@ -2079,16 +2079,36 @@
not None
+, %22Color 2 was none%22
%0A ass
@@ -2148,16 +2148,39 @@
olor2'%5D)
+, %22color 2 was invalid%22
%0A%0A #
@@ -2375,32 +2375,59 @@
random1'%5D, bool)
+, %22Random 1 was not a bool%22
%0A assert isin
@@ -2455,16 +2455,43 @@
%5D, bool)
+, %22random 2 was not a bool%22
%0A ass
@@ -2518,28 +2518,62 @@
'pattern'%5D,
+(
int
-)
+, float), %22pattern was not an int%22
%0A assert
@@ -2599,20 +2599,60 @@
ngth'%5D,
+(
int
-)
+, float)), %22length was not int or float%22
%0A ass
@@ -2685,12 +2685,51 @@
'%5D,
+(
int
-)
+, float)), %22delay was not int or float%22
%0A%0A
@@ -2805,16 +2805,40 @@
th'%5D %3E 0
+, %22length out of bounds%22
%0A ass
@@ -2859,16 +2859,39 @@
y'%5D %3E= 0
+, %22delay out of bounds%22
%0A ass
@@ -2914,16 +2914,35 @@
n'%5D %3E= 0
+, %22pattern invalid%22
%0A%0A@APP.r
|
367b9004cfbfbb8fe950a26c9c87948a8bac17d1
|
Simplify JSON import logic
|
whip/db.py
|
whip/db.py
|
"""
Whip database storage module.
All IP ranges with associated information are stored in a LevelDB
database. The key/value layout is as follows:
* The end IP is used as the key. This allows for quick
fast range lookups.
* The begin IP and the actual information is stored in the value. The
most recent infoset for a range is stored in full, encoded as JSON, so
that it can be returned quickly without any decoding and processing
overhead.
To save a lot of space (and hence improve performance), historical
data for an IP range is stored as diffs from the most recent version.
When querying for older versions, the original data is reconstructed
on-demand.
The value is packed as follows:
* IPv4 begin address (4 bytes)
* Length of the JSON data for the most recent information (2 bytes)
* JSON encoded data for the latest version (variable length)
* Length of the latest datetime string (1 byte)
* Latest datetime as string
* JSON encoded diffs for older versions (variable length, until end)
"""
import logging
import operator
import struct
import plyvel
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
json_dumps = json.dumps
json_loads = json.loads
from whip.util import (
dict_diff,
dict_patch,
ipv4_int_to_bytes,
ipv4_int_to_str,
merge_ranges,
PeriodicCallback,
)
SIZE_STRUCT = struct.Struct('>H')
logger = logging.getLogger(__name__)
def _build_db_record(begin_ip_int, end_ip_int, infosets):
"""Create database records for an iterable of merged infosets."""
# Build history structure. The latest version is stored in
# full, ...
infosets.sort(key=operator.itemgetter('datetime'), reverse=True)
latest = infosets[0]
latest_datetime = latest['datetime'].encode('ascii')
latest_json = json_dumps(latest)
# ... while older versions are stored as (reverse) diffs to the
# previous (in time) version.
history_json = json_dumps([
dict_diff(infosets[i + 1], infosets[i])
for i in xrange(len(infosets) - 1)
])
# Build the actual key and value byte strings.
# XXX: String concatenation seems faster than the''.join((..., ...))
# alternative on 64-bit CPython 2.7.5.
key = ipv4_int_to_bytes(end_ip_int)
value = (ipv4_int_to_bytes(begin_ip_int)
+ SIZE_STRUCT.pack(len(latest_json))
+ latest_json
+ chr(len(latest_datetime))
+ latest_datetime
+ history_json)
return key, value
class Database(object):
def __init__(self, database_dir, create_if_missing=False):
logger.debug("Opening database %s", database_dir)
self.db = plyvel.DB(
database_dir,
create_if_missing=create_if_missing,
write_buffer_size=16 * 1024 * 1024,
max_open_files=512,
lru_cache_size=128 * 1024 * 1024)
self._make_iter()
def _make_iter(self):
"""Make an iterator for the current database.
Iterator construction is relatively costly, so reuse it for
performance reasons. The iterator won't see any data written
after its construction, but this is not a problem since the data
set is static.
"""
self.iter = self.db.iterator(include_key=False)
def load(self, *iters):
"""Load data from importer iterables"""
# Merge all iterables to produce unique, non-overlapping IP
# ranges with multiple timestamped infosets.
merged = merge_ranges(*iters)
reporter = PeriodicCallback(lambda: logger.info(
"%d database records stored; current position: %s",
n, ipv4_int_to_str(item[0])))
n = 0
for n, item in enumerate(merged, 1):
key, value = _build_db_record(*item)
self.db.put(key, value)
# Tick once in a while
if n % 100 == 0:
reporter.tick()
reporter.tick(True)
# Refresh iterator so that it sees the new data
self._make_iter()
def lookup(self, ip, dt=None):
"""Lookup a single IP address in the database
This either returns the stored information, or `None` if no
information was found.
"""
# The database key stores the end IP of all ranges, so a simple
# seek positions the iterator at the right key (if found).
self.iter.seek(ip)
try:
value = next(self.iter)
except StopIteration:
# Past any range in the database: no hit
return None
# Check range boundaries. The first 4 bytes store the begin IP.
# If the IP currently being looked up is in a gap, there is no
# hit after all.
if ip < value[:4]:
return None
# The next 2 bytes indicate the length of the JSON string for
# the most recent information
offset = 4
(json_size,) = SIZE_STRUCT.unpack(value[offset:offset + 2])
offset += 2
infoset_json = value[offset:offset + json_size]
# If the lookup is for the most recent version, we're done.
if dt is None:
return infoset_json
# This is a lookup for a specific timestamp. The most recent
# version may be the one asked for.
offset += json_size
latest_datetime_size = ord(value[offset])
offset += 1
latest_datetime = value[offset:offset + latest_datetime_size]
if latest_datetime <= dt:
return infoset_json
offset += latest_datetime_size
# Too bad, we need to delve deeper into history. Decode JSON,
# iteratively apply patches, and re-encode to JSON again.
infoset = json_loads(infoset_json)
history = json_loads(value[offset:])
for to_delete, to_set in history:
dict_patch(infoset, to_delete, to_set)
if infoset['datetime'] <= dt:
# Finally found it; encode and return the result.
return json_dumps(infoset)
# Too bad, no result
return None
|
Python
| 0.990858
|
@@ -1093,100 +1093,150 @@
vel%0A
-try:%0A import ujson as json%0Aexcept ImportError:%0A try:%0A import simplejson as json
+%0A# Use fastest JSON implementation available%0Afor lib in ('ujson', 'simplejson', 'json'):%0A try:%0A json = __import__(lib)%0A break
%0A
@@ -1268,19 +1268,12 @@
-import json
+pass
%0A%0Ajs
|
14e5937f4d4654451b1ab2cb8ddb5951b6741b5b
|
fix refactor bug
|
lib/raviron/proxy.py
|
lib/raviron/proxy.py
|
#
# This file is part of raviron. Raviron is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the raviron authors. See the file "AUTHORS" for a
# complete list.

import re
import os
import shlex
import subprocess
import textwrap

from . import node, util


# proxy-create command

def create_ssh_keypair(keyname, comment):
    """Create a new ssh keypair."""
    sshdir = os.path.join(util.get_homedir(), '.ssh')
    util.create_directory(sshdir, 0o700)
    keyfile = os.path.join(sshdir, keyname)
    if util.try_stat(keyfile):
        raise RuntimeError('~/.ssh/{} already exists'.format(keyname))
    subprocess.check_call(['ssh-keygen', '-f', keyfile, '-N', "", '-q', '-C', comment])
    os.chmod(keyfile, 0o600)
    os.chmod(keyfile + '.pub', 0o644)
    return keyfile


def create_proxy(proxyname):
    """Create a proxy wrapper."""
    # Running in a software collection?
    enable_scls = []
    scls = os.environ.get('X_SCLS', '')
    for scl in scls.split():
        with open('/etc/scl/conf/{}'.format(scl)) as fin:
            prefix = fin.readline().rstrip()
        enable_scls.append('. {}/{}/enable'.format(prefix, scl))
    if scls:
        enable_scls.append('X_SCLS={}'.format(shlex.quote(scls)))
        enable_scls.append('export X_SCLS')
    else:
        enable_scls.append('# No software collections enabled.')
    enable_scls = '\n'.join(enable_scls)
    # Running in a virtualenv?
    venv = os.environ.get('VIRTUAL_ENV')
    enable_venv = '. {}/bin/activate'.format(venv) if venv else '# No virtualenv enabled.'
    # Create the ~/bin directory if needed
    bindir = os.path.join(util.get_homedir(), 'bin')
    proxyfile = os.path.join(bindir, proxyname)
    util.create_directory(bindir, 0o755)
    contents = textwrap.dedent("""\
        #!/bin/sh
        {}
        {}
        exec raviron proxy-run
        """).format(enable_scls, enable_venv)
    with open(proxyfile, 'w') as fout:
        fout.write(contents)
    os.chmod(proxyfile, 0o700)
    return proxyfile


def install_proxy(pubkey, command):
    """Add a public key to the authorized_keys file."""
    with open(pubkey) as fin:
        keydata = fin.read()
    sshdir = os.path.join(util.get_homedir(), '.ssh')
    authentry = 'no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding'
    authentry += ',command="{}" '.format(command)
    authentry += keydata
    authfile = os.path.join(sshdir, 'authorized_keys')
    with open(authfile, 'a') as fout:
        fout.write(authentry)
    os.chmod(authfile, 0o600)


_key_name = 'id_raviron'
_proxy_name = 'raviron-proxy'


def create_main(env):
    """The `raviron proxy-create` command."""
    keyname = env.config['proxy']['key_name']
    proxyname = env.config['proxy']['proxy_name']
    keyfile = create_ssh_keypair(keyname, proxyname)
    proxyfile = create_proxy(proxyname)
    install_proxy(keyfile + '.pub', proxyfile)
    print('Private key created as: ~/.ssh/{}'.format(keyname))
    print('Proxy created at: ~/bin/{}'.format(proxyname))


# proxy-run command

# These are the virsh commands used by the ssh power driver in Ironic.
# They need to match and be kept up to date with the following file:
# https://github.com/openstack/ironic/blob/master/ironic/drivers/modules/ssh.py#L151

_virsh_commands = [
    ('start', re.compile(' start ([^ ]+)')),
    ('stop', re.compile(' destroy ([^ ]+)')),
    ('reboot', re.compile(' reset ([^ ]+)')),
    ('get_node_macs', re.compile(' dumpxml ([^ ]+) .*mac')),
    ('list_running', re.compile(' list --all.*running')),
    ('list_all', re.compile(' list --all')),
    ('get_boot_device', re.compile(' dumpxml ([^ ]+) .*boot')),
    ('set_boot_device', re.compile(r'boot dev=\\"([^\\]+)\\".* edit ([^ ]+)')),
]


def parse_virsh_command_line():
    """Parse the virsh command line.

    The proxy script is run as a forced command specified in an ssh private
    key. The original command is available in the $SSH_ORIGINAL_COMMAND
    environment variable.
    """
    command = os.environ.get('SSH_ORIGINAL_COMMAND')
    if command is None:
        raise RuntimeError('This command needs to be run through ssh.')
    for cmd, regex in _virsh_commands:
        match = regex.search(command)
        if match:
            return (cmd,) + match.groups()
    raise RuntimeError('unrecognized command: {}'.format(command))


def do_run(env):
    """The `proxy-run` command."""
    log = env.logger
    log.debug('New request, command = {}'.format(os.environ.get('SSH_ORIGINAL_COMMAND', '?')))
    cmdline = parse_virsh_command_line()
    log.info('Parsed command: {}'.format(' '.join(cmdline)))
    if cmdline[0] == 'start':
        node.do_start(env, cmdline[1])
    elif cmdline[0] == 'stop':
        node.do_stop(env, cmdline[1])
    elif cmdline[0] == 'reboot':
        node.do_reboot(env, cmdline[1])
    elif cmdline[0] == 'list_running':
        node.do_list_running(env, True)
    elif cmdline[0] == 'list_all':
        node.do_list_all(env)
    elif cmdline[0] == 'get_boot_device':
        node.do_get_boot_device(env, cmdline[1])
    elif cmdline[0] == 'set_boot_device':
        node.do_set_boot_device(env, cmdline[2], cmdline[1])
    elif cmdline[0] == 'get_node_macs':
        node.do_get_node_macs(env, cmdline[1], True)
|
Python
| 0.000001
|
@@ -2728,19 +2728,17 @@
def
+do_
create
-_main
(env
|
3578dc1910dff4650c7b38ed97b825b0f5ea9711
|
Fix wording for query help text
|
libpatchew/search.py
|
libpatchew/search.py
|
#!/usr/bin/env python2
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Fam Zheng <famcool@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from message import Message
import series
import patch
from util import *


def build_keyword_checker(e):
    u = e.upper()
    def c(s):
        return u in s.get_subject(upper=True)
    return c

def build_id_checker(e):
    u = e.upper()
    def c(s):
        return u in s.get_message_id().upper()
    return c

def build_is_checker(e, reverse=False):
    def build_subchecker(k):
        if p == "replied":
            return lambda s: s.is_replied()
        elif p == "reviewed":
            return lambda s: s.is_reviewed()
        elif p == 'tested':
            return lambda s: s.get_status("testing", {}).get("passed") == True
        elif p == "failed":
            return lambda s: s.get_status("testing", {}).get("passed") == False
        elif p == "untested":
            return lambda s: s.get_status("testing") == None
        elif p == "testing":
            return lambda s: s.get_status("testing", {}).get("started") == True
        elif p == "obsolete":
            return lambda s: s.get_status("obsoleted-by", None) != None
    subcheckers = []
    for p in e.split(","):
        sc = build_subchecker(p)
        if sc:
            subcheckers.append(sc)
    def c(s):
        return (True in [sc(s) for sc in subcheckers]) != reverse
    return c

def build_not_checker(e):
    return build_is_checker(e, reverse=True)

def build_addr_checker(e, addr_extract):
    search_list = e.split(",")
    def c(s):
        addr_list = addr_extract(s).upper()
        for i in search_list:
            if i.upper() in addr_list:
                return True
    return c

def build_from_checker(e):
    return build_addr_checker(e, lambda x: x.get_from(True))

def build_to_checker(e):
    return build_addr_checker(e, lambda x: x.get_to(True))

def build_cc_checker(e):
    return build_addr_checker(e, lambda x: x.get_cc(True) + "," + x.get_to(True))

def build_age_checker(e):
    less = e.startswith("<")
    value = ""
    unit = None
    while e and e[0] in "><":
        e = e[1:]
    while e and e[0].isdigit():
        value = value + e[0]
        e = e[1:]
    unit = e or "d"
    value = int(value)
    if not value:
        return lambda x: True
    sec = human_to_seconds(value, unit)
    def c(s):
        a = s.get_age(True)
        return (a < sec) == less
    return c

_checkers = {
    'subject': {
        'build': build_keyword_checker,
        'desc': 'Search keyword in subject'
    },
    'id': {
        'build': build_id_checker,
        'desc': 'Search by message-id'
    },
    'is': {
        'build': build_is_checker,
        'desc': '''Search by property, for example is:reviewed or is:replied.
            Multiple properties can be listed by separating with comma'''
    },
    'not': {
        'build': build_not_checker,
        'desc': '''Reverse of "is", meaning to search series that has none of the
            list properties'''
    },
    'from': {
        'build': build_from_checker,
        'desc': '''Search "From:" field, with a list of names or addresses
            separated by ",".'''
    },
    'to': {
        'build': build_to_checker,
        'desc': 'Search "To:" field'
    },
    'cc': {
        'build': build_cc_checker,
        'desc': 'Search *both* "To:" and "Cc:" field',
    },
    'age': {
        'build': build_age_checker,
        'desc': '''Search by age of series, for example age:>1w or age:1y. If
            comparison is omitted, it is compare by less than. Supported units are
            "d", "w", "m", "y".''',
    },
}

class Filter(object):
    def __init__(self, db, exp):
        self._db = db
        self._filters = []
        self.build_filters(exp)

    def match(self, s):
        for c in self._filters:
            if not c(s):
                return False
        return True

    def build_filters(self, exp):
        for e in [x.strip() for x in exp.split(" ")]:
            if not e:
                continue
            elif e[0] in ":+":
                t, v = "is", e[1:]
            elif e[0] in "-":
                t, v = "not", e[1:]
            elif e[0] in "<>":
                t, v = "age", e
            elif ":" in e:
                t, v = e.split(":", 2)
            else:
                t, v = 'subject', e
            if t not in _checkers:
                continue
            c = _checkers[t]['build'](v)
            if c:
                self._filters.append(c)

def build_doctext():
    r = """
Query = <TERM> <TERM> ...

Each term is <COMP>:<VALUE> or <PREFIX><VALUE> or <VALUE>, Example:

    from:Bob subject:fix cc:George age:>1w

to search all emails from Bob that have the word "fix" in subject, with George
in Cc list, and are sent before last week. And

    from:Bob subject:fix is:reviewed not:tested

to search all email from Bob that have "fix" in subject, and have been reviewed
but failed testing. It can be simplified as:

    from:Bob fix :reviewed -tested

The normal syntax, <COMP>:<VALUE> can be one of:

"""
    for k, v in _checkers.iteritems():
        r += "    * %-10s - %s\n" % (k, " ".join([x.strip() for x in v['desc'].splitlines()]))
    r += """
As in the examples, there are a few syntax shortcuts as <PREFIX><VALUE>, or
plain <VALUE>:

    * :VALUE and +VALUE equals to is:VALUE
    * -VALUE equals to not:VALUE
    * >VALUE and <VALUE equals to age:>VALUE and age:<VALUE
    * VALUE (with no prefix) equals to keyword:<value>
"""
    return r

doctext = build_doctext()
|
Python
| 0.000216
|
@@ -5856,17 +5856,18 @@
st week.
-
+%0A%0A
And%0A%0A
@@ -6528,23 +6528,23 @@
uals to
-keyword
+subject
:%3Cvalue%3E
|
2c53e6a4fa83bfde0094eb4cb4e18661d6f310f8
|
use log.err here, refs #154
|
lib/carbon/writer.py
|
lib/carbon/writer.py
|
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import time
from os.path import exists, dirname
import errno
import whisper
from carbon import state
from carbon.cache import MetricCache
from carbon.storage import getFilesystemPath, loadStorageSchemas,\
loadAggregationSchemas
from carbon.conf import settings
from carbon import log, events, instrumentation
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.application.service import Service
lastCreateInterval = 0
createCount = 0
schemas = loadStorageSchemas()
agg_schemas = loadAggregationSchemas()
CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95
def optimalWriteOrder():
"""Generates metrics with the most cached values first and applies a soft
rate limit on new metrics"""
global lastCreateInterval
global createCount
metrics = MetricCache.counts()
t = time.time()
metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
log.debug("Sorted %d cache queues in %.6f seconds" % (len(metrics),
time.time() - t))
for metric, queueSize in metrics:
if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
events.cacheSpaceAvailable()
dbFilePath = getFilesystemPath(metric)
dbFileExists = exists(dbFilePath)
if not dbFileExists:
createCount += 1
now = time.time()
if now - lastCreateInterval >= 60:
lastCreateInterval = now
createCount = 1
elif createCount >= settings.MAX_CREATES_PER_MINUTE:
# dropping queued up datapoints for new metrics prevents filling up the entire cache
# when a bunch of new metrics are received.
try:
MetricCache.pop(metric)
except KeyError:
pass
instrumentation.increment('droppedCreates')
continue
try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
datapoints = MetricCache.pop(metric)
except KeyError:
log.msg("MetricCache contention, skipping %s update for now" % metric)
continue # we simply move on to the next metric when this race condition occurs
yield (metric, datapoints, dbFilePath, dbFileExists)
def writeCachedDataPoints():
"Write datapoints until the MetricCache is completely empty"
updates = 0
lastSecond = 0
while MetricCache:
dataWritten = False
for (metric, datapoints, dbFilePath, dbFileExists) in optimalWriteOrder():
dataWritten = True
if not dbFileExists:
archiveConfig = None
xFilesFactor, aggregationMethod = None, None
for schema in schemas:
if schema.matches(metric):
log.creates('new metric %s matched schema %s' % (metric, schema.name))
archiveConfig = [archive.getTuple() for archive in schema.archives]
break
for schema in agg_schemas:
if schema.matches(metric):
log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
xFilesFactor, aggregationMethod = schema.archives
break
if not archiveConfig:
raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)
dbDir = dirname(dbFilePath)
try:
os.makedirs(dbDir)
except OSError as e:
if e.errno != errno.EEXIST:
log.err("%s" % e)
log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
try:
whisper.create(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod, settings.WHISPER_SPARSE_CREATE, settings.WHISPER_FALLOCATE_CREATE)
instrumentation.increment('creates')
except Exception, e:
log.msg("Error creating %s: %s" % (dbFilePath,e))
continue
try:
t1 = time.time()
whisper.update_many(dbFilePath, datapoints)
t2 = time.time()
updateTime = t2 - t1
except:
log.msg("Error writing to %s" % (dbFilePath))
log.err()
instrumentation.increment('errors')
else:
pointCount = len(datapoints)
instrumentation.increment('committedPoints', pointCount)
instrumentation.append('updateTimes', updateTime)
if settings.LOG_UPDATES:
log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime))
# Rate limit update operations
thisSecond = int(t2)
if thisSecond != lastSecond:
lastSecond = thisSecond
updates = 0
else:
updates += 1
if updates >= settings.MAX_UPDATES_PER_SECOND:
time.sleep(int(t2 + 1) - t2)
# Avoid churning CPU when only new metrics are in the cache
if not dataWritten:
time.sleep(0.1)
def writeForever():
while reactor.running:
try:
writeCachedDataPoints()
except:
log.err()
time.sleep(1) # The writer thread only sleeps when the cache is empty or an error occurs
def reloadStorageSchemas():
global schemas
try:
schemas = loadStorageSchemas()
except:
log.msg("Failed to reload storage schemas")
log.err()
def reloadAggregationSchemas():
global agg_schemas
try:
agg_schemas = loadAggregationSchemas()
except:
log.msg("Failed to reload aggregation schemas")
log.err()
def shutdownModifyUpdateSpeed():
try:
settings.MAX_UPDATES_PER_SECOND = settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN
log.msg("Carbon shutting down. Changed the update rate to: " + str(settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN))
except KeyError:
log.msg("Carbon shutting down. Update rate not changed")
class WriterService(Service):
def __init__(self):
self.storage_reload_task = LoopingCall(reloadStorageSchemas)
self.aggregation_reload_task = LoopingCall(reloadAggregationSchemas)
def startService(self):
self.storage_reload_task.start(60, False)
self.aggregation_reload_task.start(60, False)
reactor.addSystemEventTrigger('before', 'shutdown', shutdownModifyUpdateSpeed)
reactor.callInThread(writeForever)
Service.startService(self)
def stopService(self):
self.storage_reload_task.stop()
self.aggregation_reload_task.stop()
Service.stopService(self)
|
Python
| 0
|
@@ -4421,35 +4421,35 @@
:%0A log.
-msg
+err
(%22Error creating
|
9102fdda8c529bf22365112b40d0d5944603ebd3
|
Update position.py
|
ogn/model/position.py
|
ogn/model/position.py
|
import re

from sqlalchemy import Column, String, Integer, Float, Boolean, SmallInteger

from ogn.aprs_utils import fpm2ms
from ogn.model.beacon import Beacon
from wsgiref.simple_server import software_version


class Position(Beacon):
    __tablename__ = "position"

    # Flarm specific data
    address_type = Column(SmallInteger)
    aircraft_type = Column(SmallInteger)
    stealth = Column(Boolean)
    address = Column(String, index=True)
    climb_rate = Column(Float)
    turn_rate = Column(Float)
    signal_strength = Column(Float)
    error_count = Column(Integer)
    frequency_offset = Column(Float)
    gps_status = Column(String)

    software_version = None
    hardware_version = None
    real_id = None

    # Pattern
    address_pattern = re.compile(r"id(\S{2})(\S{6})")
    climb_rate_pattern = re.compile(r"([\+\-]\d+)fpm")
    turn_rate_pattern = re.compile(r"([\+\-]\d+\.\d+)rot")
    signal_strength_pattern = re.compile(r"(\d+\.\d+)dB")
    error_count_pattern = re.compile(r"(\d+)e")
    coordinates_extension_pattern = re.compile(r"\!W(.)(.)!")
    hear_ID_pattern = re.compile(r"hear(\w{4})")
    frequency_offset_pattern = re.compile(r"([\+\-]\d+\.\d+)kHz")
    gps_status_pattern = re.compile(r"gps(\d+x\d+)")
    software_version_pattern = re.compile(r"s(\d+\.\d+)")
    hardware_version_pattern = re.compile(r"h(\d+)")
    real_id_pattern = re.compile(r"r(\w{6})")

    def __init__(self, beacon=None):
        self.heared_aircraft_IDs = list()
        if beacon is not None:
            self.name = beacon.name
            self.receiver_name = beacon.receiver_name
            self.timestamp = beacon.timestamp
            self.latitude = beacon.latitude
            self.longitude = beacon.longitude
            self.ground_speed = beacon.ground_speed
            self.track = beacon.track
            self.altitude = beacon.altitude
            self.comment = beacon.comment
            self.parse(beacon.comment)

    def parse(self, text):
        for part in text.split(' '):
            address_match = self.address_pattern.match(part)
            climb_rate_match = self.climb_rate_pattern.match(part)
            turn_rate_match = self.turn_rate_pattern.match(part)
            signal_strength_match = self.signal_strength_pattern.match(part)
            error_count_match = self.error_count_pattern.match(part)
            coordinates_extension_match = self.coordinates_extension_pattern.match(part)
            hear_ID_match = self.hear_ID_pattern.match(part)
            frequency_offset_match = self.frequency_offset_pattern.match(part)
            gps_status_match = self.gps_status_pattern.match(part)
            software_version_match = self.software_version_pattern.match(part)
            hardware_version_match = self.hardware_version_pattern.match(part)
            real_id_match = self.real_id_pattern.match(part)

            if address_match is not None:
                # Flarm ID type byte in APRS msg: PTTT TTII
                # P => stealth mode
                # TTTTT => aircraftType
                # II => IdType: 0=Random, 1=ICAO, 2=FLARM, 3=OGN
                # (see https://groups.google.com/forum/#!msg/openglidernetwork/lMzl5ZsaCVs/YirmlnkaJOYJ).
                self.address_type = int(address_match.group(1), 16) & 0b00000011
                self.aircraft_type = (int(address_match.group(1), 16) & 0b01111100) >> 2
                self.stealth = ((int(address_match.group(1), 16) & 0b10000000) >> 7 == 1)
                self.address = address_match.group(2)
            elif climb_rate_match is not None:
                self.climb_rate = int(climb_rate_match.group(1))*fpm2ms
            elif turn_rate_match is not None:
                self.turn_rate = float(turn_rate_match.group(1))
            elif signal_strength_match is not None:
                self.signal_strength = float(signal_strength_match.group(1))
            elif error_count_match is not None:
                self.error_count = int(error_count_match.group(1))
            elif coordinates_extension_match is not None:
                dlat = int(coordinates_extension_match.group(1)) / 1000
                dlon = int(coordinates_extension_match.group(2)) / 1000
                self.latitude = self.latitude + dlat
                self.longitude = self.longitude + dlon
            elif hear_ID_match is not None:
                self.heared_aircraft_IDs.append(hear_ID_match.group(1))
            elif frequency_offset_match is not None:
                self.frequency_offset = float(frequency_offset_match.group(1))
            elif gps_status_match is not None:
                self.gps_status = gps_status_match.group(1)
            elif software_version_match is not None:
                self.software_version = float(software_version_match.group(1))
            elif hardware_version_match is not None:
                self.hardware_version = int(hardware_version_match.group(1))
            elif real_id_match is not None:
                self.real_id = real_id_match.group(1)
            else:
                raise Exception("No valid position description: %s" % part)

    def __repr__(self):
        return("<Position %s: %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s>" % (self.name, self.address_type, self.aircraft_type, self.timestamp, self.address_type, self.aircraft_type, self.stealth, self.address, self.climb_rate, self.turn_rate, self.signal_strength, self.error_count, self.frequency_offset, self.gps_status))
|
Python
| 0
|
@@ -156,59 +156,8 @@
con%0A
-from wsgiref.simple_server import software_version%0A
%0A%0Acl
|
c5fac61ca01040d0066aa2ecb30fca24e0edfdd6
|
remove old unused comment
|
pyannote/audio/embedding/triplet_loss/glue.py
|
pyannote/audio/embedding/triplet_loss/glue.py
|
#!/usr/bin/env python
# encoding: utf-8

# The MIT License (MIT)

# Copyright (c) 2016 CNRS

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr

import keras.backend as K
from keras.models import Model
from keras.layers import Input
from keras.layers import merge
from ..glue import Glue


class TripletLoss(Glue):
    """Triplet loss for sequence embedding

        anchor        |-----------|           |---------|
        input    -->  | embedding | --> a --> |         |
        sequence      |-----------|           |         |
                                              |         |
        positive      |-----------|           | triplet |
        input    -->  | embedding | --> p --> |         | --> loss value
        sequence      |-----------|           |  loss   |
                                              |         |
        negative      |-----------|           |         |
        input    -->  | embedding | --> n --> |         |
        sequence      |-----------|           |---------|

    Parameters
    ----------
    margin : float, optional
        Triplet loss margin. Defaults to 0.2.
    positive_only : boolean, optional
        When False, loss is d(a, p) - d(a, n) + margin.
        Default (True) is max(0, d(a, p) - d(a, n) + margin).

    Reference
    ---------
    Hervé Bredin, "TristouNet: Triplet Loss for Speaker Turn Embedding"
    Submitted to ICASSP 2017. https://arxiv.org/abs/1609.04301
    """

    def __init__(self, margin=0.2, positive_only=True):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.positive_only = positive_only
        # # HACK https://github.com/fchollet/keras/issues/3833
        # self.__name__ = 'TripletLoss'

    def _triplet_loss(self, inputs):
        p = K.sum(K.square(inputs[0] - inputs[1]), axis=-1, keepdims=True)
        n = K.sum(K.square(inputs[0] - inputs[2]), axis=-1, keepdims=True)
        loss = p + self.margin - n
        if self.positive_only:
            loss = K.maximum(0, loss)
        return loss

    @staticmethod
    def _output_shape(input_shapes):
        return (input_shapes[0][0], 1)

    @staticmethod
    def _identity_loss(y_true, y_pred):
        return K.mean(y_pred - 0 * y_true)

    def build_model(self, input_shape, design_embedding):
        """Design the model for which the loss is optimized

        Parameters
        ----------
        input_shape: (n_samples, n_features) tuple
            Shape of input sequences.
        design_embedding : function or callable
            This function should take input_shape as input and return a Keras
            model that takes a sequence as input, and returns the embedding as
            output.

        Returns
        -------
        model : Keras model

        See also
        --------
        An example of `design_embedding` is
        pyannote.audio.embedding.models.TristouNet.__call__
        """
        anchor = Input(shape=input_shape, name="anchor")
        positive = Input(shape=input_shape, name="positive")
        negative = Input(shape=input_shape, name="negative")

        embedding = design_embedding(input_shape)
        embedded_anchor = embedding(anchor)
        embedded_positive = embedding(positive)
        embedded_negative = embedding(negative)

        distance = merge(
            [embedded_anchor, embedded_positive, embedded_negative],
            mode=self._triplet_loss, output_shape=self._output_shape)

        model = Model(input=[anchor, positive, negative], output=distance)

        return model

    def loss(self, y_true, y_pred):
        return self._identity_loss(y_true, y_pred)

    def extract_embedding(self, from_model):
        return from_model.layers_by_depth[1][0]
|
Python
| 0
|
@@ -2713,111 +2713,8 @@
only
-%0A # # HACK https://github.com/fchollet/keras/issues/3833%0A # self.__name__ = 'TripletLoss'
%0A%0A
|
ae3ba5dfbedde4440d6ff534a606868f5af8f009
|
remove legacy tensorflow support (#1817)
|
python/dllib/src/test/bigdl/keras/test_net.py
|
python/dllib/src/test/bigdl/keras/test_net.py
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest

import keras.layers as KLayer
from keras.models import Sequential as KSequential
from test.zoo.pipeline.utils.test_utils import ZooTestCase

import zoo.pipeline.api.keras.layers as ZLayer
from zoo.pipeline.api.keras.models import Model as ZModel
from zoo.pipeline.api.keras.models import Sequential as ZSequential
from zoo.pipeline.api.net import Net, TFNet
from bigdl.nn.layer import Linear, Sigmoid, SoftMax, Model as BModel
from bigdl.util.common import *
from bigdl.nn.layer import Sequential

np.random.seed(1337)  # for reproducibility


class TestLayer(ZooTestCase):

    def test_load_bigdl_model(self):
        resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
        model_path = os.path.join(resource_path, "models/bigdl/bigdl_lenet.model")
        model = Net.load_bigdl(model_path)
        model2 = model.new_graph(["reshape2"])
        model2.freeze_up_to(["pool3"])
        model2.unfreeze()
        import numpy as np
        data = np.zeros([1, 1, 28, 28])
        output = model2.forward(data)
        assert output.shape == (1, 192)

    def test_load_caffe_model(self):
        resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
        model_path = os.path.join(resource_path, "models/caffe/test_persist.caffemodel")
        def_path = os.path.join(resource_path, "models/caffe/test_persist.prototxt")
        model = Net.load_caffe(def_path, model_path)
        model2 = model.new_graph(["ip"])
        model2.freeze_up_to(["conv2"])
        model2.unfreeze()

    def test_deprecated_save(self):
        with pytest.raises(Exception) as e_info:
            input = ZLayer.Input(shape=(5,))
            output = ZLayer.Dense(10)(input)
            zmodel = ZModel(input, output, name="graph1")
            zmodel.save(create_tmp_path())

    def test_save_load_Model(self):
        input = ZLayer.Input(shape=(5,))
        output = ZLayer.Dense(10)(input)
        zmodel = ZModel(input, output, name="graph1")
        tmp_path = create_tmp_path()
        zmodel.saveModel(tmp_path, None, True)
        model_reloaded = Net.load(tmp_path)
        input_data = np.random.random([10, 5])
        y = np.random.random([10, 10])
        model_reloaded.compile(optimizer="adam",
                               loss="mse")
        model_reloaded.fit(x=input_data, y=y, batch_size=8, nb_epoch=2)

    def test_save_load_Sequential(self):
        zmodel = ZSequential()
        dense = ZLayer.Dense(10, input_dim=5)
        zmodel.add(dense)
        tmp_path = create_tmp_path()
        zmodel.saveModel(tmp_path, None, True)
        model_reloaded = Net.load(tmp_path)
        input_data = np.random.random([10, 5])
        y = np.random.random([10, 10])
        model_reloaded.compile(optimizer="adam",
                               loss="mse")
        model_reloaded.fit(x=input_data, y=y, batch_size=8, nb_epoch=1)

    def test_load(self):
        input = ZLayer.Input(shape=(5,))
        output = ZLayer.Dense(10)(input)
        zmodel = ZModel(input, output, name="graph1")
        tmp_path = create_tmp_path()
        zmodel.saveModel(tmp_path, None, True)
        model_reloaded = Net.load(tmp_path)
        input_data = np.random.random([3, 5])
        self.compare_output_and_grad_input(zmodel, model_reloaded, input_data)

    def test_load_keras(self):
        model = KSequential()
        model.add(KLayer.Dense(32, activation='relu', input_dim=100))
        tmp_path_json = create_tmp_path() + ".json"
        model_json = model.to_json()
        with open(tmp_path_json, "w") as json_file:
            json_file.write(model_json)
        zmodel = Net.load_keras(json_path=tmp_path_json)
        assert isinstance(zmodel, Sequential)
        tmp_path_hdf5 = create_tmp_path() + ".h5"
        model.save(tmp_path_hdf5)
        zmodel2 = Net.load_keras(hdf5_path=tmp_path_hdf5)
        assert isinstance(zmodel2, Sequential)

    def test_tf_load(self):
        linear = Linear(10, 2)()
        sigmoid = Sigmoid()(linear)
        softmax = SoftMax().set_name("output")(sigmoid)
        model = BModel(linear, softmax)
        input = np.random.random((4, 10))

        tmp_path = create_tmp_path() + "/model.pb"

        model.save_tensorflow([("input", [4, 10])], tmp_path)

        model_reloaded = Net.load_tf(tmp_path, ["input"], ["output"])
        expected_output = model.forward(input)
        output = model_reloaded.forward(input)
        self.assert_allclose(output, expected_output)

    def test_layers_method(self):
        resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
        model_path = os.path.join(resource_path, "models/bigdl/bigdl_lenet.model")
        model = Net.load_bigdl(model_path)
        assert len(model.layers) == 12

    def test_flatten_layers_method(self):
        resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
        model_path = os.path.join(resource_path, "models/bigdl/bigdl_lenet.model")
        model = Net.load_bigdl(model_path)
        assert len(Sequential().add(model).flattened_layers()) == 12


if __name__ == "__main__":
    pytest.main([__file__])
|
Python
| 0
|
@@ -4516,578 +4516,8 @@
l)%0A%0A
- def test_tf_load(self):%0A linear = Linear(10, 2)()%0A sigmoid = Sigmoid()(linear)%0A softmax = SoftMax().set_name(%22output%22)(sigmoid)%0A model = BModel(linear, softmax)%0A input = np.random.random((4, 10))%0A%0A tmp_path = create_tmp_path() + %22/model.pb%22%0A%0A model.save_tensorflow(%5B(%22input%22, %5B4, 10%5D)%5D, tmp_path)%0A%0A model_reloaded = Net.load_tf(tmp_path, %5B%22input%22%5D, %5B%22output%22%5D)%0A expected_output = model.forward(input)%0A output = model_reloaded.forward(input)%0A self.assert_allclose(output, expected_output)%0A%0A
|
137b20e4aa779be3c97c500ab485126085492ce5
|
comment format
|
pywikibot/families/scratchpad_wikia_family.py
|
pywikibot/families/scratchpad_wikia_family.py
|
# -*- coding: utf-8 -*-
from pywikibot import family


class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)
        self.name = 'scratchpad_wikia'
        self.langs = {
            'de':'de.mini.wikia.com',
            'en':'scratchpad.wikia.com',
            'fr':'bloc-notes.wikia.com',
            'ja':'ja.scratchpad.wikia.com',
            'zh':'zh.scratchpad.wikia.com',
        }

        # A few selected big languages for things that we do not want
        # to loop over all languages. This is only needed by the
        # titletranslate.py module, so if you carefully avoid the
        # options, you could get away without these for another
        # wikimedia family.
        self.languages_by_size = ['en','de']

    def version(self, code):
        return "1.14.0"

    def scriptpath(self, code):
        return ''
|
Python
| 0.000001
|
@@ -489,16 +489,24 @@
not want
+ to loop
%0A
@@ -507,24 +507,16 @@
#
- to loop
over al
@@ -554,26 +554,16 @@
d by the
-%0A #
titletr
@@ -572,16 +572,26 @@
slate.py
+%0A #
module,
@@ -620,26 +620,16 @@
void the
-%0A #
options
@@ -648,16 +648,26 @@
get away
+%0A #
without
@@ -684,26 +684,16 @@
another
-%0A #
wikimed
|
c898b68fa8d81963b7a5282e67ecb28764bbd0a3
|
Add comment explaining mocking
|
tests/app/models/test_contact_list.py
|
tests/app/models/test_contact_list.py
|
from datetime import datetime

from app.models.contact_list import ContactList
from app.models.job import PaginatedJobs


def test_created_at():
    created_at = ContactList({'created_at': '2016-05-06T07:08:09.061258'}).created_at
    assert isinstance(created_at, datetime)
    assert created_at.isoformat() == '2016-05-06T08:08:09.061258+01:00'


def test_get_jobs(mock_get_jobs):
    contact_list = ContactList({'id': 'a', 'service_id': 'b'})

    assert isinstance(contact_list.get_jobs(page=123), PaginatedJobs)

    mock_get_jobs.assert_called_once_with(
        'b',
        contact_list_id='a',
        statuses={
            'finished',
            'sending limits exceeded',
            'ready to send',
            'scheduled',
            'sent to dvla',
            'pending',
            'in progress',
        },
        page=123,
    )
|
Python
| 0
|
@@ -509,16 +509,108 @@
edJobs)%0A
+ # mock_get_jobs mocks the underlying API client method, not%0A # contact_list.get_jobs%0A
mock
|
cfe2c5b405cc5cc74fed81e506e717698236f608
|
debug print lines
|
yumoter.py
|
yumoter.py
|
#!/usr/bin/env python2
import sys, os, json, errno, subprocess, yum


class yumoter:
    def __init__(self, configFile, repobasepath):
        self.repobasepath = repobasepath
        self.reloadConfig(configFile)
        self.yb = yum.YumBase()
        self.yb.setCacheDir()

    def reloadConfig(self, jsonFile):
        self.repoConfig = self._getConfig(jsonFile)
        self._getPaths()

    def _getConfig(self, jsonFile):
        fh = open(jsonFile, 'r')
        jsonOutput = json.load(fh)
        fh.close()
        return jsonOutput

    def _mkdir_p(self, path):
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise

    def _runRsync(self, rsrc, rdst, args):
        # str(rsrc), str(rdst), list(args)
        sysCall = ['rsync'] + args + [rsrc, rdst]
        rsyncStdout = []
        rsyncStderr = []
        p = subprocess.Popen(sysCall, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        for line in iter(p.stdout.readline, ""):
            stdoutLine = line.strip() + '\r\n'
            rsyncStdout.append(stdoutLine)
            sys.stdout.write(stdoutLine)
            sys.stdout.flush()
        for line in iter(p.stderr.readline, ""):
            stderrLine = line.strip() + '\r\n'
            rsyncStderr.append(stderrLine)
            sys.stderr.write(stderrLine)
            sys.stderr.flush()
        return (stdoutLine, stderrLine)
        # TODO check return status please. Stop coding like a 12 year old.

    def getDeps(self, pkgObj):
        depDicts = self.yb.findDeps([pkgObj])
        return depDicts

    def _getPaths(self):
        for repo in self.repoConfig:
            repopath = []
            # Does this repo have a path for promotion?
            if 'promotionpath' in self.repoConfig[repo]:
                for promopath in self.repoConfig[repo]['promotionpath']:
                    repopath.append("%s/%s/%s" % (self.repobasepath, self.repoConfig[repo]['path'], promopath))
            else:
                # repo does not have a path for promotion
                repopath.append("%s/%s" % (self.repobasepath, self.repoConfig[repo]['path']))
            self.repoConfig[repo]['fullpaths'] = repopath

    def _mkPaths(self):
        masterPathList = []
        for repo in self.repoConfig:
            if 'promotionpath' in self.repoConfig[repo]:
                for entry in self.repoConfig[repo]['fullpaths']:
                    masterPathList.append(entry)
        for entry in masterPathList:
            if not os.path.isdir(entry):
                print "creating missing dir: %s" % entry
                self._mkdir_p(entry)

    def syncRepos(self):
        for repo in self.repoConfig:
            # Only repos with upstream set need to be synced.
            if 'upstream' in self.repoConfig[repo]:
                # If the dst dir doesn't exist, create it.
                if not os.path.isdir(self.repoConfig[repo]['fullpaths'][0]):
                    self._mkPaths()
                else:
                    print "foo"
                    print self.repoConfig[repo]['fullpaths']
                    #a = self._runRsync(self.repoConfig[repo]['upstream'], self.repoConfig[repo]['fullpaths'][0], ['-av', '--progress'])
                    #print a
            else:
                print "wtf"
                print self.repoConfig[repo]

'''
    def repoSearch(self, pkgName, repos):
        pkgs = yb.pkgSack.returnNewestByNameArch(patterns=pkgName)
        for pkg in pkgs:
            print "%s: %s" % (pkg, pkg.summary)
'''
|
Python
| 0.000003
|
@@ -2802,32 +2802,55 @@
elf.repoConfig:%0A
+ print repo%0A
# On
@@ -3395,98 +3395,8 @@
nt a
-%0A else:%0A print %22wtf%22%0A print self.repoConfig%5Brepo%5D
%0A%0A''
|
b3d2c2a94a227fd7498ce2eded0bde440a521cb2
|
Convert certificate generation to processutils.
|
nova/crypto.py
|
nova/crypto.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
from __future__ import absolute_import
import base64
import binascii
import os
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from oslo_log import log as logging
import paramiko
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def generate_fingerprint(public_key):
try:
pub_bytes = public_key.encode('utf-8')
# Test that the given public_key string is a proper ssh key. The
# returned object is unused since pyca/cryptography does not have a
# fingerprint method.
serialization.load_ssh_public_key(
pub_bytes, backends.default_backend())
pub_data = base64.b64decode(public_key.split(' ')[1])
digest = hashes.Hash(hashes.MD5(), backends.default_backend())
digest.update(pub_data)
md5hash = digest.finalize()
raw_fp = binascii.hexlify(md5hash)
if six.PY3:
raw_fp = raw_fp.decode('ascii')
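        # Render the digest as colon-separated hex byte pairs (aa:bb:cc:...),
        # the conventional fingerprint notation.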
return ':'.join(a + b for a, b in zip(raw_fp[::2], raw_fp[1::2]))
except Exception:
raise exception.InvalidKeypair(
reason=_('failed to generate fingerprint'))
def generate_x509_fingerprint(pem_key):
try:
if isinstance(pem_key, six.text_type):
pem_key = pem_key.encode('utf-8')
cert = x509.load_pem_x509_certificate(
pem_key, backends.default_backend())
raw_fp = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
if six.PY3:
raw_fp = raw_fp.decode('ascii')
return ':'.join(a + b for a, b in zip(raw_fp[::2], raw_fp[1::2]))
except (ValueError, TypeError, binascii.Error) as ex:
raise exception.InvalidKeypair(
reason=_('failed to generate X509 fingerprint. '
'Error message: %s') % ex)
def generate_key_pair(bits=2048):
key = paramiko.RSAKey.generate(bits)
keyout = six.StringIO()
key.write_private_key(keyout)
private_key = keyout.getvalue()
public_key = '%s %s Generated-by-Nova' % (key.get_name(), key.get_base64())
fingerprint = generate_fingerprint(public_key)
return (private_key, public_key, fingerprint)
def ssh_encrypt_text(ssh_public_key, text):
"""Encrypt text with an ssh public key.
If text is a Unicode string, encode it to UTF-8.
"""
if isinstance(text, six.text_type):
text = text.encode('utf-8')
try:
pub_bytes = ssh_public_key.encode('utf-8')
pub_key = serialization.load_ssh_public_key(
pub_bytes, backends.default_backend())
return pub_key.encrypt(text, padding.PKCS1v15())
except Exception as exc:
raise exception.EncryptionFailure(reason=six.text_type(exc))
def generate_winrm_x509_cert(user_id, bits=2048):
"""Generate a cert for passwordless auth for user in project."""
subject = '/CN=%s' % user_id
upn = '%s@localhost' % user_id
with utils.tempdir() as tmpdir:
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
conffile = os.path.abspath(os.path.join(tmpdir, 'temp.conf'))
_create_x509_openssl_config(conffile, upn)
(certificate, _err) = utils.execute(
'openssl', 'req', '-x509', '-nodes', '-days', '3650',
'-config', conffile, '-newkey', 'rsa:%s' % bits,
'-outform', 'PEM', '-keyout', keyfile, '-subj', subject,
'-extensions', 'v3_req_client',
binary=True)
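        # Export the generated key and certificate together as a PKCS#12 blob
        # protected by an empty password.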
(out, _err) = utils.execute('openssl', 'pkcs12', '-export',
'-inkey', keyfile, '-password', 'pass:',
process_input=certificate,
binary=True)
private_key = base64.b64encode(out)
fingerprint = generate_x509_fingerprint(certificate)
if six.PY3:
private_key = private_key.decode('ascii')
certificate = certificate.decode('utf-8')
return (private_key, certificate, fingerprint)
def _create_x509_openssl_config(conffile, upn):
content = ("distinguished_name = req_distinguished_name\n"
"[req_distinguished_name]\n"
"[v3_req_client]\n"
"extendedKeyUsage = clientAuth\n"
"subjectAltName = otherName:""1.3.6.1.4.1.311.20.2.3;UTF8:%s\n")
with open(conffile, 'w') as file:
file.write(content % upn)
|
Python
| 0.00002
|
@@ -1175,16 +1175,58 @@
rt x509%0A
+from oslo_concurrency import processutils%0A
from osl
@@ -4294,32 +4294,39 @@
ficate, _err) =
+process
utils.execute(%0A
@@ -4617,16 +4617,23 @@
_err) =
+process
utils.ex
|
39c34860fa9992f38892aa026c5b0c6547bd4b23
|
Fix flaky evergreen test
|
tests/content/test_content_manager.py
|
tests/content/test_content_manager.py
|
from django.test import override_settings
from django.utils import timezone
from bulbs.campaigns.models import Campaign
from bulbs.content.models import Content
from bulbs.utils.test import make_content, BaseIndexableTestCase
from example.testcontent.models import TestContentObj, TestContentObjTwo
class ContentManagerTestCase(BaseIndexableTestCase):
def setUp(self):
super(ContentManagerTestCase, self).setUp()
campaign = Campaign.objects.create(
sponsor_name="TheCobbler",
start_date=timezone.now() - timezone.timedelta(days=5),
end_date=timezone.now() + timezone.timedelta(days=5)
)
make_content(evergreen=True, published=timezone.now(), _quantity=50)
make_content(TestContentObj, campaign=campaign, published=timezone.now(), _quantity=50)
Content.search_objects.refresh()
def test_sponsored(self):
sponsored = Content.search_objects.sponsored().extra(from_=0, size=50)
qs = TestContentObj.objects.filter(campaign__isnull=False)
self.assertEqual(qs.count(), sponsored.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in sponsored])
)
def test_evergreen(self):
evergreen = Content.search_objects.evergreen().extra(from_=0, size=50)
qs = Content.objects.filter(evergreen=True)
self.assertEqual(qs.count(), evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
@override_settings(VIDEO_DOC_TYPE=TestContentObjTwo.search_objects.mapping.doc_type)
def test_evergreen_video(self):
make_content(TestContentObjTwo, evergreen=True, published=self.now, _quantity=12)
make_content(TestContentObjTwo, published=self.now, _quantity=12)
Content.search_objects.refresh()
evergreen = Content.search_objects.evergreen_video().extra(from_=0, size=50)
qs = TestContentObjTwo.objects.filter(evergreen=True)
self.assertEqual(12, evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
|
Python
| 0.000003
|
@@ -670,16 +670,36 @@
content(
+TestReadingListObj,
evergree
|
3c82d0ca4a314ffd052b99ece7afec6aaea4e063
|
Update BatchKwargs to_id tests
|
tests/datasource/test_batch_kwargs.py
|
tests/datasource/test_batch_kwargs.py
|
import pytest
import os
from freezegun import freeze_time
try:
from unittest import mock
except ImportError:
import mock
from great_expectations.datasource.types import *
@freeze_time("1955-11-05")
def test_batch_kwargs_fingerprint():
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv"
}
)
#demonstrate *output* kwargs post-datasource/generator
# When there is only a single "important" key used in batch_kwargs, the ID can prominently include it
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="19551105T000000.000000Z",
fingerprint="path:/data/test.csv")
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"partition_id": "20190101"
}
)
# When partition_id is explicitly included, we can extract it and potentially still have a human readable id
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="20190101",
fingerprint="path:/data/test.csv")
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"iterator": True,
"partition_id": "3",
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
)
# When there are multiple relevant keys we use the hash of the batch_kwargs dictionary
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="3",
fingerprint="a5d67721928ee13317a81459818a556b")
def test_batch_kwargs_from_dict():
test_batch_kwargs = {
"path": "/data/test.csv",
"partition_id": "1"
}
# The build_batch_fingerprint convenience method makes it possible to build a batch_fingerprint from a dict.
# HOWEVER, using it can be difficult since the default-ignored keys may depend on a specific batch_kwargs type
assert BatchKwargs.build_batch_fingerprint(test_batch_kwargs) == BatchFingerprint(
partition_id="1",
fingerprint="path:/data/test.csv")
def test_batch_kwargs_attributes_and_keys():
# When BatchKwargs are typed, the required keys should become accessible via dot notation and immutable
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"iterator": True,
"partition_id": "3",
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
)
assert test_batch_kwargs.path == "/data/test.csv"
assert test_batch_kwargs["path"] == test_batch_kwargs.path
# We do not allow setting the special attributes this way
with pytest.raises(AttributeError):
test_batch_kwargs.path = "/a/new/path.csv"
# Nor do we provide attribute-style access to unreserved names
with pytest.raises(AttributeError):
assert test_batch_kwargs.names == ["start", "type", "quantity", "end"]
# But we can access and set even protected names using dictionary notation
assert test_batch_kwargs["names"] == ["start", "type", "quantity", "end"]
test_batch_kwargs["path"] = "/a/new/path.csv"
assert test_batch_kwargs.path == "/a/new/path.csv"
|
Python
| 0
|
@@ -182,35 +182,8 @@
*%0A%0A%0A
-@freeze_time(%221955-11-05%22)%0A
def
@@ -323,69 +323,8 @@
)%0A
-%0A #demonstrate *output* kwargs post-datasource/generator%0A%0A
@@ -458,115 +458,19 @@
rgs.
-batch_fingerprint == BatchFingerprint(%0A partition_id=%2219551105T000000.000000Z%22,%0A fingerprint=
+to_id() ==
%22pat
@@ -478,33 +478,33 @@
:/data/test.csv%22
-)
+%0A
%0A%0A test_batch
@@ -595,398 +595,72 @@
%22
-partition_id%22: %2220190101%22%0A %7D%0A )%0A%0A # When partition_id is explicitly included, we can extract it and potentially still have a human readable id%0A assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(%0A partition_id=%2220190101%22,%0A fingerprint=%22path:/data/test.csv%22)%0A%0A test_batch_kwargs = PathBatchKwargs(%0A %7B%0A %22path%22: %22/data/test.csv%22,%0A
+reader_method%22: %22read_csv%22,%0A %22reader_options%22: %7B%0A
@@ -693,37 +693,8 @@
- %22partition_id%22: %223%22,%0A
@@ -723,32 +723,36 @@
e7,%0A
+
+
%22parse_dates%22: %5B
@@ -750,32 +750,36 @@
dates%22: %5B0, 3%5D,%0A
+
%22nam
@@ -812,32 +812,46 @@
antity%22, %22end%22%5D%0A
+ %7D%0A
%7D%0A )%0A
@@ -949,681 +949,115 @@
-assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(%0A partition_id=%223%22,%0A fingerprint=%22a5d67721928ee13317a81459818a556b%22)%0A%0A%0Adef test_batch_kwargs_from_dict():%0A test_batch_kwargs = %7B%0A %22path%22: %22/data/test.csv%22,%0A %22partition_id%22: %221%22%0A %7D%0A%0A # The build_batch_fingerprint convenience method makes it possible to build a batch_fingerprint from a dict.%0A # HOWEVER, using it can be difficult since the default-ignored keys may depend on a specific batch_kwargs type%0A assert BatchKwargs.build_batch_fingerprint(test_batch_kwargs) == BatchFingerprint(%0A partition_id=%221%22,%0A fingerprint=%22path:/data/test.csv%22)
+print(test_batch_kwargs.to_id())%0A assert test_batch_kwargs.to_id() == %228607e071c6383509c8cd8f4c1ea65518%22
%0A%0A%0Ad
@@ -1314,58 +1314,106 @@
%22
-iterator%22: True,%0A %22partition_id%22: %223%22,%0A
+reader_method%22: %22read_csv%22,%0A %22reader_options%22: %7B%0A %22iterator%22: True,%0A
@@ -1442,32 +1442,36 @@
e7,%0A
+
+
%22parse_dates%22: %5B
@@ -1469,32 +1469,36 @@
dates%22: %5B0, 3%5D,%0A
+
%22nam
@@ -1531,32 +1531,46 @@
antity%22, %22end%22%5D%0A
+ %7D%0A
%7D%0A )%0A
@@ -2137,16 +2137,34 @@
kwargs%5B%22
+reader_options%22%5D%5B%22
names%22%5D
|
8c8bc1ef8e3ba7519d4612856a420ed410974e12
|
add redactor on installed apps settings
|
opps/core/__init__.py
|
opps/core/__init__.py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
trans_app_label = _('Opps')
|
Python
| 0
|
@@ -77,32 +77,109 @@
s _%0A
-trans_app_label = _('Opps'
+from django.conf import settings%0A%0A%0A%0Atrans_app_label = _('Opps')%0Asettings.INSTALLED_APPS += ('redactor',
)%0A
|
a1df5125589e397aa45494cfe7ef8c7b7ca2174d
|
Update PkgDistributionCreator.py
|
MTM_Installer/PkgDistributionCreator.py
|
MTM_Installer/PkgDistributionCreator.py
|
#!/usr/bin/env python
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *** Drew Coobs <coobs1@illinois.edu> ***
# Modified version of Chris Gerke's PkgDistributionCreator script
# https://github.com/autopkg/cgerke-recipes/blob/master/SharedProcessors/PkgDistributionCreator.py
#
import os.path
import subprocess
import shutil
import xml.etree.ElementTree as etree
from glob import glob
from autopkglib import Processor, ProcessorError
__all__ = ["PkgDistributionCreator"]
class PkgDistributionCreator(Processor):
description = ("Bundles together munki pkg installers with MTM onboarding pkg. ")
input_variables = {
"source_file1": {
"required": True,
"description": ("Path to a source file (MyCoolPkg1.pkg) "),
},
"source_file2": {
"required": True,
"description": ("Path to a source file (MyCoolPkg2.pkg) "),
},
"source_file3": {
"required": True,
"description": ("Path to a source file (MyCoolPkg3.pkg) "),
},
"source_file4": {
"required": True,
"description": ("Path to a source file (MyCoolPkg4.pkg) "),
},
"source_file5": {
"required": True,
"description": ("Path to a source file (MyCoolPkg5.pkg) "),
},
"source_file6": {
"required": True,
"description": ("Path to a source file (MyCoolPkg6.pkg) "),
},
"distribution_file": {
"required": True,
"description": ("Destination path of distribution file. "),
},
"package_dir": {
"required": True,
"description": ("Directory containing source pkgs. "),
},
"output_file": {
"required": True,
"description": ("Name of output file. "),
},
}
output_variables = {
}
__doc__ = description
source_path = None
def pkgConvert(self):
if os.path.exists('/usr/bin/productbuild'):
try:
self.output("Found binary %s" % '/usr/bin/productbuild')
except OSError as e:
raise ProcessorError(
"Can't find binary %s: %s" % ('/usr/bin/productbuild', e.strerror))
try:
pbcmd = ["/usr/bin/productbuild",
"--synthesize",
"--package", self.env['source_file1'],
"--package", self.env['source_file2'],
"--package", self.env['source_file3'],
"--package", self.env['source_file4'],
"--package", self.env['source_file5'],
"--package", self.env['source_file6'],
self.env['distribution_file']]
p = subprocess.Popen(pbcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
except OSError as e:
raise ProcessorError("Creation of distribution file failed with error code %d: %s"
% (e.errno, e.strerror))
if p.returncode != 0:
raise ProcessorError("Creation of distribution file %s failed: %s"
% (self.env['output_file'], err))
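        # Second pass: build the final flat installer from the distribution
        # file and the directory of source packages.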
try:
pbcmd = ["/usr/bin/productbuild",
"--distribution", self.env['distribution_file'],
"--package-path", self.env['package_dir'],
self.env['output_file']]
p = subprocess.Popen(pbcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
except OSError as e:
raise ProcessorError("cmmac execution failed with error code %d: %s"
% (e.errno, e.strerror))
if p.returncode != 0:
raise ProcessorError("cmmac conversion of %s failed: %s"
% (self.env['output_file'], err))
root = etree.Element('/Users/Shared/AutoPkg/Cache/com.github.Gibbun.pkg.UofI_MTM_Installer/distribution.xml')
child = etree.Element('<title>My Awesome App</title>')
root.append(child)
def main(self):
if os.path.exists(self.env['source_file1']):
try:
self.output("Found %s" % self.env['source_file1'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file1'], e.strerror))
if os.path.exists(self.env['source_file2']):
try:
self.output("Found %s" % self.env['source_file2'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file2'], e.strerror))
if os.path.exists(self.env['source_file3']):
try:
self.output("Found %s" % self.env['source_file3'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file3'], e.strerror))
if os.path.exists(self.env['source_file4']):
try:
self.output("Found %s" % self.env['source_file4'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file4'], e.strerror))
if os.path.exists(self.env['source_file5']):
try:
self.output("Found %s" % self.env['source_file5'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file5'], e.strerror))
if os.path.exists(self.env['source_file6']):
try:
self.output("Found %s" % self.env['source_file6'])
self.pkgConvert()
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file6'], e.strerror))
if __name__ == '__main__':
processor = PkgDistributionCreator()
processor.execute_shell()
|
Python
| 0
|
@@ -4663,16 +4663,42 @@
n.xml')%0A
+ self.output(root)%0A
@@ -4752,16 +4752,43 @@
itle%3E')%0A
+ self.output(child)%0A
|
9174a4d404cafd872a75c761db6357361fd9599d
|
Support requests with a body and url data
|
demands/service.py
|
demands/service.py
|
import logging
import requests
import time
log = logging.getLogger(__name__)
class Request(object):
"""
Request object which provides functionality to send and receive
http requests/responses.
"""
def __init__(self, url, method, data, headers, cookies, verify):
self.url = url
self.method = method
self.data = data or {}
self.cookies = cookies or {}
self.headers = headers or {}
self.verify = verify
self.auth = None
def _compose_request_arguments(self):
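        # POST/PUT/PATCH requests send self.data as the request body; all
        # other methods send it as URL parameters.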
arguments = {}
if self.method in ('POST', 'PUT', 'PATCH'):
arguments['data'] = self.data
else:
arguments['params'] = self.data
arguments['cookies'] = self.cookies
arguments['headers'] = self.headers
arguments['auth'] = self.auth
if self.url.startswith('https'):
arguments['verify'] = self.verify
return arguments
def authenticate(self, username, password):
"""Enable http authentication with the provided username and password"""
log.debug('Authentication via HTTP auth as "%s"', username)
self.auth = (username, password)
def send(self):
"""Execute the request, and return the response"""
method = self.method.lower()
request_arguments = self._compose_request_arguments()
start_time = time.time()
response = getattr(requests, method)(self.url, **request_arguments)
log.debug('%s HTTP [%s] call to "%s" %.2fms', response.status_code, self.method, self.url,
(time.time() - start_time) * 1000)
log.debug('HTTP request data: %s', request_arguments)
return response
class HTTPServiceError(Exception):
def __init__(self, code, details):
self.code = code
super(Exception, self).__init__(
'code: %s, details: %s' % (code, details)
)
class HTTPService(object):
"""
Provides an interface which allows arbitrary methods to be defined and
called on a remote http service.
"""
def __init__(self, config):
self.config = config
def pre_send(self, request, **params):
"""
        Override to modify the request object; called just before the
        request is sent.
"""
if 'username' in self.config:
request.authenticate(
self.config['username'], self.config['password'])
if 'client_name' in self.config:
request.headers['User-Agent'] = '%s %s - %s' % (
self.config['client_name'],
self.config.get('client_version', 'x.y.z'),
self.config.get('app_name', 'unknown'),
)
def post_send(self, request, response, **params):
"""Override to modify response object returned by call made by request object."""
response.is_ok = response.status_code < 300
if (not response.is_ok and
not response.status_code in params.get('expected_response_codes', [])):
log.error('Unexpected response from %s: url: %s, code: %s, details: %s',
self.__class__.__name__, response.url, response.status_code, response.content)
raise HTTPServiceError(response.status_code, response.content)
def get(self, path, data=None, cookies=None, headers=None, **kwargs):
return self._make_call('GET', path, data, cookies, headers, **kwargs)
def post(self, path, data=None, cookies=None, headers=None, **kwargs):
return self._make_call('POST', path, data, cookies, headers, **kwargs)
def put(self, path, data=None, cookies=None, headers=None, **kwargs):
return self._make_call('PUT', path, data, cookies, headers, **kwargs)
def delete(self, path, data=None, cookies=None, headers=None, **kwargs):
return self._make_call('DELETE', path, data, cookies, headers, **kwargs)
    def _make_call(self, method, path, data, cookies, headers, **kwargs):
"""
Call the service method defined by the passed path and http method.
Additional arguments include cookies, headers, and data values.
"""
base = self.config.get('url')
url = '/'.join([base.rstrip('/'), path.lstrip('/')])
request = Request(url, method, data, headers, cookies, self.config.get('verify_ssl', True))
self.pre_send(request, **kwargs)
response = request.send()
self.post_send(request, response, **kwargs)
return response
|
Python
| 0
|
@@ -244,32 +244,38 @@
l, method, data,
+ body,
headers, cookie
@@ -365,24 +365,55 @@
data or %7B%7D%0A
+ self.body = body or %7B%7D%0A
self
@@ -583,175 +583,519 @@
-arguments = %7B%7D%0A if self.method in ('POST', 'PUT', 'PATCH'):%0A arguments%5B'data'%5D = self.data%0A else:%0A arguments%5B'params'%5D = self.data%0A
+%22%22%22Compose arguments as expected by the requests library.%0A%0A Some of the variables are munged (requests vs demands):%0A%0A data / self.body - The body to attach the request. If a dictionary%0A is provided, form-encoding will take place (also used%0A for file uploads)%0A params / self.data - Dictionary of URL parameters to%0A append to the URL%0A%0A %22%22%22%0A arguments = %7B%7D%0A arguments%5B'params'%5D = self.data%0A arguments%5B'data'%5D = self.body
%0A
@@ -3701,32 +3701,43 @@
path, data=None,
+ body=None,
cookies=None, h
@@ -3801,32 +3801,38 @@
ET', path, data,
+ body,
cookies, header
@@ -3872,32 +3872,43 @@
path, data=None,
+ body=None,
cookies=None, h
@@ -3973,32 +3973,38 @@
ST', path, data,
+ body,
cookies, header
@@ -4043,32 +4043,43 @@
path, data=None,
+ body=None,
cookies=None, h
@@ -4143,32 +4143,38 @@
UT', path, data,
+ body,
cookies, header
@@ -4220,24 +4220,35 @@
, data=None,
+ body=None,
cookies=Non
@@ -4327,16 +4327,22 @@
h, data,
+ body,
cookies
@@ -4403,24 +4403,30 @@
path, data,
+ body,
headers, co
@@ -4585,16 +4585,22 @@
headers,
+ body,
and dat
@@ -4765,16 +4765,22 @@
d, data,
+ body,
headers
|
9412b362b649a8eaa62448bef5772b0f001efdbb
|
Remove the download syncing as it's no longer part of Conveyor
|
conveyor/core.py
|
conveyor/core.py
|
from __future__ import absolute_import
from __future__ import division
import bz2
import csv
import logging
import logging.config
import io
import time
import urlparse
import lxml.html
import redis
import requests
import slumber
import yaml
from apscheduler.scheduler import Scheduler
from conveyor.processor import Processor, get_key
# @@@ Switch all Urls to SSL
# @@@ Switch to better exception classes
logger = logging.getLogger(__name__)
class Conveyor(object):
def __init__(self, config_file=None, *args, **kwargs):
super(Conveyor, self).__init__(*args, **kwargs)
if config_file is None:
config_file = "config.yml"
with open(config_file) as f:
self.config = yaml.safe_load(f.read())
logging.config.dictConfig(self.config["logging"])
self.redis = redis.StrictRedis(**self.config.get("redis", {}).get("connection", {}))
def run(self):
self.scheduler = Scheduler()
if self.config["conveyor"].get("schedule", {}).get("packages", {}):
self.scheduler.add_interval_job(self.packages, **self.config["conveyor"]["schedule"]["packages"])
if self.config["conveyor"].get("schedule", {}).get("downloads", {}):
self.scheduler.add_interval_job(self.downloads, **self.config["conveyor"]["schedule"]["downloads"])
self.scheduler.start()
try:
while True:
time.sleep(999)
except KeyboardInterrupt:
logger.info("Shutting down Conveyor...")
self.scheduler.shutdown(wait=False)
def packages(self):
if not self.redis.get(get_key(self.config.get("redis", {}).get("prefix", None), "pypi:since")):
# This is the first time we've ran so we need to do a bulk import
raise Exception(" Cannot process changes with no value for the last successful run.")
warehouse = slumber.API(
self.config["conveyor"]["warehouse"]["url"],
auth=(
self.config["conveyor"]["warehouse"]["auth"]["username"],
self.config["conveyor"]["warehouse"]["auth"]["password"],
)
)
session = requests.session(verify=self.config["conveyor"].get("verify", True))
processor = Processor(
index=self.config["conveyor"]["index"],
warehouse=warehouse,
session=session,
store=self.redis,
store_prefix=self.config.get("redis", {}).get("prefix", None)
)
processor.process()
def downloads(self):
session = requests.session(verify=self.config["conveyor"].get("verify", True))
warehouse = slumber.API(
self.config["conveyor"]["warehouse"]["url"],
auth=(
self.config["conveyor"]["warehouse"]["auth"]["username"],
self.config["conveyor"]["warehouse"]["auth"]["password"],
)
)
# Get a listing of all the Files
resp = session.get(self.config["conveyor"]["stats"])
resp.raise_for_status()
html = lxml.html.fromstring(resp.content)
urls = [(urlparse.urljoin(self.config["conveyor"]["stats"], x), x) for x in html.xpath("//a/@href")]
for url, statfile in urls:
if not url.endswith(".bz2"):
continue
date = statfile[:-4]
year, month, day = date.split("-")
last_modified_key = get_key(self.config.get("redis", {}).get("prefix", ""), "pypi:download:last_modified:%s" % url)
last_modified = self.redis.get(last_modified_key)
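            # Send If-Modified-Since so unchanged stats files come back as
            # HTTP 304 and can be skipped.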
headers = {"If-Modified-Since": last_modified} if last_modified else None
resp = session.get(url, headers=headers, prefetch=True)
if resp.status_code == 304:
logger.info("Skipping %s, it has not been modified since %s", statfile, last_modified)
continue
resp.raise_for_status()
logger.info("Computing download counts from %s", statfile)
data = bz2.decompress(resp.content)
csv_r = csv.DictReader(io.BytesIO(data), ["project", "filename", "user_agent", "downloads"])
for row in csv_r:
row["date"] = date
row["downloads"] = int(row["downloads"])
# See if we have a Download object for this yet
downloads = warehouse.downloads.get(project=row["project"], filename=row["filename"], date__year=year, date__month=month, date__day=day, user_agent=row["user_agent"])
if downloads["meta"]["total_count"] == 1:
warehouse.downloads(downloads["objects"][0]["id"]).put(row)
elif downloads["meta"]["total_count"] == 0:
warehouse.downloads.post(row)
else:
RuntimeError("There are More than 1 Download items returned")
if "Last-Modified" in resp.headers:
self.redis.set(last_modified_key, resp.headers["Last-Modified"])
else:
self.redis.delete(last_modified_key)
break
|
Python
| 0
|
@@ -1148,198 +1148,8 @@
%5D)%0A%0A
- if self.config%5B%22conveyor%22%5D.get(%22schedule%22, %7B%7D).get(%22downloads%22, %7B%7D):%0A self.scheduler.add_interval_job(self.downloads, **self.config%5B%22conveyor%22%5D%5B%22schedule%22%5D%5B%22downloads%22%5D)%0A%0A
@@ -2492,2672 +2492,4 @@
s()%0A
-%0A def downloads(self):%0A session = requests.session(verify=self.config%5B%22conveyor%22%5D.get(%22verify%22, True))%0A%0A warehouse = slumber.API(%0A self.config%5B%22conveyor%22%5D%5B%22warehouse%22%5D%5B%22url%22%5D,%0A auth=(%0A self.config%5B%22conveyor%22%5D%5B%22warehouse%22%5D%5B%22auth%22%5D%5B%22username%22%5D,%0A self.config%5B%22conveyor%22%5D%5B%22warehouse%22%5D%5B%22auth%22%5D%5B%22password%22%5D,%0A )%0A )%0A%0A # Get a listing of all the Files%0A resp = session.get(self.config%5B%22conveyor%22%5D%5B%22stats%22%5D)%0A resp.raise_for_status()%0A%0A html = lxml.html.fromstring(resp.content)%0A urls = %5B(urlparse.urljoin(self.config%5B%22conveyor%22%5D%5B%22stats%22%5D, x), x) for x in html.xpath(%22//a/@href%22)%5D%0A%0A for url, statfile in urls:%0A if not url.endswith(%22.bz2%22):%0A continue%0A%0A date = statfile%5B:-4%5D%0A year, month, day = date.split(%22-%22)%0A%0A last_modified_key = get_key(self.config.get(%22redis%22, %7B%7D).get(%22prefix%22, %22%22), %22pypi:download:last_modified:%25s%22 %25 url)%0A last_modified = self.redis.get(last_modified_key)%0A%0A headers = %7B%22If-Modified-Since%22: last_modified%7D if last_modified else None%0A%0A resp = session.get(url, headers=headers, prefetch=True)%0A%0A if resp.status_code == 304:%0A logger.info(%22Skipping %25s, it has not been modified since %25s%22, statfile, last_modified)%0A continue%0A%0A resp.raise_for_status()%0A%0A logger.info(%22Computing download counts from %25s%22, statfile)%0A%0A data = bz2.decompress(resp.content)%0A csv_r = csv.DictReader(io.BytesIO(data), %5B%22project%22, %22filename%22, %22user_agent%22, %22downloads%22%5D)%0A%0A for row in csv_r:%0A row%5B%22date%22%5D = date%0A row%5B%22downloads%22%5D = int(row%5B%22downloads%22%5D)%0A%0A # See if we have a Download object for this yet%0A downloads = warehouse.downloads.get(project=row%5B%22project%22%5D, filename=row%5B%22filename%22%5D, date__year=year, date__month=month, date__day=day, user_agent=row%5B%22user_agent%22%5D)%0A%0A if downloads%5B%22meta%22%5D%5B%22total_count%22%5D == 1:%0A warehouse.downloads(downloads%5B%22objects%22%5D%5B0%5D%5B%22id%22%5D).put(row)%0A elif downloads%5B%22meta%22%5D%5B%22total_count%22%5D == 0:%0A warehouse.downloads.post(row)%0A else:%0A RuntimeError(%22There are More than 1 Download items returned%22)%0A%0A if %22Last-Modified%22 in resp.headers:%0A self.redis.set(last_modified_key, resp.headers%5B%22Last-Modified%22%5D)%0A else:%0A self.redis.delete(last_modified_key)%0A%0A break%0A
|
596e90626bf88be0ea1461ecb80924ef6b3b85e6
|
use udp for direct communication
|
metric-generator/generator.py
|
metric-generator/generator.py
|
import socket
import sys
import random
import argparse
from time import time, sleep
class TokenBucket(object):
"""An implementation of the token bucket algorithm.
>>> bucket = TokenBucket(80, 0.5)
>>> print bucket.consume(10)
True
adapted from http://code.activestate.com/recipes/511490-implementation-of-the-token-bucket-algorithm/?in=lang-python
Not thread safe.
"""
__slots__ = ['capacity', '_tokens', 'fill_rate', 'timestamp']
def __init__(self, tokens, fill_rate):
"""tokens is the total tokens in the bucket. fill_rate is the
rate in tokens/second that the bucket will be refilled."""
self.capacity = float(tokens)
self._tokens = float(tokens)
self.fill_rate = float(fill_rate)
self.timestamp = time()
def consume(self, tokens, block=True):
"""Consume tokens from the bucket. Returns True if there were
sufficient tokens.
If there are not enough tokens and block is True, sleeps until the
bucket is replenished enough to satisfy the deficiency.
If there are not enough tokens and block is False, returns False.
It is an error to consume more tokens than the bucket capacity.
"""
assert tokens <= self.capacity, \
'Attempted to consume {} tokens from a bucket with capacity {}' \
.format(tokens, self.capacity)
if block and tokens > self.tokens:
deficit = tokens - self._tokens
delay = deficit / self.fill_rate
# print 'Have {} tokens, need {}; sleeping {} seconds'.format(self._tokens, tokens, delay)
sleep(delay)
if tokens <= self.tokens:
self._tokens -= tokens
return True
else:
return False
@property
def tokens(self):
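        # Lazily refill based on the time elapsed since the last check,
        # capped at the bucket's capacity.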
if self._tokens < self.capacity:
now = time()
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
class InfiniteTokenBucket(object):
"""TokenBucket implementation with infinite capacity, i.e. consume always
returns True."""
__slots__ = ()
def __init__(self, tokens=None, fill_rate=None):
pass
def consume(self, tokens, block=True):
return True
@property
def tokens(self):
return float('infinity')
def rate_limit(args, bandwidth_or_burst, steady_state_bandwidth=None):
"""Limit the bandwidth of a generator.
Given a data generator, return a generator that yields the data at no
higher than the specified bandwidth. For example, ``rate_limit(data, _256k)``
will yield from data at no higher than 256KB/s.
The three argument form distinguishes burst from steady-state bandwidth,
so ``rate_limit(data, _1024k, _128k)`` would allow data to be consumed at
128KB/s with an initial burst of 1MB.
"""
bandwidth = steady_state_bandwidth or bandwidth_or_burst
rate_limiter = TokenBucket(bandwidth_or_burst, bandwidth)
for n in range(args.number):
data_point = 'latency,id={},cluster={},multistack={},direct={},rate={} sentat=0000000000000'.format(n, args.cluster_size, args.multistack, args.direct, args.rate)
rate_limiter.consume(len(data_point))
yield data_point
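# Illustrative note: the call below, rate_limit(args, required_byte_rate/32,
# required_byte_rate), yields metric strings throttled to roughly args.rate
# kB/s after an initial burst allowance of required_byte_rate/32 bytes.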
parser = argparse.ArgumentParser(description='Starts generating dumb metrics and sending them to monitoring client at given rate')
parser.add_argument('--cluster-size', dest='cluster_size', type=int, help='The number of kafka brokers in datasink')
parser.add_argument('--multistack', dest='multistack', help='Flag to indicate if testing bridged cloud installation')
parser.add_argument('--rate', required=True, dest='rate', type=float, help='Transmission rate of metrics in kB/s')
parser.add_argument('--number', dest='number', type=int, help='The amount of metrics to generate')
parser.add_argument('--direct', dest='direct', help='Indicates if messages are sent directly to the consumer or through the ANDy framework')
parser.add_argument('--ip', dest='ip', type=str, help='The IP of the receiving endpoint')
parser.add_argument('--port', dest='port', type=int, help='The port of the receiving endpoint')
parser.set_defaults(cluster_size=1)
parser.set_defaults(multistack=False)
parser.set_defaults(direct=False)
parser.set_defaults(ip='127.0.0.1')
parser.set_defaults(port=9876)
parser.set_defaults(number=100)
args = parser.parse_args()
required_byte_rate = args.rate * 1024
HOST, PORT = args.ip, args.port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
start_time = time()
bytes_sent = 0
messages_sent = 0
for dp in rate_limit(args, required_byte_rate/32, required_byte_rate):
millis = int(round(time() * 1000))
payload = dp.replace('sentat=0000000000000', 'sentat={}\n'.format(millis))
bytes_sent += sock.send(payload)
messages_sent += 1
end_time = time()
time_total = end_time - start_time
print 'Messages sent {}'.format(messages_sent)
print 'Sent {} bytes in {} second, which translates to {} kBps rate'.format(bytes_sent, time_total, (bytes_sent/1024)/time_total)
sock.close()
|
Python
| 0
|
@@ -4219,16 +4219,39 @@
gs.port%0A
+%0Aif(not args.direct):%0A%09
sock = s
@@ -4299,16 +4299,17 @@
STREAM)%0A
+%09
sock.con
@@ -4326,16 +4326,79 @@
, PORT))
+%0Aelse %0A%09sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
%0A%0Astart_
@@ -4629,16 +4629,36 @@
illis))%0A
+%0A%09if(args.direct):%0A%09
%09bytes_s
@@ -4683,16 +4683,76 @@
ayload)%0A
+%09else:%0A%09%09bytes_sent += sock.sendto(payload, (HOST, PORT))%0A%09%0A
%09message
|
6a02c5e1844ad7d1b9ae50cd5dbae6975fb685ee
|
Make internal error more clear
|
numba/error.py
|
numba/error.py
|
import traceback
def format_pos(node):
if node is not None and hasattr(node, 'lineno'):
return "%s:%s: " % (node.lineno, node.col_offset)
else:
return ""
class NumbaError(Exception):
"Some error happened during compilation"
def __init__(self, node, msg=None, *args):
if msg is None:
node, msg = None, node
self.node = node
self.msg = msg
self.args = args
def __str__(self):
try:
pos = format_pos(self.node)
msg = "%s%s %s" % (pos, self.msg, " ".join(map(str, self.args)))
return msg.rstrip()
except:
traceback.print_exc()
return ""
class InternalError(NumbaError):
"Indicates a compiler bug"
class _UnknownAttribute(Exception):
pass
|
Python
| 0.000321
|
@@ -684,16 +684,61 @@
return %22
+%3Cinternal error creating numba error message%3E
%22%0A%0A%0Aclas
@@ -842,8 +842,9 @@
pass
+%0A
|
4bbbac6f88f6f10bb6da82fb00704455115f9d9b
|
test seems to be working
|
py/testdir_multi_jvm/test_GLM_covtype20x_s3n_thru_hdfs.py
|
py/testdir_multi_jvm/test_GLM_covtype20x_s3n_thru_hdfs.py
|
import os, json, unittest, time, shutil, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import h2o_browse as h2b
import h2o_import as h2i
import time, random
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
# all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
h2o_hosts.build_cloud_with_hosts(1,
# this is for our amazon ec hdfs
# see https://github.com/0xdata/h2o/wiki/H2O-and-s3n
hdfs_name_node='10.78.14.235:9000',
hdfs_version='0.20.2')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_covtype20x_s3n_thru_hdfs(self):
csvFilename = "covtype20x.data"
csvPathname = csvFilename
# csvFilename = "train_set.csv"
# csvPathname = "allstate/ + csvFilename
# https://s3.amazonaws.com/home-0xdiag-datasets/allstate/train_set.csv
URI = "s3n://home-0xdiag-datasets/"
s3nKey = URI + csvPathname
trialMax = 3
timeoutSecs = 500
for trial in range(trialMax):
trialStart = time.time()
# since we delete the key, we have to re-import every iteration
# s3n URI thru HDFS is not typical.
importHDFSResult = h2o.nodes[0].import_hdfs(URI)
s3nFullList = importHDFSResult['succeeded']
### print "s3nFullList:", h2o.dump_json(s3nFullList)
self.assertGreater(len(s3nFullList),8,"Didn't see more than 8 files in s3n?")
key2 = csvFilename + "_" + str(trial) + ".hex"
print "Loading s3n key: ", s3nKey, 'thru HDFS'
start = time.time()
parseKey = h2o.nodes[0].parse(s3nKey, key2,
timeoutSecs=500, retryDelaySecs=10, pollTimeoutSecs=60)
elapsed = time.time() - start
print s3nKey, 'parse time:', parseKey['response']['time']
print "parse result:", parseKey['destination_key']
kwargs = {
'y': 54,
'family': 'binomial',
'link': 'logit',
'num_cross_validation_folds': 2,
'case_mode': '=',
'case': 1,
'max_iter': 50,
'beta_epsilon': 1e-3}
timeoutSecs = 300
# L2
kwargs.update({'alpha': 0, 'lambda': 0})
start = time.time()
glm = h2o_cmd.runGLMOnly(parseKey=parseKey, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time()
print "glm (L2) end on ", csvPathname, 'took', elapsed - start, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
h2o.check_sandbox_for_errors()
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLMOnly(parseKey=parseKey, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time()
print "glm (Elastic) end on ", csvPathname, 'took', elapsed - start, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
h2o.check_sandbox_for_errors()
# L1
kwargs.update({'alpha': 1.0, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLMOnly(parseKey=parseKey, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time()
print "glm (L1) end on ", csvPathname, 'took', elapsed - start, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
h2o.check_sandbox_for_errors()
print "Deleting key in H2O so we get it from S3 (if ec2) or nfs again.", \
"Otherwise it would just parse the cached key."
storeView = h2o.nodes[0].store_view()
### print "storeView:", h2o.dump_json(storeView)
print "Removing", s3nKey
removeKeyResult = h2o.nodes[0].remove_key(key=s3nKey)
### print "removeKeyResult:", h2o.dump_json(removeKeyResult)
print "Trial #", trial, "completed in", time.time() - trialStart, "seconds.", \
if __name__ == '__main__':
h2o.unit_main()
|
Python
| 0.000006
|
@@ -103,16 +103,25 @@
2o_hosts
+, h2o_glm
%0Aimport
@@ -1337,34 +1337,8 @@
= 3
-%0A timeoutSecs = 500
%0A%0A
@@ -1925,16 +1925,46 @@
u HDFS'%0A
+ timeoutSecs = 500%0A
@@ -2071,19 +2071,27 @@
outSecs=
-500
+timeoutSecs
, retryD
@@ -2165,17 +2165,16 @@
- start%0A
-%0A
@@ -2192,16 +2192,29 @@
3nKey, '
+h2o reported
parse ti
@@ -2248,16 +2248,156 @@
'time'%5D%0A
+ print %22parse end on %22, s3nKey, 'took', elapsed, 'seconds',%5C%0A %22%25d pct. of timeout%22 %25 ((elapsed*100)/timeoutSecs)%0A%0A
@@ -2709,18 +2709,17 @@
_iter':
-50
+8
,%0A
@@ -2777,17 +2777,17 @@
tSecs =
-3
+5
00%0A
@@ -3071,32 +3071,24 @@
ok', elapsed
- - start
, 'seconds',
@@ -3564,32 +3564,24 @@
ok', elapsed
- - start
, 'seconds',
@@ -4051,24 +4051,16 @@
elapsed
- - start
, 'secon
|
619269367c9e38fe55ae8667ead8486f63467d2b
|
Fix case where apache passes DN in the format we expect rather than ssl format.
|
src/python/apache_utils.py
|
src/python/apache_utils.py
|
"""
Apache Utils.
Tools for dealing with credential checking from X509 SSL certificates.
These are useful when using Apache as a reverse proxy to check user
credentials against a local DB.
"""
from collections import namedtuple
import cherrypy
from sqlalchemy_utils import create_db, db_session
from tables import Users
VerifiedUser = namedtuple('VerifiedUser', ('id', 'dn', 'ca', 'admin'))
def name_from_dn(client_dn):
"""
Get human-readable name from DN.
Attempt to determine a meaningful name from a
    client's DN. Requires the DN to have already been
    converted to the more usual slash delimited style.
Args:
client_dn (str): The client DN
Returns:
str: The human-readable name
"""
cns = (token.strip('CN= ') for token in client_dn.split('/')
if token.startswith('CN='))
return sorted(cns, key=len)[-1]
def apache_client_convert(client_dn, client_ca=None):
"""
Convert Apache style client certs.
Convert from the Apache comma delimited style to the
more usual slash delimited style.
Args:
client_dn (str): The client DN
client_ca (str): [Optional] The client CA
Returns:
tuple: The converted client (DN, CA)
"""
client_dn = '/' + '/'.join(reversed(client_dn.split(',')))
if client_ca is not None:
client_ca = '/' + '/'.join(reversed(client_ca.split(',')))
return client_dn, client_ca
class CredentialDispatcher(object):
"""
Dispatcher that checks SSL credentials.
This dispatcher is a wrapper that simply checks SSL credentials and
then hands off to the wrapped dispatcher.
"""
def __init__(self, users_dburl, dispatcher, admin_only=False):
"""Initialise."""
self._users_dburl = users_dburl
self._dispatcher = dispatcher
self._admin_only = admin_only
def __call__(self, path):
"""Dispatch."""
required_headers = set(['Ssl-Client-S-Dn', 'Ssl-Client-I-Dn', 'Ssl-Client-Verify'])
missing_headers = required_headers.difference(cherrypy.request.headers.iterkeys())
if missing_headers:
raise cherrypy.HTTPError(401, 'Unauthorized: Incomplete certificate information '
'available, required: %s' % list(missing_headers))
client_dn, client_ca = apache_client_convert(cherrypy.request.headers['Ssl-Client-S-Dn'],
cherrypy.request.headers['Ssl-Client-I-Dn'])
client_verified = cherrypy.request.headers['Ssl-Client-Verify']
if client_verified != 'SUCCESS':
raise cherrypy.HTTPError(401, 'Unauthorized: Cert not verified for user DN: %s, CA: %s.'
% (client_dn, client_ca))
create_db(self._users_dburl)
with db_session(self._users_dburl) as session:
users = session.query(Users)\
.filter(Users.dn == client_dn)\
.filter(Users.ca == client_ca)\
.all()
if not users:
raise cherrypy.HTTPError(403, 'Forbidden: Unknown user. user: (%s, %s)'
% (client_dn, client_ca))
if len(users) > 1:
raise cherrypy.HTTPError(500, 'Internal Server Error: Duplicate user detected. user: (%s, %s)'
% (client_dn, client_ca))
if users[0].suspended:
raise cherrypy.HTTPError(403, 'Forbidden: User is suspended by VO. user: (%s, %s)'
% (client_dn, client_ca))
if self._admin_only and not users[0].admin:
raise cherrypy.HTTPError(403, 'Forbidden: Admin users only')
cherrypy.request.verified_user = VerifiedUser(users[0].id,
users[0].dn,
users[0].ca,
users[0].admin)
return self._dispatcher(path)
__all__ = ('VerifiedUser', 'name_from_dn', 'apache_client_convert', 'CredentialDispatcher')
|
Python
| 0
|
@@ -1230,24 +1230,66 @@
CA)%0A %22%22%22%0A
+ if not client_dn.startswith('/'):%0A
client_d
@@ -1335,24 +1335,28 @@
plit(',')))%0A
+
if clien
@@ -1373,16 +1373,20 @@
t None:%0A
+
|
429bf52eb482955cfe195708898ce275e1a72dcb
|
Validate input.
|
src/devilry_qualifiesforexam/devilry_qualifiesforexam/rest/preview.py
|
src/devilry_qualifiesforexam/devilry_qualifiesforexam/rest/preview.py
|
from djangorestframework.views import View
from djangorestframework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from devilry_qualifiesforexam.pluginhelpers import create_sessionkey
from devilry.apps.core.models import Period
from devilry.utils.groups_groupedby_relatedstudent_and_assignment import GroupsGroupedByRelatedStudentAndAssignment
from devilry_subjectadmin.rest.auth import IsPeriodAdmin
class Preview(View):
"""
Generate the data required to provide a preview for the qualified for exam wizard.
# GET
## Parameters
The following parameters are required:
- ``periodid``: The ID of the period. Supplied as the last part of the URL-path.
404 is returned unless the user is admin on this period.
- ``pluginsessionid``: Forwarded from the first page of the wizard. It is an ID
used to lookup the output from the plugin.
## Returns
An object/dict with the following attributes:
- ``pluginoutput``: The serialized output from the plugin.
- ``perioddata``: All results for all students on the period.
"""
permissions = (IsAuthenticated, IsPeriodAdmin)
def get(self, request, id):
pluginsessionid = self.request.GET['pluginsessionid']
period = get_object_or_404(Period, pk=id)
previewdata = self.request.session[create_sessionkey(pluginsessionid)]
grouper = GroupsGroupedByRelatedStudentAndAssignment(period)
return {
'perioddata': grouper.serialize(),
'pluginoutput': previewdata.serialize()
}
|
Python
| 0.000017
|
@@ -96,16 +96,125 @@
ticated%0A
+from djangorestframework.response import ErrorResponse%0Afrom djangorestframework import status as statuscodes%0A
from dja
@@ -1346,27 +1346,210 @@
.GET
-%5B'pluginsessionid'%5D
+.get('pluginsessionid', None)%0A if not pluginsessionid:%0A raise ErrorResponse(statuscodes.HTTP_400_BAD_REQUEST,%0A %7B'detail': '%60%60pluginsessionid%60%60 is a required parameter'%7D)
%0A
|
17cef49cb97baae5d3300a368a1dbc472e193b1a
|
Add GIT_RESTRICTED_BRANCHES
|
osgbuild/constants.py
|
osgbuild/constants.py
|
"""Global constants for osg-build"""
import os
import sys
WD_RESULTS = '_build_results'
WD_PREBUILD = '_final_srpm_contents'
WD_UNPACKED = '_upstream_srpm_contents'
WD_UNPACKED_TARBALL = '_upstream_tarball_contents'
WD_QUILT = '_quilt'
AFS_CACHE_PATH = '/p/vdt/public/html/upstream'
AFS_CACHE_PREFIX = 'file://' + AFS_CACHE_PATH
WEB_CACHE_PREFIX = 'http://vdt.cs.wisc.edu/upstream'
DEFAULT_CONFIG_FILE = os.path.expanduser("~/.osg-build.ini")
ALT_DEFAULT_CONFIG_FILE = os.path.expanduser("~/.vdt-build.ini")
KOJI_USER_CONFIG_DIR = os.path.expanduser("~/.koji")
OSG_KOJI_USER_CONFIG_DIR = os.path.expanduser("~/.osg-koji")
KOJI_CLIENT_CERT = os.path.join(KOJI_USER_CONFIG_DIR, "client.crt")
KOJI_CONF = "osg-koji-site.conf"
OLD_KOJI_CONF = "osg-koji.conf"
DATA_DIR = "/usr/share/osg-build"
KOJI_HUB = "http://koji-hub.batlab.org"
HTTPS_KOJI_HUB = "https://koji-hub.batlab.org"
DATA_FILE_SEARCH_PATH = [sys.path[0],
os.path.join(sys.path[0], "data"),
DATA_DIR]
SVN_ROOT = "https://vdt.cs.wisc.edu/svn"
SVN_REDHAT_PATH = "native/redhat/"
SVN_RESTRICTED_BRANCHES = {
r'^trunk$' : 'main',
r'^branches/upcoming$' : 'upcoming',
r'^branches/osg-(?P<osgver>\d+\.\d+)$' : 'versioned'}
KOJI_RESTRICTED_TARGETS = {
r'^(el\d+)-osg$' : 'main', #old main
r'^osg-(el\d+)$' : 'main',
r'^(el\d+)-osg-upcoming$' : 'upcoming', #old upcoming
r'^osg-upcoming-(el\d+)$' : 'upcoming',
r'^osg-(?P<osgver>\d+\.\d+)-(el\d+)$' : 'versioned'}
CSL_KOJI_DIR = "/p/vdt/workspace/koji-1.6.0"
OSG_REMOTE = 'https://github.com/opensciencegrid/Software-Redhat.git'
KNOWN_GIT_REMOTES = ['https://github.com/unlhcc/hcc-packaging.git',
'git@github.com:unlhcc/hcc-packaging.git',
OSG_REMOTE]
# Map the authenticated URL to an anonymous checkout URL.
GIT_REMOTE_MAPS = {'git@github.com:unlhcc/hcc-packaging.git': 'https://github.com/unlhcc/hcc-packaging.git'}
DEFAULT_BUILDOPTS_COMMON = {
'autoclean': True,
'cache_prefix': 'AUTO',
'dry_run': False,
'full_extract': False,
'getfiles': False,
'koji_backend': None,
'kojilogin': None,
'koji_wrapper': True,
'mock_clean': True,
'mock_config': 'AUTO',
'mock_config_from_koji': None,
'no_wait': False,
'redhat_releases': None,
'regen_repos': False,
'scratch': False,
'vcs': None,
'target_arch': None,
'working_directory': '.',
}
ALLBUILD_BUILDOPTS = DEFAULT_BUILDOPTS_COMMON.copy()
ALLBUILD_BUILDOPTS.update({
'no_wait': True,
'regen_repos': False,
'scratch': False,
'svn': True
})
ALLBUILD_ALLOWED_OPTNAMES = [
'koji_backend', 'kojilogin', 'koji_wrapper', 'no_wait', 'scratch']
DEFAULT_BUILDOPTS_BY_DVER = {
'5': {
'distro_tag': 'osg.el5',
'koji_tag': 'el5-osg',
'koji_target': 'el5-osg',
'redhat_release': '5',
},
'6': {
'distro_tag': 'osg.el6',
'koji_tag': 'el6-osg',
'koji_target': 'el6-osg',
'redhat_release': '6',
}
}
DVERS = DEFAULT_BUILDOPTS_BY_DVER.keys()
BUGREPORT_EMAIL = "osg-software@opensciencegrid.org"
|
Python
| 0.00001
|
@@ -1617,16 +1617,213 @@
ioned'%7D%0A
+GIT_RESTRICTED_BRANCHES = %7B%0A r'%5E(%5Cw*/)?master$' : 'main',%0A r'%5E(%5Cw*/)?upcoming$' : 'upcoming',%0A r'%5E(%5Cw*/)?osg-(?P%3Cosgver%3E%5Cd+%5C.%5Cd+)$' : 'versioned'%7D%0A%0A
CSL_KOJI
|
8ab7ad1f6aee485c64a7e1347c76e628cc820ba8
|
add some docker Builder args
|
src/py/gopythongo/builders/docker.py
|
src/py/gopythongo/builders/docker.py
|
# -* encoding: utf-8 *-
import argparse
import gopythongo.shared.docker_args
from gopythongo.utils import print_info, highlight
from gopythongo.builders import BaseBuilder
from typing import Any
class DockerBuilder(BaseBuilder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@property
def builder_name(self) -> str:
return u"docker"
def add_args(self, parser: argparse.ArgumentParser) -> None:
gopythongo.shared.docker_args.add_shared_args(parser)
def validate_args(self, args: argparse.Namespace) -> None:
gopythongo.shared.docker_args.validate_shared_args(args)
def build(self, args: argparse.Namespace) -> None:
print_info("Building with %s" % highlight("docker"))
builder_class = DockerBuilder
|
Python
| 0
|
@@ -392,17 +392,16 @@
return
-u
%22docker%22
@@ -401,16 +401,16 @@
docker%22%0A
+
%0A def
@@ -459,32 +459,32 @@
arser) -%3E None:%0A
-
gopython
@@ -530,16 +530,875 @@
arser)%0A%0A
+ gp_docker = parser.add_argument_group(%22Docker Builder options%22)%0A gp_docker.add_argument(%22--docker-buildfile%22, dest=%22docker_buildfile%22, default=None,%0A help=%22Specify a Dockerfile to build the the build environment. The build commands will %22%0A %22then be executed inside the resulting container.%22)%0A gp_docker.add_argument(%22--docker-leave-containers%22, dest=%22docker_leave_containers%22, action=%22store_true%22,%0A default=False, env_var=%22DOCKER_LEAVE_CONTAINERS%22,%0A help=%22After creating a build environment and a runtime container, if this option is %22%0A %22used, GoPythonGo will not use 'docker rm' and 'docker rmi' to clean up the %22%0A %22resulting containers.%22)%0A%0A
def
|
6248a0b813fc6598d964639ad696ecd506015918
|
Rename to TaarifaAPI
|
taarifa_api/settings.py
|
taarifa_api/settings.py
|
"""Global API configuration."""
from os import environ
from urlparse import urlparse
from schemas import facility_schema, request_schema, resource_schema, \
service_schema
API_NAME = 'Taarifa'
URL_PREFIX = 'api'
if 'EVE_DEBUG' in environ:
DEBUG = True
if 'MONGOLAB_URI' in environ:
url = urlparse(environ['MONGOLAB_URI'])
MONGO_HOST = url.hostname
MONGO_PORT = url.port
MONGO_USERNAME = url.username
MONGO_PASSWORD = url.password
MONGO_DBNAME = url.path[1:]
else:
MONGO_DBNAME = API_NAME
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH) and deletes of individual items
# (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'DELETE']
services = {
"schema": service_schema,
}
requests = {
"schema": request_schema,
"source": "requests",
"key": "service_code",
}
facilities = {
"item_title": "facility",
"schema": facility_schema,
}
resources = {
"schema": resource_schema,
"versioning": True,
"source": "resources",
"key": "facility_code",
}
DOMAIN = {
'services': services,
'requests': requests,
'facilities': facilities,
'resources': resources,
}
# FIXME: Temporarily allow CORS requests for development purposes
X_DOMAINS = "*"
|
Python
| 0.999999
|
@@ -191,16 +191,19 @@
'Taarifa
+API
'%0AURL_PR
|
27ef7070e8eb538e74ba5f69a156a1fee14b5af3
|
Comment correction, `get_list` is now `filter`.
|
tests/modeltests/or_lookups/models.py
|
tests/modeltests/or_lookups/models.py
|
"""
19. OR lookups
To perform an OR lookup, or a lookup that combines ANDs and ORs,
combine QuerySet objects using & and | operators.
Alternatively, use positional arguments, and pass one or more expressions
of clauses using the variable ``django.db.models.Q`` (or any object with
a get_sql method).
"""
from django.db import models
class Article(models.Model):
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
class Meta:
ordering = ('pub_date',)
def __unicode__(self):
return self.headline
__test__ = {'API_TESTS':"""
>>> from datetime import datetime
>>> from django.db.models import Q
>>> a1 = Article(headline='Hello', pub_date=datetime(2005, 11, 27))
>>> a1.save()
>>> a2 = Article(headline='Goodbye', pub_date=datetime(2005, 11, 28))
>>> a2.save()
>>> a3 = Article(headline='Hello and goodbye', pub_date=datetime(2005, 11, 29))
>>> a3.save()
>>> Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye')
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye'))
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye'))
[]
# You can shorten this syntax with code like the following,
# which is especially useful if building the query in stages:
>>> articles = Article.objects.all()
>>> articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye')
[]
>>> articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye')
[<Article: Hello and goodbye>]
>>> Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello')
[<Article: Hello and goodbye>]
>>> Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye')
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood')
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(Q(pk=1) | Q(pk=2))
[<Article: Hello>, <Article: Goodbye>]
>>> Article.objects.filter(Q(pk=1) | Q(pk=2) | Q(pk=3))
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
# You could also use "in" to accomplish the same as above.
>>> Article.objects.filter(pk__in=[1,2,3])
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(pk__in=[1,2,3,4])
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
# Passing "in" an empty list returns no results ...
>>> Article.objects.filter(pk__in=[])
[]
# ... but can return results if we OR it with another query.
>>> Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye'))
[<Article: Goodbye>, <Article: Hello and goodbye>]
# Q arg objects are ANDed
>>> Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye'))
[<Article: Hello and goodbye>]
# Q arg AND order is irrelevant
>>> Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello')
[<Article: Hello and goodbye>]
# Try some arg queries with operations other than get_list
>>> Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye'))
<Article: Hello and goodbye>
>>> Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count()
3
>>> list(Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values())
[{'headline': u'Hello and goodbye', 'pub_date': datetime.datetime(2005, 11, 29, 0, 0), 'id': 3}]
>>> Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([1,2])
{1: <Article: Hello>}
# Demonstrating exclude with a Q object
>>> Article.objects.exclude(Q(headline__startswith='Hello'))
[<Article: Goodbye>]
# The 'complex_filter' method supports framework features such as
# 'limit_choices_to' which normally take a single dictionary of lookup arguments
# but need to support arbitrary queries via Q objects too.
>>> Article.objects.complex_filter({'pk': 1})
[<Article: Hello>]
>>> Article.objects.complex_filter(Q(pk=1) | Q(pk=2))
[<Article: Hello>, <Article: Goodbye>]
"""}
|
Python
| 0.000002
|
@@ -3355,16 +3355,15 @@
han
-get_list
+filter.
%0A%3E%3E%3E
|
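A minimal sketch of the Q-object composition the doctests above exercise. It assumes a configured Django project defining this record's Article model; the "myapp" import path is illustrative:

from django.db.models import Q
from myapp.models import Article  # hypothetical app path for the model above

hello = Q(headline__startswith='Hello')
bye = Q(headline__contains='bye')

either = Article.objects.filter(hello | bye)  # OR of the two clauses
both = Article.objects.filter(hello & bye)    # AND of the two clauses
# Positional Q arguments are ANDed, so this matches `both`:
assert list(both) == list(Article.objects.filter(hello, bye))
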
ab8930c771d71c09186f94fb554ee0e6d82cea43
|
Remove ignore source from multi push notification commands #11
|
notification/management/commands/multipush.py
|
notification/management/commands/multipush.py
|
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from notification.apns.apns import APNs, Frame, Payload, PayloadAlert
from notification.models import DeviceToken, CertFile
import logging
import os.path
import random
import sys
import time
CERT_FILE_UPLOAD_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'files/'
)
class Command(BaseCommand):
    help = 'Send a push notification to multiple device tokens.'
def __init__(self):
self.frame = Frame()
self.expiry = int(time.time() + 3600)
self.priority = 10
def add_arguments(self, parser):
parser.add_argument(
"-s", "--sandbox",
action="store_true",
dest="sandbox",
default=False,
help="Use apple sandbox.",
)
parser.add_argument(
'-t', '--token',
action='store',
nargs='+',
type=str,
dest='device_tokens',
help='Target device tokens.',
)
parser.add_argument(
'--title',
action='store',
type=str,
metavar='TITLE',
dest='title',
help='Title displayed in push notification.',
)
parser.add_argument(
'--subtitle',
action='store',
type=str,
metavar='SUBTITLE',
dest='subtitle',
            help='Subtitle displayed in push notification.',
)
parser.add_argument(
'--body',
action='store',
type=str,
metavar='BODY',
dest='body',
help='Body displayed in push notification.',
)
parser.add_argument(
'--sound',
action='store',
type=str,
metavar='SOUND',
dest='sound',
default='default',
help='Sounds to be heard when push notification is received.',
)
parser.add_argument(
'--badge',
action='store',
type=int,
metavar='BADGE',
dest='badge',
default=1,
help='Badge displayed on application icon.',
)
parser.add_argument(
'-c', '--contentavailable',
action='store_true',
dest='content_available',
default=False,
help='Use content-available. (Support for iOS7 or higher)',
)
parser.add_argument(
'-m', '--mutablecontent',
action='store_true',
dest='mutable_content',
default=False,
help='Use mutable-content. (Support for iOS9 or higher)',
)
def handle(self, *args, **options):
error = False
if options['device_tokens'] is None:
try:
                raise ValueError('Please specify device tokens (-t or --token)')
except ValueError as e:
error = True
logging.error(e)
if options['title'] is None:
try:
                raise ValueError('Please provide a title for the push notification (--title)')
except ValueError as e:
error = True
logging.error(e)
if error:
sys.exit()
device_tokens = list(filter(lambda device_token:
DeviceToken.objects.filter(device_token=device_token).count() > 0,
options['device_tokens']))
_ = map(lambda item: logging.warning('There is no match for the specified device token: {}'.format(item)),
list(set(options['device_tokens']) - set(device_tokens)))
try:
cert_file = CertFile.objects.get(target_mode=int(not options['sandbox']), is_use=True)
except ObjectDoesNotExist:
sys.exit(logging.error('Certificate file has not been uploaded'))
apns = APNs(use_sandbox=options['sandbox'], cert_file=CERT_FILE_UPLOAD_DIR + cert_file.filename, enhanced=True)
identifier = random.getrandbits(32)
payload_alert = PayloadAlert(title=options['title'], subtitle=options['subtitle'], body=options['body'])
payload = Payload(alert=payload_alert if payload_alert.body is not None else payload_alert.title,
sound=options['sound'],
badge=options['badge'],
content_available=options['content_available'],
mutable_content=options['mutable_content'])
_ = map(lambda device_token:
apns.gateway_server.send_notification(device_token, payload, identifier=identifier),
device_tokens)
|
Python
| 0
|
@@ -143,15 +143,8 @@
PNs,
- Frame,
Pay
@@ -162,16 +162,16 @@
adAlert%0A
+
from not
@@ -276,20 +276,8 @@
sys%0A
-import time%0A
%0A%0ACE
@@ -439,16 +439,16 @@
mand):%0A%0A
+
help
@@ -504,135 +504,8 @@
.'%0A%0A
- def __init__(self):%0A self.frame = Frame()%0A self.expiry = int(time.time() + 3600)%0A self.priority = 10%0A%0A
|
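The token-validation step above relies on map() for its logging side effects, which only works on Python 2 (Python 3's map is lazy, so the warnings would never fire). A small Python 3 sketch of the same check with plain loops; the function name is illustrative:

import logging

def split_known_tokens(submitted, registered):
    """Return the submitted tokens that are registered, warning on the rest."""
    registered = set(registered)
    known = [token for token in submitted if token in registered]
    for token in set(submitted) - registered:
        logging.warning('There is no match for the specified device token: %s', token)
    return known

assert split_known_tokens(['a', 'b', 'x'], ['a', 'b', 'c']) == ['a', 'b']
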
059a799b9c347b6abfcd2daa3678d98cd0884210
|
Add "no cover" to teardown() and handle_address_delete() on TiedModelRealtimeSignalProcessor. These are never called.
|
ovp_search/signals.py
|
ovp_search/signals.py
|
from django.db import models
from haystack import signals
from ovp_projects.models import Project
from ovp_organizations.models import Organization
from ovp_core.models import GoogleAddress
class TiedModelRealtimeSignalProcessor(signals.BaseSignalProcessor):
"""
  TiedModelRealTimeSignalProcessor handles updates to an index tied to a model.
  We need to be able to detect changes to a model and rebuild another index,
such as detecting changes to GoogleAddress and updating the index
for projects and organizations.
"""
attach_to = [
(Project, 'handle_save', 'handle_delete'),
(Organization, 'handle_save', 'handle_delete'),
(GoogleAddress, 'handle_address_save', 'handle_address_delete'),
]
m2m = [
Project.causes.through, Project.skills.through, Organization.causes.through
]
def setup(self):
for item in self.attach_to:
models.signals.post_save.connect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.connect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.connect(self.handle_m2m, sender=item)
def teardown(self):
for item in self.attach_to:
models.signals.post_save.disconnect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.disconnect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.disconnect(self.handle_m2m, sender=item)
def handle_address_save(self, sender, instance, **kwargs):
""" Custom handler for address save """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_save(obj.__class__, obj)
def handle_address_delete(self, sender, instance, **kwargs):
""" Custom handler for address delete """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_delete(obj.__class__, obj)
def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance)
def find_associated_with_address(self, instance):
""" Returns list with projects and organizations associated with given address """
objects = []
objects += list(Project.objects.filter(address=instance))
objects += list(Organization.objects.filter(address=instance))
return objects
|
Python
| 0
|
@@ -1131,27 +1131,70 @@
%0A%0A
-def teardown(self):
+# never really called%0A def teardown(self): # pragma: no cover
%0A
@@ -1713,32 +1713,87 @@
_class__, obj)%0A%0A
+ # this function is never really called on sqlite dbs%0A
def handle_add
@@ -1830,32 +1830,51 @@
ance, **kwargs):
+ # pragma: no cover
%0A %22%22%22 Custom
|
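What the patch above does, reduced to its essentials: a "# pragma: no cover" comment on a def line tells coverage.py to exclude the whole function from the report, which keeps unreachable teardown paths from dragging coverage down. A sketch with illustrative bodies:

def teardown():  # pragma: no cover
    # Never invoked under the test runner, so coverage.py ignores it.
    print('disconnecting signal handlers')

def handle_address_delete(sender, instance, **kwargs):  # pragma: no cover
    # Unreachable on sqlite backends; excluded from the coverage report.
    print('reindexing objects for', instance)
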
847d652c4ef179bbf94c0e322dc3277858fcf08c
|
Add h5cc shlib argument to correctly link hdf5 libraries
|
mesonbuild/dependencies/hdf5.py
|
mesonbuild/dependencies/hdf5.py
|
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import subprocess
import shutil
from pathlib import Path
from .. import mlog
from ..mesonlib import split_args, listify
from .base import (DependencyException, DependencyMethods, ExternalDependency, ExternalProgram,
PkgConfigDependency)
class HDF5Dependency(ExternalDependency):
def __init__(self, environment, kwargs):
language = kwargs.get('language', 'c')
super().__init__('hdf5', environment, kwargs, language=language)
kwargs['required'] = False
kwargs['silent'] = True
self.is_found = False
methods = listify(self.methods)
if language not in ('c', 'cpp', 'fortran'):
raise DependencyException('Language {} is not supported with HDF5.'.format(language))
if set([DependencyMethods.AUTO, DependencyMethods.PKGCONFIG]).intersection(methods):
pkgconfig_files = ['hdf5', 'hdf5-serial']
PCEXE = shutil.which('pkg-config')
if PCEXE:
# some distros put hdf5-1.2.3.pc with version number in .pc filename.
ret = subprocess.run([PCEXE, '--list-all'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
universal_newlines=True)
if ret.returncode == 0:
for pkg in ret.stdout.split('\n'):
if pkg.startswith(('hdf5')):
pkgconfig_files.append(pkg.split(' ', 1)[0])
pkgconfig_files = list(set(pkgconfig_files)) # dedupe
for pkg in pkgconfig_files:
pkgdep = PkgConfigDependency(pkg, environment, kwargs, language=self.language)
if not pkgdep.found():
continue
self.compile_args = pkgdep.get_compile_args()
                # some broken pkg-config files don't actually list the full path to the needed includes
newinc = []
for arg in self.compile_args:
if arg.startswith('-I'):
stem = 'static' if kwargs.get('static', False) else 'shared'
if (Path(arg[2:]) / stem).is_dir():
newinc.append('-I' + str(Path(arg[2:]) / stem))
self.compile_args += newinc
# derive needed libraries by language
pd_link_args = pkgdep.get_link_args()
link_args = []
for larg in pd_link_args:
lpath = Path(larg)
# some pkg-config hdf5.pc (e.g. Ubuntu) don't include the commonly-used HL HDF5 libraries,
# so let's add them if they exist
# additionally, some pkgconfig HDF5 HL files are malformed so let's be sure to find HL anyway
if lpath.is_file():
hl = []
if language == 'cpp':
hl += ['_hl_cpp', '_cpp']
elif language == 'fortran':
hl += ['_hl_fortran', 'hl_fortran', '_fortran']
hl += ['_hl'] # C HL library, always needed
suffix = '.' + lpath.name.split('.', 1)[1] # in case of .dll.a
for h in hl:
hlfn = lpath.parent / (lpath.name.split('.', 1)[0] + h + suffix)
if hlfn.is_file():
link_args.append(str(hlfn))
# HDF5 C libs are required by other HDF5 languages
link_args.append(larg)
else:
link_args.append(larg)
self.link_args = link_args
self.version = pkgdep.get_version()
self.is_found = True
self.pcdep = pkgdep
return
if DependencyMethods.AUTO in methods:
wrappers = {'c': 'h5cc', 'cpp': 'h5c++', 'fortran': 'h5fc'}
comp_args = []
link_args = []
# have to always do C as well as desired language
for lang in set([language, 'c']):
prog = ExternalProgram(wrappers[lang], silent=True)
if not prog.found():
return
cmd = prog.get_command() + ['-show']
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, timeout=15)
if p.returncode != 0:
mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
mlog.debug(mlog.bold('Standard output\n'), p.stdout)
mlog.debug(mlog.bold('Standard error\n'), p.stderr)
return
args = split_args(p.stdout)
for arg in args[1:]:
if arg.startswith(('-I', '-f', '-D')) or arg == '-pthread':
comp_args.append(arg)
elif arg.startswith(('-L', '-l', '-Wl')):
link_args.append(arg)
elif Path(arg).is_file():
link_args.append(arg)
self.compile_args = comp_args
self.link_args = link_args
self.is_found = True
return
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.PKGCONFIG]
|
Python
| 0.000001
|
@@ -4930,32 +4930,116 @@
return%0A
+ shlib_arg = '-noshlib' if kwargs.get('static', False) else '-shlib'%0A
@@ -5066,16 +5066,27 @@
nd() + %5B
+shlib_arg,
'-show'%5D
|
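A standalone sketch of the detection step this diff changes: the HDF5 compiler wrapper is asked to print its underlying command line, and passing -shlib (or -noshlib for static builds) first makes the printed flags reference the shared (or static) libraries. It assumes an h5cc wrapper on PATH; shlex.split stands in for Meson's split_args:

import shlex
import subprocess

def hdf5_link_args(static=False):
    shlib_arg = '-noshlib' if static else '-shlib'
    result = subprocess.run(['h5cc', shlib_arg, '-show'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True, timeout=15)
    if result.returncode != 0:
        raise RuntimeError(result.stderr)
    # Keep only linker-relevant flags, as the detection loop above does.
    args = shlex.split(result.stdout)
    return [a for a in args[1:] if a.startswith(('-L', '-l', '-Wl'))]
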
2f357ac185e7728e0a0afec6827500c78a4b2796
|
Update SavedModel example to use serialized tf Example. Change: 135378723
|
tensorflow/python/saved_model/example/saved_model_half_plus_two.py
|
tensorflow/python/saved_model/example/saved_model_half_plus_two.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an example linear regression inference graph.
Exports a TensorFlow graph to /tmp/saved_model/half_plus_two/ based on the
SavedModel format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise SavedModel load and
execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils
def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
"""Generates SavedModel for half plus two.
Args:
export_dir: The directory to which the SavedModel should be written.
as_text: Writes the SavedModel protocol buffer in text format to disk.
"""
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Set up the model parameters as variables to exercise variable loading
# functionality upon restore.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Set up placeholders.
x = tf.placeholder(tf.float32, name="x")
y = tf.add(tf.mul(a, x), b, name="y")
# Set up the signature for regression with input and output tensor
# specification.
input_tensor = meta_graph_pb2.TensorInfo()
input_tensor.name = x.name
signature_inputs = {"input": input_tensor}
output_tensor = meta_graph_pb2.TensorInfo()
output_tensor.name = y.name
signature_outputs = {"output": output_tensor}
signature_def = utils.build_signature_def(signature_inputs,
signature_outputs, "regression")
# Initialize all variables and then save the SavedModel.
sess.run(tf.initialize_all_variables())
builder.add_meta_graph_and_variables(
sess, [constants.TAG_SERVING],
signature_def_map={"regression": signature_def})
builder.save(as_text)
def main(_):
export_dir_pb = "/tmp/saved_model/half_plus_two"
_generate_saved_model_for_half_plus_two(export_dir_pb)
export_dir_pbtxt = "/tmp/saved_model/half_plus_two_pbtxt"
_generate_saved_model_for_half_plus_two(export_dir_pbtxt, as_text=True)
if __name__ == "__main__":
tf.app.run()
|
Python
| 0
|
@@ -1955,22 +1955,24 @@
%0A%0A #
-Set up
+Create a
placeho
@@ -1979,44 +1979,466 @@
lder
-s.%0A x = tf.placeholder(tf.float32
+ for serialized tensorflow.Example messages to be fed.%0A serialized_tf_example = tf.placeholder(tf.string, name=%22tf_example%22)%0A%0A # Parse the tensorflow.Example looking for a feature named %22x%22 with a single%0A # floating point value.%0A feature_configs = %7B%22x%22: tf.FixedLenFeature(%5B1%5D, dtype=tf.float32),%7D%0A tf_example = tf.parse_example(serialized_tf_example, feature_configs)%0A # Use tf.identity() to assign name%0A x = tf.identity(tf_example%5B%22x%22%5D
, na
@@ -2651,17 +2651,37 @@
.name =
-x
+serialized_tf_example
.name%0A
@@ -2799,17 +2799,30 @@
.name =
-y
+tf.identity(y)
.name%0A
|
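The core of this change, isolated: instead of a raw float placeholder, the graph accepts serialized tensorflow.Example strings and parses the "x" feature out of them. The sketch below uses the same pre-1.0 TensorFlow API as the record (tf.placeholder, tf.parse_example, tf.mul), so it only runs against that era of TensorFlow:

import tensorflow as tf

serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32)}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# tf.identity() pins a stable name on the parsed tensor for the signature.
x = tf.identity(tf_example["x"], name="x")
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
y = tf.add(tf.mul(a, x), b, name="y")
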
a07db1f5d05bf89cfd34c8229813eb7805c35b6a
|
Move general SOCKSError after more specific failures
|
ooni/errors.py
|
ooni/errors.py
|
from twisted.internet.defer import CancelledError
from twisted.internet.defer import TimeoutError as DeferTimeoutError
from twisted.web._newclient import ResponseNeverReceived
from twisted.web.error import Error
from twisted.internet.error import ConnectionRefusedError, TCPTimedOutError
from twisted.internet.error import DNSLookupError, ConnectError, ConnectionLost
from twisted.internet.error import TimeoutError as GenericTimeoutError
from twisted.internet.error import ProcessDone, ConnectionDone
from twisted.python import usage
from txsocksx.errors import SOCKSError
from txsocksx.errors import MethodsNotAcceptedError, AddressNotSupported
from txsocksx.errors import ConnectionError, NetworkUnreachable
from txsocksx.errors import ConnectionLostEarly, ConnectionNotAllowed
from txsocksx.errors import NoAcceptableMethods, ServerFailure
from txsocksx.errors import HostUnreachable, ConnectionRefused
from txsocksx.errors import TTLExpired, CommandNotSupported
from socket import gaierror
known_failures = [
(ConnectionRefusedError, 'connection_refused_error'),
(ConnectionLost, 'connection_lost_error'),
(CancelledError, 'task_timed_out'),
(gaierror, 'address_family_not_supported_error'),
(DNSLookupError, 'dns_lookup_error'),
(TCPTimedOutError, 'tcp_timed_out_error'),
(ResponseNeverReceived, 'response_never_received'),
(DeferTimeoutError, 'deferred_timeout_error'),
(GenericTimeoutError, 'generic_timeout_error'),
(SOCKSError, 'socks_error'),
(MethodsNotAcceptedError, 'socks_methods_not_supported'),
(AddressNotSupported, 'socks_address_not_supported'),
(NetworkUnreachable, 'socks_network_unreachable'),
(ConnectionError, 'socks_connect_error'),
(ConnectionLostEarly, 'socks_connection_lost_early'),
(ConnectionNotAllowed, 'socks_connection_not_allowed'),
(NoAcceptableMethods, 'socks_no_acceptable_methods'),
(ServerFailure, 'socks_server_failure'),
(HostUnreachable, 'socks_host_unreachable'),
(ConnectionRefused, 'socks_connection_refused'),
(TTLExpired, 'socks_ttl_expired'),
(CommandNotSupported, 'socks_command_not_supported'),
(ProcessDone, 'process_done'),
(ConnectionDone, 'connection_done'),
(ConnectError, 'connect_error'),
]
def handleAllFailures(failure):
"""
Trap all the known Failures and we return a string that
represents the failure. Any unknown Failures will be reraised and
returned by failure.trap().
"""
failure.trap(*[failure_type for failure_type, _ in known_failures])
return failureToString(failure)
def failureToString(failure):
"""
Given a failure instance return a string representing the kind of error
that occurred.
Args:
failure: a :class:twisted.internet.error instance
Returns:
A string representing the HTTP response error message.
"""
for failure_type, failure_string in known_failures:
if isinstance(failure.value, failure_type):
if failure_string:
return failure_string
else:
# Failure without a corresponding failure message
return 'unknown_failure %s' % str(failure.value)
class DirectorException(Exception):
pass
class UnableToStartTor(DirectorException):
pass
class InvalidOONIBCollectorAddress(Exception):
pass
class InvalidOONIBBouncerAddress(Exception):
pass
class AllReportersFailed(Exception):
pass
class GeoIPDataFilesNotFound(Exception):
pass
class ReportNotCreated(Exception):
pass
class ReportAlreadyClosed(Exception):
pass
class TorStateNotFound(Exception):
pass
class TorControlPortNotFound(Exception):
pass
class InsufficientPrivileges(Exception):
pass
class ProbeIPUnknown(Exception):
pass
class NoMoreReporters(Exception):
pass
class TorNotRunning(Exception):
pass
class OONIBError(Exception):
pass
class OONIBInvalidRequest(OONIBError):
pass
class OONIBReportError(OONIBError):
pass
class OONIBReportUpdateError(OONIBReportError):
pass
class OONIBReportCreationError(OONIBReportError):
pass
class OONIBTestDetailsLookupError(OONIBReportError):
pass
class OONIBInputError(OONIBError):
pass
class OONIBInputDescriptorNotFound(OONIBInputError):
pass
class UnableToLoadDeckInput(Exception):
pass
class CouldNotFindTestHelper(Exception):
pass
class CouldNotFindTestCollector(Exception):
pass
class NetTestNotFound(Exception):
pass
class MissingRequiredOption(Exception):
def __init__(self, message, net_test_loader):
super(MissingRequiredOption, self).__init__()
self.net_test_loader = net_test_loader
self.message = message
def __str__(self):
return ','.join(self.message)
class OONIUsageError(usage.UsageError):
def __init__(self, net_test_loader):
super(OONIUsageError, self).__init__()
self.net_test_loader = net_test_loader
class FailureToLoadNetTest(Exception):
pass
class NoPostProcessor(Exception):
pass
class InvalidOption(Exception):
pass
class IncoherentOptions(Exception):
def __init__(self, first_options, second_options):
super(IncoherentOptions, self).__init__()
self.message = "%s is different to %s" % (first_options, second_options)
def __str__(self):
return self.message
class TaskTimedOut(Exception):
pass
class InvalidInputFile(Exception):
pass
class ReporterException(Exception):
pass
class InvalidDestination(ReporterException):
pass
class ReportLogExists(Exception):
pass
class InvalidConfigFile(Exception):
pass
class ConfigFileIncoherent(Exception):
pass
def get_error(error_key):
if error_key == 'test-helpers-key-missing':
return CouldNotFindTestHelper
if error_key == 'input-descriptor-not-found':
return OONIBInputDescriptorNotFound
if error_key == 'invalid-request':
return OONIBInvalidRequest
elif isinstance(error_key, int):
return Error("%d" % error_key)
else:
return OONIBError
class IfaceError(Exception):
pass
class ProtocolNotRegistered(Exception):
pass
class ProtocolAlreadyRegistered(Exception):
pass
class LibraryNotInstalledError(Exception):
pass
|
Python
| 0
|
@@ -1463,41 +1463,8 @@
'),%0A
- (SOCKSError, 'socks_error'),%0A
@@ -2096,24 +2096,57 @@
upported'),%0A
+ (SOCKSError, 'socks_error'),%0A
(Process
|
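Why the reorder matters: failureToString() returns the name of the first isinstance() match, so a base class listed ahead of its subclasses shadows every more specific entry. A self-contained illustration with hypothetical exception classes:

class SocksBase(Exception):
    pass

class HostUnreachableExc(SocksBase):
    pass

def failure_to_string(exc, table):
    for failure_type, failure_string in table:
        if isinstance(exc, failure_type):
            return failure_string

shadowed = [(SocksBase, 'socks_error'),
            (HostUnreachableExc, 'socks_host_unreachable')]
fixed = [(HostUnreachableExc, 'socks_host_unreachable'),
         (SocksBase, 'socks_error')]
assert failure_to_string(HostUnreachableExc(), shadowed) == 'socks_error'
assert failure_to_string(HostUnreachableExc(), fixed) == 'socks_host_unreachable'
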
e8a29a6af8856c2957ed93a2da31b62916b6694d
|
add git support and support passing project_name in VersionControl __init__
|
deps/__init__.py
|
deps/__init__.py
|
import os
import sys
import shutil
import logging
import urlparse
logger = logging.getLogger('deps')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
class MissingDependency(Exception):
pass
class VersionControl(object):
def __init__(self, url, root, app_name='', project_name=''):
self.url = url
self.root = root
tail = os.path.basename((urlparse.urlparse(url)[2]).rstrip('/'))
if not app_name:
self.app_name = tail
self.project_name = tail
else:
self.app_name = app_name
self.project_name = tail
self.python_path = os.path.join(
self.root,
self.project_name,
)
self.path = os.path.join(
self.root,
self.project_name,
self.app_name,
)
def __repr__(self):
return "<VersionControl: %s>" % self.app_name
def add_to_python_path(self, position):
if not os.path.exists(self.path):
raise MissingDependency('%s does not exist. Run "./manage.py up" to retrieve this dependency' % self.app_name)
sys.path.insert(position, self.python_path)
class HG(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('hg clone %s %s' % (self.url, self.python_path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.chdir(self.python_path)
os.system('hg pull --update')
class SVN(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('svn co %s %s' % (self.url, self.path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.system('svn up %s' % self.path)
def add_all_to_path(settings, auto_update=False, position=1):
for dependency in settings.DEPENDENCIES:
try:
dependency.add_to_python_path(position)
except MissingDependency:
if auto_update:
dependency.up()
else:
raise
dependency.add_to_python_path(position)
|
Python
| 0
|
@@ -292,18 +292,20 @@
pp_name=
-''
+None
, projec
@@ -315,10 +315,12 @@
ame=
-''
+None
):%0A
@@ -451,75 +451,45 @@
-if not app_name:%0A self.app_name = tail%0A self.
+self.project_name = project_name and
proj
@@ -501,33 +501,16 @@
ame
-=
+or
tail%0A
- else:%0A
@@ -541,40 +541,24 @@
name
-%0A self.project
+ and app
_name
-=
+or
tai
@@ -1514,24 +1514,391 @@
-update')%0A%0A%0A
+class GIT(VersionControl):%0A def checkout(self):%0A logger.info('checking out %25s' %25 self.project_name)%0A os.system('git clone %25s %25s' %25 (self.url, self.python_path))%0A%0A def up(self):%0A logger.info('%25s' %25 self)%0A if not os.path.exists(self.path):%0A self.checkout()%0A os.chdir(self.python_path)%0A os.system('git pull')%0A%0A%0A
class SVN(Ve
|
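The default handling this diff introduces uses the pre-Python-2.5 "x and x or tail" conditional idiom; for plain fall-back-when-falsy defaults it reduces to "x or tail". A sketch of the equivalent logic with illustrative names:

def resolve_names(tail, app_name=None, project_name=None):
    """Fall back to the URL tail when a name is not supplied."""
    project_name = project_name or tail
    app_name = app_name or tail
    return app_name, project_name

assert resolve_names('repo') == ('repo', 'repo')
assert resolve_names('repo', app_name='app') == ('app', 'repo')
assert resolve_names('repo', project_name='proj') == ('repo', 'proj')
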
b2ed2050fdab7ba1052e33786c0a0868333114c4
|
Update treeviz_example.py
|
open_spiel/python/examples/treeviz_example.py
|
open_spiel/python/examples/treeviz_example.py
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree visualization example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import pyspiel
from open_spiel.python.visualizations import treeviz
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.png", "Name of output file, e.g., "
"[*.png|*.pdf].")
flags.DEFINE_enum("prog", "dot", ["dot", "neato", "circo"], "Graphviz layout.")
flags.DEFINE_boolean("group_infosets", False, "Whether to group infosets.")
flags.DEFINE_boolean("group_terminal", False, "Whether to group terminal nodes.")
flags.DEFINE_boolean("verbose", False, "Wether to print verbose output.")
def _zero_sum_node_decorator(state):
"""Custom node decorator that only shows the return of the first player."""
attrs = treeviz.default_node_decorator(state) # get default attributes
if state.is_terminal():
attrs["label"] = str(int(state.returns()[0]))
return attrs
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
logging.warn("%s is not turn-based. Trying to reload game as turn-based.",
FLAGS.game)
game = pyspiel.load_game_as_turn_based(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must be sequential, not {}".format(
game_type.dynamics))
if (game_type.utility == pyspiel.GameType.Utility.ZERO_SUM and
game.num_players() == 2):
logging.info("Game is zero-sum: only showing first-player's returns.")
gametree = treeviz.GameTree(
game,
node_decorator=_zero_sum_node_decorator,
group_infosets=FLAGS.group_infosets,
group_terminal=FLAGS.group_terminal)
else:
gametree = treeviz.GameTree(game) # use default decorators
if FLAGS.verbose:
logging.info("Game tree:\n%s", gametree.to_string())
gametree.draw(FLAGS.out, prog=FLAGS.prog)
logging.info("Game tree saved to file: %s", FLAGS.out)
if __name__ == "__main__":
app.run(main)
|
Python
| 0.000001
|
@@ -1371,16 +1371,17 @@
alse, %22W
+h
ether to
|
fd58dcc83080449219944426281c6d8e5f9e5f60
|
change key into keyIndex to avoid invalid path issues.
|
minicrawler/spider/basespider.py
|
minicrawler/spider/basespider.py
|
from ..provider.webbrowser import WebBrowser
from ..provider.requestor import Requestor
import csv
import os
import pickle
import datetime
import shutil
import numpy as np
import pandas as pd
class BaseSpider():
# member variables
_provider = None
_outputFolder = 'output'
_keyList = []
_lastKeyPath = None
_isRunning = False
# constructor
def __init__(self, url, lastKeyPath, asyncPage = False, outputFolder = None, broswerRest = None):
self._lastKeyPath = lastKeyPath
# if it's a async page
if(asyncPage):
self._provider = WebBrowser(url, broswerRest)
else:
self._provider = Requestor(url)
if(outputFolder):
self._outputFolder = outputFolder
self._keyList = self._getTotalKeys()
# print (self._keyList)
def crawl(self, forceRestart = False, startIndex = 0):
if(forceRestart):
self._setKeyIndex(startIndex)
#
if(os.path.exists(self._outputFolder)):
shutil.rmtree(self._outputFolder)
# start
# test finish # self._setKeyIndex(25230)
if(self._isRunning):
print('It was already running...')
else:
self._isRunning = True
# go though all crawled pages
self._ignoreCrawledPages()
self._getTotalPages()
self._provider.quit()
self._isRunning = False
# NOTICE: needs to be implemented in subclass
def _getTotalKeys(self):
# html = self.provider.getContent()
return []
# NOTICE: needs to be implemented in subclass
def _getCurrentPage(self, index, key):
return None
# NOTICE: needs to be implemented in subclass for async pages
def _gotoNextPage(self, index, key):
pass
def _ignoreCrawledPages(self):
index = self._getKeyIndex()
for i in range(index):
print('go to page {:d}'.format(i + 1))
self._gotoNextPage(i, self._keyList[i])
def _getTotalPages(self):
index = self._getKeyIndex()
totalLeft = len(self._keyList) - index
date = datetime.datetime.today().strftime("%m/%d/%Y %H:%S")
print ('[{!s}] total left items: [{:d}/{:d}]'.format(date, totalLeft, len(self._keyList)))
for i in range(totalLeft):
self._getAndSave()
def _getKeyIndex(self):
index = 0
# get current index
try:
with open(self._lastKeyPath, 'rb') as f:
index = pickle.load(f)
except Exception as e:
print (e)
return index
def _setKeyIndex(self, index):
# save current index
with open(self._lastKeyPath, 'wb') as f:
pickle.dump(index, f)
def _getAndSave(self):
keyIndex = self._getKeyIndex()
# just for test # keyIndex = 6
# https://pyformat.info/
print('trying to get the page {:d}...'.format(keyIndex + 1))
if(self._keyList and self._keyList[keyIndex]):
key = self._keyList[keyIndex]
result = self._getCurrentPage(keyIndex, key)
if(result):
# print (result)
# save to .xlsx
self._saveWorkbook(result, key)
print('getting {!s} done!'.format(key))
# save current index as an existing one
keyIndex += 1
# save current index
self._setKeyIndex(keyIndex)
# go to the next page
self._gotoNextPage(keyIndex, key)
date = datetime.datetime.today().strftime("%m/%d/%Y %H:%S")
string = '[{!s}] next [{:d}/{:d}]'.format(date, keyIndex + 1, len(self._keyList))
# last items
if(keyIndex >= len(self._keyList)):
string = '[{!s}] all finished!'.format(date)
print(string)
def _saveWorkbook(self, items, key):
path = '/'.join([self._outputFolder, key + '.csv'])
# check the folder
if not os.path.exists(self._outputFolder):
os.makedirs(self._outputFolder)
print('"./{!s}/" does not exist, making a new one...'.format(self._outputFolder))
#
df = pd.DataFrame(items)
# df = df.set_index('institute')
if(os.path.isfile(path)):
# append model
with open(path, 'a') as f:
df.to_csv(f, header=False, index=False)
else:
df.to_csv(path, index=False)
#
|
Python
| 0
|
@@ -2886,12 +2886,74 @@
to .
-xlsx
+csv%0A # use keyIndex because current key may be invalid path
%0A
@@ -2983,24 +2983,29 @@
(result, key
+Index
)%0A pr
@@ -3594,19 +3594,32 @@
Folder,
+'
key
+' + str(key)
+ '.csv
|
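The fix above in isolation: crawled keys can contain characters that are invalid in file names, so the output file is named after the stable integer index rather than the raw key. A sketch (names illustrative):

import os

def output_path(output_folder, key_index):
    # 'key6.csv' is always a valid file name; the raw key may not be.
    return os.path.join(output_folder, 'key' + str(key_index) + '.csv')

assert output_path('output', 6) == os.path.join('output', 'key6.csv')
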
9cbdc64bcc1144b8ca7d32d08aa5d36afa7f1e73
|
index command - reflected _log_id_short change
|
pageobject/commands/index.py
|
pageobject/commands/index.py
|
def index(self, value):
"""
Return index of the first child containing the specified value.
:param str value: text value to look for
:returns: index of the first child containing the specified value
:rtype: int
:raises ValueError: if the value is not found
"""
self.logger.info('getting index of "{}" within {}'.format(value, self._log_id_short))
self.logger.debug('getting index of "{}" within page object; {}'.format(value, self._log_id_long))
index = self.text_values.index(value)
self.logger.info('index of "{}" within {} is {}'.format(value, self._log_id_short, index))
self.logger.debug('index of "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
return index
|
Python
| 0.000001
|
@@ -314,32 +314,37 @@
etting index of
+text
%22%7B%7D%22 within %7B%7D'.
@@ -339,16 +339,33 @@
within
+page object list
%7B%7D'.form
@@ -427,32 +427,37 @@
etting index of
+text
%22%7B%7D%22 within page
@@ -463,16 +463,21 @@
e object
+ list
; %7B%7D'.fo
@@ -573,32 +573,37 @@
.info('index of
+text
%22%7B%7D%22 within %7B%7D i
@@ -598,16 +598,33 @@
within
+page object list
%7B%7D is %7B%7D
@@ -699,16 +699,21 @@
ndex of
+text
%22%7B%7D%22 wit
|
32fdc3dfaaa8c8598de04a57141c5995be5409ca
|
Fix F.rrelu test unstability
|
tests/chainer_tests/functions_tests/activation_tests/test_rrelu.py
|
tests/chainer_tests/functions_tests/activation_tests/test_rrelu.py
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
@testing.parameterize(*testing.product({
'train': [True, False],
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
# cast self.r later because check_backward casts only x
self.r = numpy.random.uniform(self.l, self.u, self.shape)
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
r = self.r.astype(x.dtype)
r = device.send(r)
with chainer.using_config('train', self.train):
y = functions.rrelu(x, l=self.l, u=self.u, r=r)
return y,
def forward_expected(self, inputs):
x, = inputs
r = self.r.astype(self.dtype)
if self.train:
expected = numpy.where(x >= 0, x, x * r)
else:
r_test = numpy.mean([self.l, self.u]).astype(self.dtype)
expected = numpy.where(x >= 0, x, x * r_test)
return expected,
@testing.parameterize(*testing.product({
'specify_r': [True, False],
'return_r': [True, False],
'train': [True, False],
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLUR(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
self.r = numpy.random.uniform(
self.l, self.u, self.x.shape).astype(self.x.dtype)
def _check(self):
r = self.r if self.specify_r else None
return_r = self.return_r
with chainer.using_config('train', self.train):
out = functions.rrelu(
self.x, self.l, self.u, r=r, return_r=return_r)
if not return_r:
return
out, out_r = out
assert isinstance(out_r, type(out.array))
if r is None:
assert out_r.shape == out.array.shape
else:
if self.train:
assert out_r is r
def test_cpu(self):
with chainer.using_config('use_ideep', 'never'):
self._check()
@attr.gpu
def test_gpu(self):
self.x = cuda.to_gpu(self.x)
self.r = cuda.to_gpu(self.r)
self._check()
testing.run_module(__name__, __file__)
|
Python
| 0.000005
|
@@ -699,44 +699,8 @@
):%0A%0A
- dodge_nondifferentiable = True%0A%0A
@@ -1390,32 +1390,150 @@
ype(self.dtype)%0A
+ if self.test_name in ('test_backward', 'test_double_backward'):%0A x%5B(-0.05 %3C x) & (x %3C 0.05)%5D = 0.5%0A
return x
|
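The stabilisation trick above, on its own: rrelu has a kink at x == 0, where numeric and analytic gradients legitimately disagree, so backward tests nudge samples out of a small band around it. A numpy-only sketch using the same 0.05 margin and 0.5 fill value as the diff:

import numpy

def generate_inputs(shape, dtype, for_backward=False):
    x = numpy.random.uniform(-1, 1, shape).astype(dtype)
    if for_backward:
        # Move samples off the non-differentiable point at 0 so finite
        # differences stay well-defined during gradient checking.
        x[(-0.05 < x) & (x < 0.05)] = 0.5
    return x

x = generate_inputs((3, 2), numpy.float32, for_backward=True)
assert not ((-0.05 < x) & (x < 0.05)).any()
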
6143e6b015ed0435dc747b8d4242d47dca79c7a8
|
improve busydialog handling
|
lib/kodi65/busyhandler.py
|
lib/kodi65/busyhandler.py
|
# -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmc
from kodi65 import utils
import traceback
from functools import wraps
class BusyHandler(object):
"""
Class to deal with busydialog handling
"""
def __init__(self, *args, **kwargs):
self.busy = 0
self.enabled = True
def enable(self):
"""
Enables busydialog handling
"""
self.enabled = True
def disable(self):
"""
Disables busydialog handling
"""
self.enabled = False
def show_busy(self):
"""
Increase busycounter and open busydialog if needed
"""
if not self.enabled:
return None
if self.busy == 0:
xbmc.executebuiltin("ActivateWindow(busydialog)")
self.busy += 1
def hide_busy(self):
"""
Decrease busycounter and close busydialog if needed
"""
if not self.enabled:
return None
self.busy = max(0, self.busy - 1)
if self.busy == 0:
xbmc.executebuiltin("Dialog.Close(busydialog)")
def set_busy(self, func):
"""
Decorator to show busy dialog while function is running
"""
@wraps(func)
def decorator(cls, *args, **kwargs):
self.show_busy()
result = None
try:
result = func(cls, *args, **kwargs)
except Exception:
utils.log(traceback.format_exc())
utils.notify("Error", "please contact add-on author")
finally:
self.hide_busy()
return result
return decorator
busyhandler = BusyHandler()
|
Python
| 0.000001
|
@@ -150,16 +150,19 @@
ort xbmc
+gui
%0Afrom ko
@@ -394,32 +394,75 @@
f.enabled = True
+%0A self.dialog = xbmcgui.DialogBusy()
%0A%0A def enable
@@ -876,80 +876,125 @@
-xbmc.executebuiltin(%22ActivateWindow(busydialog)%22)%0A self.busy += 1
+self.dialog.create()%0A self.busy += 1%0A%0A def set_progress(self, percent):%0A self.dialog.update(percent)
%0A%0A
@@ -1238,54 +1238,26 @@
-xbmc.executebuiltin(%22D
+self.d
ialog.
-C
+c
lose(
-busydialog)%22
)%0A%0A
|
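The counter in this record maps naturally onto a re-entrant context manager, which guarantees hide_busy() runs even when the wrapped code raises. A minimal sketch against the same BusyHandler interface:

from contextlib import contextmanager

@contextmanager
def busy(handler):
    handler.show_busy()
    try:
        yield
    finally:
        handler.hide_busy()

# Nested uses only close the dialog when the outermost block exits:
# with busy(busyhandler):
#     with busy(busyhandler):
#         do_work()
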
7f48ffa31e8aebffcb5be14ba26fe51dda586519
|
add branch parsing
|
PyAnalysisTools/ROOTUtils/FileHandle.py
|
PyAnalysisTools/ROOTUtils/FileHandle.py
|
__author__ = 'marcusmorgenstern'
__mail__ = ''
import os
import re
from ROOT import TFile
from PyAnalysisTools.base import _logger, InvalidInputError
_memoized = {}
def get_id_tuple(f, args, kwargs, mark=object()):
l = [id(f)]
for arg in args:
l.append(id(arg))
l.append(id(mark))
for k, v in kwargs:
l.append(k)
l.append(id(v))
return tuple(l)
def memoize(f):
"""
Some basic memoizer
"""
def memoized(*args, **kwargs):
key = get_id_tuple(f, args, kwargs)
if key not in _memoized:
_memoized[key] = f(*args, **kwargs)
return _memoized[key]
return memoized
@memoize
class FileHandle(object):
def __init__(self, file_name, path='./'):
self.file_name = file_name
self.path = path
self.absFName = os.path.join(self.path, self.file_name)
self.open()
self.process = self.parse_process()
def open(self):
if not os.path.exists(self.absFName):
raise ValueError("File " + os.path.join(self.path, self.file_name) + " does not exist.")
self.tfile = TFile.Open(os.path.join(self.path, self.file_name), 'READ')
def parse_process(self):
process_name = self.file_name.split("-")[-1].split(".")[0]
if process_name.isdigit():
return "Data"
return process_name
def get_directory(self, directory):
if directory is None:
return self.tfile
try:
return self.tfile.Get(directory)
except Exception as e:
print str(e)
def get_objects(self):
objects = []
for obj in self.tfile.GetListOfKeys():
objects.append(self.tfile.Get(obj.GetName()))
return objects
def get_objects_by_type(self, typename):
obj = self.get_objects()
obj = filter(lambda t: t.InheritsFrom(typename), obj)
return obj
def get_objects_by_pattern(self, pattern, tdirectory=None):
tdir = self.get_directory(tdirectory)
objects = []
pattern = re.compile(pattern)
for key in tdir.GetListOfKeys():
if re.search(pattern, key.GetName()):
objects.append(tdir.Get(key.GetName()))
if len(objects) == 0:
_logger.warning("Could not find objects matching %s in %s" % (pattern, tdir.GetName()))
return objects
def get_object_by_name(self, obj_name, tdirectory=None):
tdir = self.tfile
if tdirectory:
try:
tdir = self.get_object_by_name(tdirectory)
except ValueError as e:
raise e
obj = tdir.Get(obj_name)
if not obj.__nonzero__():
raise ValueError("Object " + obj_name + " does not exist in file " + os.path.join(self.path, self.file_name))
return obj
def get_number_of_total_events(self):
try:
cutflow_hist = self.get_object_by_name("Nominal/cutflow_DxAOD")
return cutflow_hist.GetBinContent(1)
except ValueError as e:
_logger.error("Unable to parse cutflow Nominal/DxAOD from file %s" % self.file_name)
raise e
def fetch_and_link_hist_to_tree(self, tree_name, hist, var_name, cut_string=""):
tree = self.get_object_by_name(tree_name)
_logger.debug("Parsed tree %s from file %s containing %i entries" % (tree_name, self.file_name, tree.GetEntries()))
if cut_string is None:
cut_string = ""
n_selected_events = tree.Project(hist.GetName(), var_name, cut_string)
_logger.debug("Selected %i events from tree %s for distribution %s and cut %s." %(n_selected_events,
tree_name,
var_name,
cut_string))
if n_selected_events == -1:
_logger.error("Unable to project %s from tree %s with cut %s" % (var_name, tree_name, cut_string))
raise RuntimeError("TTree::Project failed")
return hist
@staticmethod
def release_object_from_file(obj):
obj.SetDirectory(0)
|
Python
| 0.000001
|
@@ -2375,32 +2375,451 @@
return objects%0A%0A
+ def get_branch_names_from_tree(self, tree_name, tdirectory=None, pattern=%22.*%22):%0A tree = self.get_object_by_name(tree_name, tdirectory)%0A pattern = re.compile(pattern)%0A branch_names = %5B%5D%0A for branch in tree.GetListOfBranches():%0A print branch%0A if re.search(pattern, branch.GetName()):%0A branch_names.append(branch.GetName())%0A return branch_names%0A%0A
def get_obje
|
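The branch filtering this diff adds, minus the ROOT dependency: compile the pattern once and keep the names that re.search() matches. A plain list stands in for tree.GetListOfBranches():

import re

def filter_branch_names(names, pattern='.*'):
    regex = re.compile(pattern)
    return [name for name in names if re.search(regex, name)]

branches = ['jet_pt', 'jet_eta', 'muon_pt']
assert filter_branch_names(branches, 'jet_.*') == ['jet_pt', 'jet_eta']
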
99eca228811022281da8c93123d7562e5e5c6acb
|
Update recommender_system.py
|
lib/recommender_system.py
|
lib/recommender_system.py
|
#!/usr/bin/env python
"""
This is a module that contains the main class and functionalities of the recommender systems.
"""
import numpy
from lib.content_based import ContentBased
from lib.evaluator import Evaluator
from lib.LDA import LDARecommender
from util.data_parser import DataParser
from util.recommender_configuer import RecommenderConfiguration
class RecommenderSystem(object):
"""
A class that will combine the content-based and collaborative-filtering,
in order to provide the main functionalities of recommendations.
"""
def __init__(self):
"""
Constructor of the RecommenderSystem.
"""
DataParser.process()
self.ratings = DataParser.get_ratings_matrix()
# TODO: split abstracts
self.abstracts = DataParser.get_abstracts().values()
self.config = RecommenderConfiguration()
self.n_factors = self.config.get_hyperparameters()['n_factors']
self.n_iterations = self.config.get_options()['n_iterations']
# self.content_based = ContentBased(self.abstracts, self.n_factors, self.n_iterations)
if self.config.get_content_based() == 'LDA':
self.content_based = LDARecommender(self.abstracts, self.n_factors, self.n_iterations)
elif self.config.get_content_based() == 'LDA2Vec':
raise NotImplemented('LDA2Vec is not yet implemented.')
else:
raise NameError("Not a valid content based " + self.config.get_content_based())
self.hyperparameters = self.config.get_hyperparameters()
if self.config.get_collaborative_filtering() == 'ALS':
# self.collaborative_filtering = CollaborativeFiltering(ratings, self.n_factors,
# self.hyperparameters['collaborative-filtering-lambda'])
pass
else:
raise NameError("Not a valid collaborative filtering " + self.config.get_collaborative_filtering())
if self.config.get_error_metric() == 'RMS':
# TODO: initialize with abstracts
self.evaluator = Evaluator(self.ratings)
else:
raise NameError("Not a valid error metric " + self.config.get_error_metric())
def process(self):
"""
Process an iteration of the algorithm on the given data.
"""
self.content_based.train()
theta = self.content_based.get_word_distribution()
# TODO: Use collaborative filtering and evaluator
# u, v = self.collaborative_filtering.train(theta)
error = self.evaluator.get_rmse(theta)
return error
def recommend_items(self, user_id, num_recommendations=10):
"""
Get recommendations for a user.
@param(int) user_id: The id of the user.
@param(int) num_recommendations: The number of recommended items.
@returns(list) a list of the best recommendations for a given user_id.
"""
pass
|
Python
| 0
|
@@ -1006,34 +1006,32 @@
ations'%5D%0A
- #
self.content_ba
|
767a50052895cf10386f01bab83941a2141c30f1
|
fix json test and add json from string test
|
tests/python_tests/datasource_test.py
|
tests/python_tests/datasource_test.py
|
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, mapnik2
def setup():
    # All of the paths used are relative; if we run the tests
    # from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_field_listing():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
fields = lyr.datasource.fields()
eq_(fields, ['AREA', 'EAS_ID', 'PRFEDEA'])
def test_total_feature_count_shp():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 10)
def test_total_feature_count_json():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Ogr(file='../data/json/points.json',layer_by_index=0)
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 3)
def test_feature_envelope():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
for feat in features:
env = feat.envelope()
contains = lyr.envelope().contains(env)
eq_(contains, True)
intersects = lyr.envelope().contains(env)
eq_(intersects, True)
def test_feature_attributes():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
feat = features[0]
attrs = {'PRFEDEA': u'35043411', 'EAS_ID': 168, 'AREA': 215229.266}
eq_(feat.attributes, attrs)
eq_(lyr.datasource.fields(),['AREA', 'EAS_ID', 'PRFEDEA'])
eq_(lyr.datasource.field_types(),[float,int,str])
|
Python
| 0.000008
|
@@ -953,17 +953,300 @@
_feats,
-3
+5)%0A%0Adef test_reading_json_from_string():%0A json = open('../data/json/points.json','r').read()%0A lyr = mapnik2.Layer('test')%0A lyr.datasource = mapnik2.Ogr(file=json,layer_by_index=0)%0A features = lyr.datasource.all_features()%0A num_feats = len(features)%0A eq_(num_feats, 5
)%0A %0Ad
|
05855c934624c667053635a8ab8679c54426e49f
|
Rewrite the initialization of Release.eol_date.
|
releases/migrations/0003_populate_release_eol_date.py
|
releases/migrations/0003_populate_release_eol_date.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
def set_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
# List of EOL dates for releases for which docs are published.
for version, eol_date in [
('1.4', datetime.date(2015, 10, 1)),
('1.5', datetime.date(2014, 9, 2)),
('1.6', datetime.date(2015, 4, 1)),
]:
Release.objects.filter(version=version).update(eol_date=eol_date)
def unset_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
Release.objects.update(eol_date=None)
class Migration(migrations.Migration):
dependencies = [
('releases', '0002_release_eol_date'),
]
operations = [
migrations.RunPython(set_eol_date, unset_eol_date),
]
|
Python
| 0
|
@@ -219,15 +219,15 @@
#
-List of
+Set the
EOL
@@ -235,95 +235,755 @@
date
-s for releases for which docs are published.%0A for version, eol_date in %5B%0A ('1
+ of all releases to the date of the following release%0A # except for the final one in the 0 series and in each 1.x series.%0A releases = list(Release.objects.all().order_by('major', 'minor', 'micro',%0A 'status', 'iteration'))%0A for previous, current in zip(releases%5B:-1%5D, releases%5B1:%5D):%0A if current.major != previous.major:%0A continue%0A if current.major == 1 and previous.minor != current.minor:%0A continue%0A previous.eol_date = current.date%0A previous.save()%0A # Set the EOL date of final releases the 0 series and in each 1.x series.%0A for version, eol_date in %5B%0A ('0.96.5', datetime.date(2008, 9, 3)), # 1.0 release%0A ('1.0
.4',
@@ -1004,33 +1004,326 @@
(201
+0,
5, 1
-0, 1)),%0A ('1.5
+7)), # 1.2 release%0A ('1.1.4', datetime.date(2011, 3, 23)), # 1.3 release%0A ('1.2.7', datetime.date(2012, 3, 23)), # 1.4 release%0A ('1.3.7', datetime.date(2013, 2, 26)), # 1.5 release%0A ('1.4.22', datetime.date(2015, 10, 1)), # end of LTS support%0A ('1.5.12
', d
@@ -1348,16 +1348,35 @@
9, 2)),
+ # 1.7 release
%0A
@@ -1381,16 +1381,19 @@
('1.6
+.11
', datet
@@ -1418,15 +1418,173 @@
1)),
-%0A %5D:
+ # 1.8 release%0A # 1.7.10 and 1.8.5 are still supported at the time of writing.%0A %5D:%0A # This patterns ignores missing releases e.g. during tests.
%0A
@@ -1655,16 +1655,17 @@
_date)%0A%0A
+%0A
def unse
|
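The pairing idiom at the heart of the rewritten migration, in isolation: zip a sorted sequence against itself shifted by one, and let each release inherit its EOL date from its successor within the same series. Dicts with illustrative dates stand in for Release rows:

releases = [
    {'version': '1.4',   'minor': 4, 'date': '2012-03-23', 'eol_date': None},
    {'version': '1.4.1', 'minor': 4, 'date': '2012-05-02', 'eol_date': None},
    {'version': '1.5',   'minor': 5, 'date': '2013-02-26', 'eol_date': None},
]
for previous, current in zip(releases[:-1], releases[1:]):
    if previous['minor'] != current['minor']:
        continue  # final release of a series: its EOL is set separately, as above
    previous['eol_date'] = current['date']

assert releases[0]['eol_date'] == '2012-05-02'
assert releases[1]['eol_date'] is None
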
e26d30a0e88cb1100e0b285b394e94ce8c48d4f3
|
fix the incorrect plot in the lasso tool
|
src/scripts/lasso_enet_var_select.py
|
src/scripts/lasso_enet_var_select.py
|
#!/usr/bin/env python
################################################################################
# AUTHORS: Miguel Ibarra <miguelib@ufl.edu>
# Matt Thoburn <mthoburn@ufl.edu>
#
# DESCRIPTION: This runs an Elastic Net or Lasso Test on wide data
################################################################################
import os
import logging
import argparse
try:
from importlib import resources as ires
except ImportError:
import importlib_resources as ires
import itertools as it
import numpy as np
import rpy2.robjects as robjects
from argparse import RawDescriptionHelpFormatter
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage as STAP
from rpy2.rinterface_lib.embedded import RRuntimeError
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
def getOptions(myOpts=None):
description = """
    The tool performs feature selection using the LASSO/Elastic Net method.
"""
parser = argparse.ArgumentParser(
description=description, formatter_class=RawDescriptionHelpFormatter
)
# Standard Input
standard = parser.add_argument_group(
title="Standard input", description="Standard input for SECIM tools."
)
standard.add_argument(
"-i",
"--input",
dest="input",
action="store",
required=True,
help="Input dataset in wide format.",
)
standard.add_argument(
"-d",
"--design",
dest="design",
action="store",
required=True,
help="Design file.",
)
standard.add_argument(
"-id",
"--ID",
dest="uniqID",
action="store",
required=True,
help="Name of the column with unique" " identifiers.",
)
standard.add_argument(
"-g",
"--group",
dest="group",
action="store",
required=False,
default=False,
help="Name of the column" " with groups.",
)
# Tool Input
    tool = parser.add_argument_group(title="Tool Specific")
tool.add_argument(
"-a",
"--alpha",
dest="alpha",
action="store",
required=True,
help="Alpha Value.",
)
# Tool Output
output = parser.add_argument_group(title="Required output")
output.add_argument(
"-c",
"--coefficients",
dest="coefficients",
action="store",
required=False,
help="Path of en" " coefficients file.",
)
output.add_argument(
"-f",
"--flags",
dest="flags",
action="store",
required=False,
help="Path of en flag file.",
)
output.add_argument(
"-p",
"--plots",
dest="plots",
action="store",
required=False,
help="Path of en coefficients file.",
)
parser.add_argument(
"-r",
"--rscript",
action="store",
required=False,
help="Full path to R script if not using package version",
)
args = parser.parse_args()
# Standardize paths
args.input = os.path.abspath(args.input)
args.plots = os.path.abspath(args.plots)
args.flags = os.path.abspath(args.flags)
args.design = os.path.abspath(args.design)
args.coefficients = os.path.abspath(args.coefficients)
return args
def main(args):
if not args.rscript:
with ires.path("secimtools.data", "lasso_enet.R") as R_path:
my_r_script_path = str(R_path)
else:
my_r_script_path = args.rscript
logger.info(f"R script path: {my_r_script_path}")
pandas2ri.activate()
with open(my_r_script_path, "r") as f:
rFile = f.read()
lassoEnetScript = STAP(rFile, "lasso_enet")
# Import data trough the interface module
dat = wideToDesign(
args.input, args.design, args.uniqID, group=args.group, logger=logger
)
dat.dropMissing()
# Get remaining Sample IDs for dataframe filtering of irrelevant columns
sample_ids = dat.wide.index.tolist()
group_col_name = dat.group
# Transpose Data so compounds are columns
dat.trans = dat.transpose()
group_data = dat.trans[group_col_name]
dat.trans.columns.name = ""
# Dropping nan columns from design
removed = dat.design[dat.design[dat.group] == "nan"]
dat.design = dat.design[dat.design[dat.group] != "nan"]
dat.trans.drop(removed.index.values, axis=0, inplace=True)
dat.trans = dat.trans.loc[:,sample_ids]
dat.trans['group'] = group_data
logger.info("{0} removed from analysis".format(removed.index.values))
dat.design.rename(columns={dat.group: "group"}, inplace=True)
dat.trans.rename(columns={dat.group: "group"}, inplace=True)
groupList = [
title for title, group in dat.design.groupby("group") if len(group.index) > 2
]
# Turn the group list into pairwise combinations
comboMatrix = np.array(list(it.combinations(groupList, 2)))
comboLength = len(comboMatrix)
correct_list_of_names = np.array(dat.trans.columns.values.tolist())
try:
returns = lassoEnetScript.lassoEN(
dat.trans,
dat.design,
args.uniqID,
correct_list_of_names,
comboMatrix,
comboLength,
args.alpha,
args.plots,
)
except RRuntimeError as e:
try:
e.context = {
'r_traceback': '\n'.join((r'unlist(traceback())'))
}
except Exception as traceback_exc:
e.context = {
'r_traceback':
'(an error occurred while getting traceback from R)',
'r_traceback_err': traceback_exc,
}
raise
robjects.r["write.table"](
returns[0],
file=args.coefficients,
sep="\t",
quote=False,
row_names=False,
col_names=True,
)
robjects.r["write.table"](
returns[1],
file=args.flags,
sep="\t",
quote=False,
row_names=False,
col_names=True,
)
# Finishing
logger.info("Script Complete!")
if __name__ == "__main__":
args = getOptions()
logger = logging.getLogger()
sl.setLogger(logger)
logger.info(
"Importing data with the folowing parameters: "
"\n\tWide: {0}"
"\n\tDesign:{1}"
"\n\tUniqID:{2}"
"\n\tAlpha: {3}".format(args.input, args.design, args.uniqID, args.alpha)
)
main(args)
|
Python
| 0.003436
|
@@ -2206,32 +2206,52 @@
dest=%22alpha%22,%0A
+ type=float,%0A
action=%22
|
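The one-line fix above, demonstrated: without type=float, argparse leaves --alpha as the string "0.5", which the downstream R call then receives as text. A self-contained check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-a', '--alpha', dest='alpha', type=float, required=True)
args = parser.parse_args(['--alpha', '0.5'])
assert isinstance(args.alpha, float) and args.alpha == 0.5
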
c772bff9251c049bdbeee0b502537166aaf8ff07
|
Make sure that the threshold can cope with data ranges of 0 to 1
|
ui/visualizations/VolumeVisualizationSimple.py
|
ui/visualizations/VolumeVisualizationSimple.py
|
"""
VolumeVisualizationSimple
:Authors:
Berend Klein Haneveld
"""
from VolumeVisualization import VolumeVisualization
from VolumeVisualization import VisualizationTypeSimple
from vtk import vtkVolumeProperty
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
from ui.widgets.SliderFloatWidget import SliderFloatWidget
from ui.widgets.ColorWidget import ColorChoiceWidget
from core.decorators import overrides
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtCore import Qt
class VolumeVisualizationSimple(VolumeVisualization):
"""
VolumeVisualization subclass for a simple visualization.
"""
def __init__(self):
super(VolumeVisualizationSimple, self).__init__()
self.visualizationType = VisualizationTypeSimple
# Create the volume property
self.volProp = vtkVolumeProperty()
self.volProp.SetIndependentComponents(True)
self.volProp.SetInterpolationTypeToLinear()
self.volProp.ShadeOn()
self.volProp.SetAmbient(0.1)
self.volProp.SetDiffuse(0.9)
self.volProp.SetSpecular(0.2)
self.volProp.SetSpecularPower(10.0)
self.volProp.SetScalarOpacityUnitDistance(0.8919)
self.minimum = 0
self.maximum = 1
self.lowerBound = 0
self.upperBound = 1
colors = [[255, 139, 0], [0, 147, 255], [0, 255, 147], [213, 100, 255], [255, 75, 75]]
self.colors = map(lambda x: [x[0] / 255.0, x[1] / 255.0, x[2] / 255.0], colors)
self.color = self.colors[0]
@overrides(VolumeVisualization)
def getParameterWidget(self):
"""
Returns a widget with sliders / fields with which properties of this
volume property can be adjusted.
:rtype: QWidget
"""
self.lowerBoundSlider = SliderFloatWidget()
self.lowerBoundSlider.setName("Lower:")
self.lowerBoundSlider.setRange([self.minimum, self.maximum])
self.lowerBoundSlider.setValue(self.lowerBound)
self.lowerBoundSlider.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lowerBoundSlider.valueChanged.connect(self.valueChanged)
self.upperBoundSlider = SliderFloatWidget()
self.upperBoundSlider.setName("Upper:")
self.upperBoundSlider.setRange([self.minimum, self.maximum])
self.upperBoundSlider.setValue(self.upperBound)
self.upperBoundSlider.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.upperBoundSlider.valueChanged.connect(self.valueChanged)
self.colorChooser = ColorChoiceWidget()
self.colorChooser.setName("Color:")
self.colorChooser.setColors(self.colors)
self.colorChooser.setColor(self.color)
self.colorChooser.setMinimumHeight(self.upperBoundSlider.sizeHint().height())
self.colorChooser.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.colorChooser.valueChanged.connect(self.valueChanged)
layout = QGridLayout()
layout.setAlignment(Qt.AlignTop)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.lowerBoundSlider)
layout.addWidget(self.upperBoundSlider)
layout.addWidget(self.colorChooser)
widget = QWidget()
widget.setLayout(layout)
try:
from ColumnResizer import ColumnResizer
self.columnResizer = ColumnResizer()
self.columnResizer.addWidgetsFromLayout(self.lowerBoundSlider.layout(), 0)
self.columnResizer.addWidgetsFromLayout(self.upperBoundSlider.layout(), 0)
self.columnResizer.addWidgetsFromLayout(self.colorChooser.layout(), 0)
self.otherColRes = ColumnResizer()
self.otherColRes.addWidgetsFromLayout(self.lowerBoundSlider.layout(), 2)
self.otherColRes.addWidgetsFromLayout(self.upperBoundSlider.layout(), 2)
except Exception, e:
print e
return widget
@overrides(VolumeVisualization)
def setImageData(self, imageData):
if imageData is None:
self.minimum = 0.0
self.maximum = 1.0
self.lowerBound = self.minimum
self.upperBound = self.maximum
return
self.minimum, self.maximum = imageData.GetScalarRange()
self.lowerBound = self.minimum
self.upperBound = self.maximum
@overrides(VolumeVisualization)
def setMapper(self, mapper):
pass
@overrides(VolumeVisualization)
def shaderType(self):
return 0
@overrides(VolumeVisualization)
def updateTransferFunction(self):
r, g, b = self.color
# Transfer functions and properties
self.colorFunction = vtkColorTransferFunction()
self.colorFunction.AddRGBPoint(self.minimum, r, g, b)
self.colorFunction.AddRGBPoint(self.lowerBound, r, g, b)
self.colorFunction.AddRGBPoint(self.lowerBound+1, r, g, b)
self.colorFunction.AddRGBPoint(self.maximum, r, g, b)
self.opacityFunction = vtkPiecewiseFunction()
self.opacityFunction.AddPoint(self.minimum, 0)
self.opacityFunction.AddPoint(self.lowerBound, 0)
self.opacityFunction.AddPoint(self.lowerBound+1, 1)
self.opacityFunction.AddPoint(self.upperBound-1, 1)
self.opacityFunction.AddPoint(self.upperBound, 0)
self.opacityFunction.AddPoint(self.maximum, 0)
self.volProp.SetColor(self.colorFunction)
self.volProp.SetScalarOpacity(self.opacityFunction)
self.updatedTransferFunction.emit()
@overrides(VolumeVisualization)
def valueChanged(self, value):
"""
This method is called when the value of one of the sliders / fields is
adjusted. Argument value is unused. It is just there so that it can be
connected to the signals of the interface elements.
:type value: int
"""
self.lowerBound = min(self.lowerBoundSlider.value(), self.upperBoundSlider.value())
self.upperBound = max(self.lowerBoundSlider.value(), self.upperBoundSlider.value())
self.color = self.colorChooser.color
self.updateTransferFunction()
|
Python
| 0.999594
|
@@ -4273,128 +4273,8 @@
b)%0A
-%09%09self.colorFunction.AddRGBPoint(self.lowerBound, r, g, b)%0A%09%09self.colorFunction.AddRGBPoint(self.lowerBound+1, r, g, b)%0A
%09%09se
@@ -4325,16 +4325,16 @@
, g, b)%0A
+
%0A%09%09self.
@@ -4523,16 +4523,21 @@
erBound+
+0.000
1, 1)%0A%09%09
@@ -4585,10 +4585,8 @@
ound
--1
, 1)
@@ -4629,24 +4629,31 @@
f.upperBound
++0.0001
, 0)%0A%09%09self.
|
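The fix above replaces the fixed ramp width of 1 with 0.0001: for normalized volumes the scalar range is only [0, 1], so a 1-unit ramp pushed the opacity control points outside the data range and the thresholds had no visible effect. A standalone sketch of the corrected opacity point layout (the helper name is illustrative):

def opacity_points(minimum, maximum, lower, upper, eps=0.0001):
    # Piecewise-linear opacity: 0 outside [lower, upper], 1 inside,
    # with ramps of width eps that stay inside a [0, 1] scalar range.
    return [
        (minimum, 0), (lower, 0), (lower + eps, 1),
        (upper, 1), (upper + eps, 0), (maximum, 0),
    ]

print(opacity_points(0.0, 1.0, 0.2, 0.8))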
ab93ea01dacc0fbd63fac91b1afcf5af1b711c2f
|
correct latest migration
|
umklapp/migrations/0009_teller_hasleft.py
|
umklapp/migrations/0009_teller_hasleft.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 19:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('umklapp', '0008_auto_20160528_2332'),
]
operations = [
migrations.AddField(
model_name='teller',
name='hasLeft',
field=models.BooleanField(default=True),
preserve_default=False,
),
]
|
Python
| 0.000002
|
@@ -63,13 +63,13 @@
-31
-19:39
+20:38
%0Afro
@@ -424,11 +424,12 @@
ult=
-Tru
+Fals
e),%0A
|
dbd11fcc20f6770afa097e65d0a81c82b7f0c334
|
Update tests with access token
|
mnubo/tests/test_auth_manager.py
|
mnubo/tests/test_auth_manager.py
|
from mnubo.api_manager import APIManager
import requests
import json
from requests import Response
from mock import MagicMock
def test_auth_manager_init():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
requests.post.assert_called_with('HOSTNAME/oauth/token?grant_type=client_credentials', headers={'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='})
auth.client_access_token = auth.fetch_client_access_token()
auth_authorization_header = auth.get_token_authorization_header()
authorization_header = auth.get_authorization_header()
api_url = auth.get_api_url()
auth_url = auth.get_auth_url()
assert auth.client_access_token == 'CLIENT_ACCESS_TOKEN'
assert auth_authorization_header == {'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='}
assert authorization_header == {'content-type': 'application/json', 'Authorization': 'Bearer CLIENT_ACCESS_TOKEN'}
assert api_url == 'HOSTNAME/api/v3/'
assert auth_url == 'HOSTNAME/oauth/token?grant_type=client_credentials'
def test_create_operations():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.post = MagicMock(return_value=response)
create = auth.post('ROUTE', None)
assert create == {"message": "SUCCESS"}
def test_put_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.put = MagicMock(return_value=response)
put = auth.put('ROUTE', None)
assert put == {"message": "SUCCESS"}
def test_delete_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.delete = MagicMock(return_value=response)
delete = auth.delete('ROUTE')
assert delete == {"message": "SUCCESS"}
|
Python
| 0
|
@@ -57,20 +57,24 @@
%0Aimport
-json
+datetime
%0Afrom re
@@ -125,16 +125,17 @@
icMock%0A%0A
+%0A
def test
@@ -217,39 +217,32 @@
%22access_token%22:%22
-CLIENT_
ACCESS_TOKEN%22,%22t
@@ -621,31 +621,24 @@
)%0A%0A auth.
-client_
access_token
@@ -651,23 +651,16 @@
h.fetch_
-client_
access_t
@@ -880,23 +880,16 @@
rt auth.
-client_
access_t
@@ -896,20 +896,19 @@
oken
+.token
== '
-CLIENT_
ACCE
@@ -917,16 +917,95 @@
_TOKEN'%0A
+ assert auth.access_token.expires_in == datetime.timedelta(seconds=3887999)%0A
asse
@@ -1246,23 +1246,16 @@
'Bearer
-CLIENT_
ACCESS_T
@@ -1429,32 +1429,32 @@
se = Response()%0A
+
response._co
@@ -1471,39 +1471,32 @@
%22access_token%22:%22
-CLIENT_
ACCESS_TOKEN%22,%22t
|
aa203b23eec8ff9ccbde3678f01f4ee14f43a09f
|
Fix typo introduced by code quality patch
|
src/storage/sqlite.py
|
src/storage/sqlite.py
|
import collections
from threading import current_thread, enumerate as threadingEnumerate, RLock
import Queue
import time
from helper_sql import *
from storage import InventoryStorage, InventoryItem
class SqliteInventory(InventoryStorage):
def __init__(self):
super(self.__class__, self).__init__()
self._inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
        self._streams = collections.defaultdict(set) # key = streamNumber, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple hours.
self.lock = RLock() # Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
def __contains__(self, hash):
with self.lock:
if hash in self._inventory:
return True
return bool(sqlQuery('SELECT 1 FROM inventory WHERE hash=?', hash))
def __getitem__(self, hash):
with self.lock:
if hash in self._inventory:
return self._inventory[hash]
rows = sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?', hash)
if not rows:
raise KeyError(hash)
return InventoryItem(*rows[0])
def __setitem__(self, hash, value):
with self.lock:
value = InventoryItem(*value)
self._inventory[hash] = value
self._streams[value.stream].add(hash)
def __delitem__(self, hash):
raise NotImplementedError
def __iter__(self):
with self.lock:
hashes = self._inventory.keys()[:]
hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
return hashes.__iter__()
def __len__(self):
with self.lock:
return len(self._inventory) + sqlQuery('SELECT count(*) FROM inventory')[0][0]
def by_type_and_tag(self, objectType, tag):
with self.lock:
values = [value for value in self._inventory.values() if value.type == type and value.tag == tag]
values += (InventoryItem(*value) for value in sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE objecttype=? AND tag=?', type, tag))
return values
def hashes_by_stream(self, stream):
with self.lock:
return self._streams[stream]
def unexpired_hashes_by_stream(self, stream):
with self.lock:
t = int(time.time())
hashes = [x for x, value in self._inventory.items() if value.stream == stream and value.expires > t]
hashes += (payload for payload, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, t))
return hashes
def flush(self):
with self.lock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
with SqlBulkExecute() as sql:
for objectHash, value in self._inventory.items():
sql.execute('INSERT INTO inventory VALUES (?, ?, ?, ?, ?, ?)', objectHash, *value)
self._inventory.clear()
def clean(self):
with self.lock:
sqlExecute('DELETE FROM inventory WHERE expirestime<?',int(time.time()) - (60 * 60 * 3))
self._streams.clear()
for objectHash, value in self.items():
self._streams[value.stream].add(objectHash)
|
Python
| 0.000003
|
@@ -2335,17 +2335,23 @@
type ==
-t
+objectT
ype and
@@ -2541,17 +2541,23 @@
tag=?',
-t
+objectT
ype, tag
|
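The typo fixed above is a classic shadowing bug: a code-quality patch renamed the method parameter to objectType, but the body still compared against type, which now resolves to the Python builtin class, so the filter never matched. A self-contained illustration:

class Item:
    def __init__(self, objecttype):
        self.type = objecttype

items = [Item(1), Item(2), Item(1)]

def by_type_buggy(objectType):
    # 'type' is the builtin class here, so the comparison is never true
    return [i for i in items if i.type == type]

def by_type_fixed(objectType):
    return [i for i in items if i.type == objectType]

print(len(by_type_buggy(1)), len(by_type_fixed(1)))  # 0 2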
3405dc54b611b3d12583f0ff14f6b8d9e32a18a9
|
Revert "fixed pipeline is dropping frames and GUI can't see any videos"
|
voctocore/lib/sources/decklinkavsource.py
|
voctocore/lib/sources/decklinkavsource.py
|
#!/usr/bin/env python3
import logging
import re
from gi.repository import Gst, GLib
from lib.config import Config
from lib.sources.avsource import AVSource
class DeckLinkAVSource(AVSource):
timer_resolution = 0.5
def __init__(self, name, has_audio=True, has_video=True):
super().__init__('DecklinkAVSource', name, has_audio, has_video)
self.device = Config.getDeckLinkDeviceNumber(name)
self.aconn = Config.getDeckLinkAudioConnection(name)
self.vconn = Config.getDeckLinkVideoConnection(name)
self.vmode = Config.getDeckLinkVideoMode(name)
self.vfmt = Config.getDeckLinkVideoFormat(name)
self.name = name
self.signalPad = None
self.build_pipeline()
def port(self):
return "Decklink #{}".format(self.device)
def attach(self, pipeline):
super().attach(pipeline)
self.signalPad = pipeline.get_by_name(
'decklinkvideosrc-{}'.format(self.name))
GLib.timeout_add(self.timer_resolution * 1000, self.do_timeout)
def do_timeout(self):
self.inputSink.set_property(
'alpha', 1.0 if self.num_connections() > 0 else 0.0)
# just come back
return True
def num_connections(self):
return 1 if self.signalPad and self.signalPad.get_property('signal') else 0
def __str__(self):
return 'DecklinkAVSource[{name}] reading card #{device}'.format(
name=self.name,
device=self.device
)
def build_source(self):
# A video source is required even when we only need audio
pipe = """
decklinkvideosrc
name=decklinkvideosrc-{name}
device-number={device}
connection={conn}
video-format={fmt}
mode={mode}
drop-no-signal-frames=true
""".format(name=self.name,
device=self.device,
conn=self.vconn,
mode=self.vmode,
fmt=self.vfmt
)
if self.has_video:
if self.build_deinterlacer():
pipe += """\
! {deinterlacer}
""".format(deinterlacer=self.build_deinterlacer())
pipe += """\
! videoconvert
! videoscale
! videorate
name=vout-{name}
""".format(
deinterlacer=self.build_deinterlacer(),
name=self.name
)
else:
pipe += """\
! fakesink
"""
if self.has_audio:
pipe += """
decklinkaudiosrc
name=decklinkaudiosrc-{name}
device-number={device}
connection={conn}
channels={channels}
""".format( name=self.name,
device=self.device,
conn=self.aconn,
channels=Config.getNumAudioStreams())
return pipe
def build_audioport(self):
return 'decklinkaudiosrc-{name}.'.format(name=self.name)
def build_videoport(self):
return 'vout-{}.'.format(self.name)
|
Python
| 0
|
@@ -1776,43 +1776,8 @@
de%7D%0A
- drop-no-signal-frames=true%0A
%22%22%22.
|
c192e414c05266e555379564dcb50560c378265d
|
Fix for mediainfo on .bin files
|
mod_sample/media_info_parser.py
|
mod_sample/media_info_parser.py
|
import os
import traceback
import sys
from lxml import etree
import subprocess
import xmltodict
class InvalidMediaInfoError(Exception):
def __init__(self, message):
Exception.__init__(self)
self.message = message
class MediaInfoFetcher:
def __init__(self, sample):
from run import config
# Fetch media info
media_info_path = os.path.join(
config.get('SAMPLE_REPOSITORY', ''), 'TestFiles', 'media',
sample.sha + '.xml')
if os.path.isfile(media_info_path):
with open(media_info_path) as fd:
doc = xmltodict.parse(fd.read())
if 'Mediainfo' in doc:
self.media_info = doc['Mediainfo']
self.video_tracks = []
self.caption_tracks = []
self.audio_tracks = []
self.other_tracks = []
self.general_track = {}
self.parsed = False
else:
raise InvalidMediaInfoError(
'No Mediainfo root element present')
else:
raise InvalidMediaInfoError(
'File {path} not found'.format(path=media_info_path))
def get_media_info(self, force_parse=False):
result = [{
'name': 'Media info version',
'value': self.media_info['@version']
}]
if not self.parsed or force_parse:
self._process_tracks()
result.append({
'name': 'General',
'value': self.general_track
})
result.append({
'name': 'Video',
'value': self.video_tracks
})
result.append({
'name': 'Captions',
'value': self.caption_tracks
})
return result
def _process_tracks(self):
# Reset stored tracks
if self.parsed:
self.video_tracks = []
self.caption_tracks = []
self.audio_tracks = []
self.other_tracks = []
self.general_track = {}
self.parsed = False
try:
file_info = self.media_info['File']
if 'track' in file_info:
for track in file_info['track']:
self._process_track(track)
# Ran through all tracks, no errors, so parsed successfully
self.parsed = True
else:
raise InvalidMediaInfoError('No tracks present in XML')
except KeyError:
traceback.print_exc()
raise InvalidMediaInfoError('No File element present in XML')
def _process_track(self, track):
if '@type' not in track:
raise InvalidMediaInfoError('Track file does not contain a type')
track_type = track['@type']
if track_type == 'General':
self._process_general(track)
elif track_type == 'Video':
self._process_video(track)
elif track_type == 'Audio':
# Implement at some point
pass
elif track_type == 'Text':
self._process_text(track)
# Other tracks are ignored for now
return
def _process_generic(self, track, keys):
result = {}
# Values we could use, but they're not always present
for key in keys:
if key in track:
result[key.replace('_', ' ')] = track[key]
return result
def _process_general(self, track):
self.general_track = self._process_generic(
track, ['Format', 'File_size', 'Duration', 'Codec_ID']
)
def _process_video(self, track):
result = self._process_generic(
track, ['Display_aspect_ratio', 'Writing_library', 'Duration',
'Codec_ID'])
# Append non standard ones
if 'Width' in track and 'Height' in track:
result['Resolution'] = '{width} x {height}'.format(
width=track['Width'],
height=track['Height'])
if 'Format' in track:
v_format = track['Format']
if 'Format_Info' in track:
v_format += ' ({info})'.format(info=track['Format_Info'])
result['Format'] = v_format
if 'Frame_rate' in track:
v_rate = track['Frame_rate']
if 'Frame_rate_mode' in track:
v_rate += ' (mode: {mode})'.format(
mode=track['Frame_rate_mode'])
result['Frame rate'] = v_rate
if 'Scan_type' in track:
v_scan = track['Scan_type']
if 'Scan_order' in track:
v_scan += ' ({order})'.format(order=track['Scan_order'])
result['Scan type'] = v_scan
name = 'Stream nr. {number}'.format(number=len(self.video_tracks))
if 'ID' in track:
name = 'ID: {number}'.format(number=track['ID'])
self.video_tracks.append({'name': name, 'value': result})
def _process_text(self, track):
self.caption_tracks.append({
'name': 'ID: {number}'.format(number=track['ID']),
'value': self._process_generic(
track, ['Format', 'Menu_ID', 'Muxing_mode'])
})
@staticmethod
def generate_media_xml(sample):
from run import config
if not sys.platform.startswith("linux"):
raise InvalidMediaInfoError('Windows generation of MediaInfo '
'unsupported')
media_folder = os.path.join(
config.get('SAMPLE_REPOSITORY', ''), 'TestFiles')
media_info_path = os.path.join(
media_folder, 'media', sample.sha + '.xml')
output_handle = open(media_info_path, 'w')
media_path = os.path.join(media_folder, sample.filename)
process = subprocess.Popen(
['mediainfo', '--Output=XML', media_path], stdout=output_handle)
process.wait()
if os.path.isfile(media_info_path):
# Load media info, and replace full pathname
tree = etree.parse(media_info_path)
for elem in tree.xpath("//track[@type='General']"):
for child in elem:
if child.tag == 'Complete_name':
child.text = child.text.replace(media_folder, '')
break
# Store
tree.write(media_info_path, encoding='utf-8',
xml_declaration=True, pretty_print=True)
# Return instance
return MediaInfoFetcher(sample)
raise InvalidMediaInfoError('Could not generate media info')
|
Python
| 0
|
@@ -32,16 +32,53 @@
ort sys%0A
+from collections import OrderedDict%0A%0A
from lxm
@@ -2720,32 +2720,224 @@
k(self, track):%0A
+ if type(track) is not OrderedDict:%0A raise InvalidMediaInfoError('There is no useful information '%0A 'present in this MediaInfo file.')%0A
if '@typ
|
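The guard added above reflects how xmltodict maps every XML element to an OrderedDict; for a .bin file MediaInfo emits almost no structure, and a 'track' can come back as a plain string, which then fails on subscripting. A sketch of the same guard on plain data (isinstance is used here instead of the commit's exact type(...) is not OrderedDict test):

from collections import OrderedDict

def process_track(track):
    # Anything that is not a mapping carries no usable track fields.
    if not isinstance(track, OrderedDict):
        raise ValueError('There is no useful information '
                         'present in this MediaInfo file.')
    return track['@type']

print(process_track(OrderedDict([('@type', 'General')])))  # General
try:
    process_track('bare text node')
except ValueError as exc:
    print(exc)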
ed863a5f792dcd11ef71361037fd19e022707821
|
fix "get_app_template_dirs"
|
django_assets/loaders.py
|
django_assets/loaders.py
|
import os
import sys
from django.conf import settings
from django import template
from webassets.loaders import GlobLoader, LoaderError
try:
set
except NameError:
from sets import Set as set
from django_assets.templatetags.assets import AssetsNode as AssetsNodeOriginal
try:
from django.templatetags.assets import AssetsNode as AssetsNodeMapped
except ImportError:
# Since Django #12295, custom templatetags are no longer mapped into
# the Django namespace. Support both versions.
AssetsNodeMapped = None
AssetsNodeClasses = tuple(
filter(lambda c: bool(c), (AssetsNodeOriginal, AssetsNodeMapped))
)
__all__ = ('DjangoLoader', 'get_django_template_dirs',)
def _shortpath(abspath):
"""Make an absolute path relative to the project's settings module,
which would usually be the project directory.
"""
b = os.path.dirname(os.path.normpath(sys.modules[settings.SETTINGS_MODULE].__file__))
p = os.path.normpath(abspath)
return p[len(os.path.commonprefix([b, p])):]
def uniq(seq):
"""Remove duplicate items, preserve order.
http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
FILESYSTEM_LOADERS = [
'django.template.loaders.filesystem.load_template_source', # <= 1.1
'django.template.loaders.filesystem.Loader', # > 1.2
]
APPDIR_LOADERS = [
'django.template.loaders.app_directories.load_template_source', # <= 1.1
'django.template.loaders.app_directories.Loader' # > 1.2
]
def get_django_template_dirs(loader_list=None):
"""Build a list of template directories based on configured loaders.
"""
if not loader_list:
loader_list = settings.TEMPLATE_LOADERS
template_dirs = []
for loader in loader_list:
if loader in FILESYSTEM_LOADERS:
template_dirs.extend(settings.TEMPLATE_DIRS)
if loader in APPDIR_LOADERS:
from django.template.loaders.app_directories import app_template_dirs
template_dirs.extend(app_template_dirs)
if isinstance(loader, (list, tuple)) and len(loader) >= 2:
            # The cached loader uses the tuple syntax, but we simply search all
            # tuples for nested loaders; this also lets custom loaders work.
template_dirs.extend(get_django_template_dirs(loader[1]))
return uniq(template_dirs)
class DjangoLoader(GlobLoader):
"""Parse all the templates of the current Django project, try to find
bundles in active use.
"""
def load_bundles(self):
bundles = []
for template_dir in get_django_template_dirs():
for filename in self.glob_files((template_dir, '*.html'), True):
bundles.extend(self.with_file(filename, self._parse) or [])
return bundles
def _parse(self, filename, contents):
# parse the template for asset nodes
try:
t = template.Template(contents)
except template.TemplateSyntaxError as e:
raise LoaderError('Django parser failed: %s' % e)
else:
result = []
def _recurse_node(node):
# depending on whether the template tag is added to
# builtins, or loaded via {% load %}, it will be
# available in a different module
if node is not None and \
isinstance(node, AssetsNodeClasses):
# try to resolve this node's data; if we fail,
# then it depends on view data and we cannot
# manually rebuild it.
try:
bundle = node.resolve()
except template.VariableDoesNotExist:
raise LoaderError('skipping bundle %s, depends on runtime data' % node.output)
else:
result.append(bundle)
# see Django #7430
for subnode in hasattr(node, 'nodelist') \
and node.nodelist\
or []:
_recurse_node(subnode)
for node in t: # don't move into _recurse_node, ``Template`` has a .nodelist attribute
_recurse_node(node)
return result
|
Python
| 0.000004
|
@@ -1988,38 +1988,20 @@
emplate.
-loaders.app_directorie
+util
s import
@@ -2001,16 +2001,20 @@
import
+get_
app_temp
@@ -2056,16 +2056,20 @@
.extend(
+get_
app_temp
@@ -2077,16 +2077,29 @@
ate_dirs
+('templates')
)%0A
|
f1fdc5b507af7fd0fbe6ef3bb1b3c9e997dc7626
|
Fix a data format bug in get_trace.py
|
pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
|
pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
|
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
r"""
Generates json trace files viewable using chrome://tracing using RPCs from a
connected HdlcRpcClient.
Example usage:
python pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py -s localhost:33000
-o trace.json
-t out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
pw_trace_tokenized/pw_trace_protos/trace_rpc.proto
"""
import argparse
import logging
import glob
from pathlib import Path
import sys
from typing import Collection, Iterable, Iterator
import serial # type: ignore
from pw_tokenizer import database
from pw_trace import trace
from pw_hdlc.rpc import HdlcRpcClient, default_channels
from pw_hdlc.rpc_console import SocketClientImpl
from pw_trace_tokenized import trace_tokenized
_LOG = logging.getLogger('pw_trace_tokenizer')
PW_RPC_MAX_PACKET_SIZE = 256
SOCKET_SERVER = 'localhost'
SOCKET_PORT = 33000
MKFIFO_MODE = 0o666
def _expand_globs(globs: Iterable[str]) -> Iterator[Path]:
for pattern in globs:
for file in glob.glob(pattern, recursive=True):
yield Path(file)
def get_hdlc_rpc_client(device: str, baudrate: int,
proto_globs: Collection[str], socket_addr: str,
**kwargs):
"""Get the HdlcRpcClient based on arguments."""
del kwargs # ignore
if not proto_globs:
proto_globs = ['**/*.proto']
protos = list(_expand_globs(proto_globs))
if not protos:
_LOG.critical('No .proto files were found with %s',
', '.join(proto_globs))
_LOG.critical('At least one .proto file is required')
return 1
_LOG.debug('Found %d .proto files found with %s', len(protos),
', '.join(proto_globs))
# TODO(rgoliver): When pw has a generalized transport for RPC this should
# use it so it isn't specific to HDLC
if socket_addr is None:
serial_device = serial.Serial(device, baudrate, timeout=1)
read = lambda: serial_device.read(8192)
write = serial_device.write
else:
try:
socket_device = SocketClientImpl(socket_addr)
read = socket_device.read
write = socket_device.write
except ValueError:
_LOG.exception('Failed to initialize socket at %s', socket_addr)
return 1
return HdlcRpcClient(read, protos, default_channels(write))
def get_trace_data_from_device(client):
""" Get the trace data using RPC from a Client"""
data = b''
result = \
client.client.channel(1).rpcs.pw.trace.TraceService.GetTraceData().get()
for streamed_data in result:
data = data + bytes([len(streamed_data.data)])
data = data + streamed_data.data
_LOG.debug(''.join(format(x, '02x') for x in streamed_data.data))
return data
def _parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', '--device', help='the serial port to use')
parser.add_argument('-b',
'--baudrate',
type=int,
default=115200,
help='the baud rate to use')
group.add_argument('-s',
'--socket-addr',
type=str,
help='use socket to connect to server, type default for\
localhost:33000, or manually input the server address:port')
parser.add_argument('-o',
'--trace_output',
dest='trace_output_file',
help=('The json file to which to write the output.'))
parser.add_argument(
'-t',
'--trace_token_database',
help='Databases (ELF, binary, or CSV) to use to lookup trace tokens.')
parser.add_argument('proto_globs',
nargs='+',
help='glob pattern for .proto files')
parser.add_argument(
'-f',
'--ticks_per_second',
type=int,
dest='ticks_per_second',
default=1000,
help=('The clock rate of the trace events (Default 1000).'))
return parser.parse_args()
def _main(args):
token_database = \
database.load_token_database(args.trace_token_database, domain="trace")
_LOG.info(database.database_summary(token_database))
client = get_hdlc_rpc_client(**vars(args))
data = get_trace_data_from_device(client)
events = trace_tokenized.get_trace_events([token_database], data,
args.ticks_per_second)
json_lines = trace.generate_trace_json(events)
trace_tokenized.save_trace_file(json_lines, args.trace_output_file)
if __name__ == '__main__':
if sys.version_info[0] < 3:
sys.exit('ERROR: The detokenizer command line tools require Python 3.')
_main(_parse_args())
|
Python
| 0.000006
|
@@ -3003,17 +3003,16 @@
%0A %22%22%22
-
Get the
@@ -3072,26 +3072,17 @@
-result = %5C%0A
+service =
cli
@@ -3129,16 +3129,37 @@
eService
+%0A result = service
.GetTrac
@@ -3170,13 +3170,17 @@
a().
-get()
+responses
%0A
|
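Independent of the RPC plumbing the fix touches, get_trace_data_from_device frames each streamed response as a one-byte length prefix followed by the payload. A standalone sketch of that framing and its inverse (chunks must stay under 256 bytes for the one-byte prefix to hold):

def frame(chunks):
    data = b''
    for chunk in chunks:
        data += bytes([len(chunk)]) + chunk  # 1-byte length prefix
    return data

def deframe(data):
    chunks, i = [], 0
    while i < len(data):
        n = data[i]
        chunks.append(data[i + 1:i + 1 + n])
        i += 1 + n
    return chunks

packed = frame([b'abc', b'de'])
print(deframe(packed))  # [b'abc', b'de']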
c5bfd55147e7fb18264f601c34e180453974f55e
|
DEBUG messages deleted
|
vt_manager/src/python/agent/provisioning/ProvisioningDispatcher.py
|
vt_manager/src/python/agent/provisioning/ProvisioningDispatcher.py
|
'''
@author: msune
Provisioning dispatcher. Selects the appropriate Driver for the VT tech
'''
from communications.XmlRpcClient import XmlRpcClient
from utils.VmMutexStore import VmMutexStore
import threading
class ProvisioningDispatcher:
@staticmethod
def __getProvisioningDispatcher(vtype):
		#Import of Dispatchers must go here to avoid a circular import dependency
from xen.provisioning.XenProvisioningDispatcher import XenProvisioningDispatcher
if vtype == "xen":
return XenProvisioningDispatcher
else:
raise Exception("Virtualization type not supported by the agent")
@staticmethod
def __dispatchAction(dispatcher,action,vm):
#Inventory
if action.type_ == "create":
return dispatcher.createVMfromImage(action.id,vm)
if action.type_ == "modify" :
return dispatcher.modifyVM(action.id,vm)
if action.type_ == "delete" :
return dispatcher.deleteVM(action.id,vm)
#Scheduling
if action.type_ == "start":
return dispatcher.startVM(action.id,vm)
if action.type_ == "reboot" :
return dispatcher.rebootVM(action.id,vm)
if action.type_ == "stop" :
return dispatcher.stopVM(action.id,vm)
if action.type_ == "hardStop" :
return dispatcher.hardStopVM(action.id,vm)
raise Exception("Unknown action type")
@staticmethod
def processProvisioning(provisioning):
print "******************************************************************\n
LEODEBUG: CURRENT THREAD: "+str(threading.currentThread().get_ident())+"\n
*******************************************************************"
for action in provisioning.action:
vm = action.server.virtual_machines[0]
try:
dispatcher = ProvisioningDispatcher.__getProvisioningDispatcher(vm.virtualization_type)
except Exception as e:
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"FAILED",str(e))
print e
return
try:
#Acquire VM lock
VmMutexStore.lock(vm)
#Send async notification
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"ONGOING","")
ProvisioningDispatcher.__dispatchAction(dispatcher,action,vm)
except Exception as e:
#TODO improve this trace
print e
raise e
finally:
#Release VM lock
VmMutexStore.unlock(vm)
##Abstract methods definition for ProvisioningDispatchers
#Inventory
@staticmethod
def createVMfromImage(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def modifyVM(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def deleteVM(id,vm):
raise Exception("Abstract method cannot be called")
#Scheduling
def startVM(id,vm):
raise Exception("Abstract method cannot be called")
def rebootVM(id,vm):
raise Exception("Abstract method cannot be called")
def stopVM(id,vm):
raise Exception("Abstract method cannot be called")
def hardStopVM(id,vm):
raise Exception("Abstract method cannot be called")
|
Python
| 0.000001
|
@@ -1315,276 +1315,8 @@
g):%0A
-%09%09print %22******************************************************************%5Cn%0A LEODEBUG: CURRENT THREAD: %22+str(threading.currentThread().get_ident())+%22%5Cn%0A *******************************************************************%22%0A
%09%09fo
|
a41de0b10a9416a290bbaf0b36c071cc0492439d
|
Fix check of send_sci sync_timeout parameter
|
devicecloud/sci.py
|
devicecloud/sci.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International, Inc.
"""Server Command Interface functionality"""
from devicecloud.apibase import APIBase
from xml.etree import ElementTree as ET
import six
SCI_TEMPLATE = """\
<sci_request version="1.0">
<{operation}{synchronous}{cache}{sync_timeout}{allow_offline}{wait_for_reconnect}>
<targets>
{targets}
</targets>
{payload}
</{operation}>
</sci_request>
""".replace(" ", "").replace("\r", "").replace("\n", "") # two spaces is indentation
class TargetABC(object):
"""Abstract base class for all target types"""
class DeviceTarget(TargetABC):
"""Target a specific device"""
def __init__(self, device_id):
self._device_id = device_id
def to_xml(self):
return '<device id="{}"/>'.format(self._device_id)
class AllTarget(TargetABC):
"""Target all devices"""
def __init__(self):
pass
def to_xml(self):
return '<device id="all"/>'
class TagTarget(TargetABC):
"""Target devices having a specific tag"""
def __init__(self, tag):
self._tag = tag
def to_xml(self):
return '<device tag="{}"/>'.format(self._tag)
class GroupTarget(TargetABC):
"""Target devices in a specific group"""
def __init__(self, group):
self._group = group
def to_xml(self):
return '<group path="{}"/>'.format(self._group)
class AsyncRequestProxy(object):
"""An object representing an asynychronous SCI request.
Can be used for polling the status of the corresponding request.
:ivar job_id: the ID in device cloud of the job
:ivar response: the response to the request if completed
:ivar completed: True if the request has completed, False otherwise; queries on read
"""
def __init__(self, job_id, conn):
self.job_id = job_id
self._conn = conn
self.response = None
@property
def completed(self):
if self.response is not None:
return True
resp = self._conn.get('/ws/sci/{0}'.format(self.job_id))
dom = ET.fromstring(resp.content)
status = dom.find('.//status')
if status is not None and status.text == 'complete':
self.response = resp.content
return True
else:
return False
class ServerCommandInterfaceAPI(APIBase):
"""Encapsulate Server Command Interface API"""
def get_async_job(self, job_id):
"""Query an asynchronous SCI job by ID
This is useful if the job was not created with send_sci_async().
:param int job_id: The job ID to query
:returns: The SCI response from GETting the job information
"""
uri = "/ws/sci/{0}".format(job_id)
# TODO: do parsing here?
return self._conn.get(uri)
def send_sci_async(self, operation, target, payload, **sci_options):
"""Send an asynchronous SCI request, and wraps the job in an object
to manage it
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
"""
sci_options['synchronous'] = False
resp = self.send_sci(operation, target, payload, **sci_options)
dom = ET.fromstring(resp.content)
job_element = dom.find('.//jobId')
if job_element is None:
return
job_id = int(job_element.text)
return AsyncRequestProxy(job_id, self._conn)
def send_sci(self, operation, target, payload, reply=None, synchronous=None, sync_timeout=None,
cache=None, allow_offline=None, wait_for_reconnect=None):
"""Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
"""
if not isinstance(payload, six.string_types) and not isinstance(payload, six.binary_type):
raise TypeError("payload is required to be a string or bytes")
        # validate targets and build the targets xml section
try:
iter(target)
targets = target
except TypeError:
targets = [target, ]
if not all(isinstance(t, TargetABC) for t in targets):
raise TypeError("Target(s) must each be instances of TargetABC")
targets_xml = "".join(t.to_xml() for t in targets)
# reply argument
if not isinstance(reply, (type(None), six.string_types)):
raise TypeError("reply must be either None or a string")
if reply is not None:
reply_xml = ' reply="{}"'.format(reply)
else:
reply_xml = ''
# synchronous argument
if not isinstance(synchronous, (type(None), bool)):
raise TypeError("synchronous expected to be either None or a boolean")
if synchronous is not None:
synchronous_xml = ' synchronous="{}"'.format('true' if synchronous else 'false')
else:
synchronous_xml = ''
# sync_timeout argument
# TODO: What units is syncTimeout in? seconds?
if not sync_timeout is None or isinstance(sync_timeout, six.integer_types):
raise TypeError("sync_timeout expected to either be None or a number")
if sync_timeout is not None:
sync_timeout_xml = ' syncTimeout="{}"'.format(sync_timeout)
else:
sync_timeout_xml = ''
# cache argument
if not isinstance(cache, (type(None), bool)):
raise TypeError("cache expected to either be None or a boolean")
if cache is not None:
cache_xml = ' cache="{}"'.format('true' if cache else 'false')
else:
cache_xml = ''
# allow_offline argument
if not isinstance(allow_offline, (type(None), bool)):
raise TypeError("allow_offline is expected to be either None or a boolean")
if allow_offline is not None:
allow_offline_xml = ' allowOffline="{}"'.format('true' if allow_offline else 'false')
else:
allow_offline_xml = ''
# wait_for_reconnect argument
if not isinstance(wait_for_reconnect, (type(None), bool)):
raise TypeError("wait_for_reconnect expected to be either None or a boolean")
if wait_for_reconnect is not None:
wait_for_reconnect_xml = ' waitForReconnect="{}"'.format('true' if wait_for_reconnect else 'false')
else:
wait_for_reconnect_xml = ''
full_request = SCI_TEMPLATE.format(
operation=operation,
targets=targets_xml,
reply=reply_xml,
synchronous=synchronous_xml,
sync_timeout=sync_timeout_xml,
cache=cache_xml,
allow_offline=allow_offline_xml,
wait_for_reconnect=wait_for_reconnect_xml,
payload=payload
)
# TODO: do parsing here?
return self._conn.post("/ws/sci", full_request)
|
Python
| 0
|
@@ -5763,20 +5763,16 @@
if
-not
sync_tim
@@ -5779,23 +5779,32 @@
eout is
+not
None
-or
+and not
isinsta
|
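The one-line fix above is about operator precedence: not sync_timeout is None or isinstance(...) parses as (sync_timeout is not None) or isinstance(...), so the guard raised for every non-None value, including valid integers. A minimal before/after (int stands in for six.integer_types):

def check_buggy(sync_timeout):
    # parses as (sync_timeout is not None) or isinstance(...):
    # raises for every non-None value, even valid integers
    if not sync_timeout is None or isinstance(sync_timeout, int):
        raise TypeError("sync_timeout expected to either be None or a number")

def check_fixed(sync_timeout):
    if sync_timeout is not None and not isinstance(sync_timeout, int):
        raise TypeError("sync_timeout expected to either be None or a number")

check_fixed(None)
check_fixed(30)  # both pass
try:
    check_buggy(30)
except TypeError as exc:
    print("buggy guard rejects a valid value:", exc)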
b78165d68e1e01e722b746e926a36b5680debdfa
|
remove email filter and refactor
|
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
|
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
|
# MIT License
# Copyright (c) 2019 MassChallenge, Inc.
from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
MentorProgramOfficeHourHelper,
)
LOOKUPS = {
'mentor_email': 'mentor__email__icontains',
'mentor_id': 'mentor_id',
'finalist_email': 'finalist__email__icontains',
'finalist_id': 'finalist_id',
}
class MentorProgramOfficeHourListView(BaseListView):
view_name = "office_hour"
helper_class = MentorProgramOfficeHourHelper
def filter(self, queryset):
if self.request.query_params.keys():
filter_values = self._get_filter()
return queryset.filter(**filter_values)
return queryset
def _get_filter(self):
query_params = self.request.query_params.dict()
query_filter = {
LOOKUPS[key]: value for key, value in query_params.items()
if key in LOOKUPS.keys()
}
return query_filter
|
Python
| 0
|
@@ -180,188 +180,8 @@
)%0A%0A%0A
-LOOKUPS = %7B%0A 'mentor_email': 'mentor__email__icontains',%0A 'mentor_id': 'mentor_id',%0A 'finalist_email': 'finalist__email__icontains',%0A 'finalist_id': 'finalist_id',%0A%7D%0A%0A%0A
clas
@@ -353,272 +353,188 @@
-if self.request.query_params.keys():%0A filter_values = self._get_filter()%0A return queryset.filter(**filter_values)%0A
+allowed_params = %5B'mentor_id', 'finalist_d'%5D%0A param_items = self.request.query_params.items()%0A%0A
-return queryset%0A%0A def _get_filter(self):%0A query_params = self.request.query_params.dict()%0A query_filter
+ if not param_items:%0A return queryset%0A%0A filter_values
= %7B
@@ -550,20 +550,11 @@
-LOOKUPS%5B
key
-%5D
: va
@@ -561,16 +561,17 @@
lue for
+(
key, val
@@ -576,32 +576,24 @@
alue
+)
in
-query_
param
-s.
+_
items
-()
%0A
@@ -615,57 +615,68 @@
in
-LOOKUPS.keys()%0A %7D%0A return query_filter
+allowed_params%7D%0A return queryset.filter(**filter_values)
%0A
|
05b7f56bdfa600e72d4cca5a4c51324ff3c94d4d
|
Update file distancematrixtest.py
|
pymsascoring/distancematrix/test/distancematrixtest.py
|
pymsascoring/distancematrix/test/distancematrixtest.py
|
import unittest
__author__ = "Antonio J. Nebro"
class TestMethods(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -10,16 +10,87 @@
ittest%0A%0A
+from pymsascoring.distancematrix.distancematrix import DistanceMatrix%0A%0A
__author
@@ -188,16 +188,780 @@
pass%0A%0A
+ def test_should_default_gap_penalty_be_minus_eight(self):%0A matrix = DistanceMatrix()%0A%0A self.assertEqual(-8, matrix.get_gap_penalty())%0A%0A def test_should_constructor__modify_the_gap_penalty(self):%0A matrix = DistanceMatrix(-10)%0A%0A self.assertEqual(-10, matrix.get_gap_penalty())%0A%0A def test_should_get_distance_return_the_gap_penalty_if_a_char_is_a_gap(self):%0A matrix = DistanceMatrix()%0A%0A self.assertEqual(matrix.get_gap_penalty(), matrix.get_distance('A', '-'))%0A self.assertEqual(matrix.get_gap_penalty(), matrix.get_distance('-', 'B'))%0A%0A def test_should_get_distance_return_one_if_the_two_chars_are_gaps(self):%0A matrix = DistanceMatrix()%0A%0A self.assertEqual(1, matrix.get_distance('-', '-'))%0A
%0Aif __na
|
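The added tests pin down the DistanceMatrix contract: a default gap penalty of -8, a constructor override, the gap penalty returned when exactly one character is a gap, and 1 when both are gaps. A hypothetical minimal class that satisfies them (the real pymsascoring implementation also scores substitutions, which this sketch omits):

class DistanceMatrix:
    def __init__(self, gap_penalty=-8):
        self._gap_penalty = gap_penalty

    def get_gap_penalty(self):
        return self._gap_penalty

    def get_distance(self, char1, char2):
        if char1 == '-' and char2 == '-':
            return 1
        if char1 == '-' or char2 == '-':
            return self._gap_penalty
        return 0  # substitution scoring elided in this sketch

assert DistanceMatrix().get_gap_penalty() == -8
assert DistanceMatrix(-10).get_gap_penalty() == -10
assert DistanceMatrix().get_distance('A', '-') == -8
assert DistanceMatrix().get_distance('-', '-') == 1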
720ee3792dc45f514d82108f217bb99e538ef98b
|
Fix export_to_dot for networkx package changes
|
taskflow/types/graph.py
|
taskflow/types/graph.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
import networkx as nx
import six
def _common_format(g, edge_notation):
lines = []
lines.append("Name: %s" % g.name)
lines.append("Type: %s" % type(g).__name__)
lines.append("Frozen: %s" % nx.is_frozen(g))
lines.append("Density: %0.3f" % nx.density(g))
lines.append("Nodes: %s" % g.number_of_nodes())
for n, n_data in g.nodes_iter(data=True):
if n_data:
lines.append(" - %s (%s)" % (n, n_data))
else:
lines.append(" - %s" % n)
lines.append("Edges: %s" % g.number_of_edges())
for (u, v, e_data) in g.edges_iter(data=True):
if e_data:
lines.append(" %s %s %s (%s)" % (u, edge_notation, v, e_data))
else:
lines.append(" %s %s %s" % (u, edge_notation, v))
return lines
class Graph(nx.Graph):
"""A graph subclass with useful utility functions."""
def __init__(self, data=None, name=''):
super(Graph, self).__init__(name=name, data=data)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx.to_pydot(self).to_string()
def pformat(self):
"""Pretty formats your graph into a string."""
return os.linesep.join(_common_format(self, "<->"))
class DiGraph(nx.DiGraph):
"""A directed graph subclass with useful utility functions."""
def __init__(self, data=None, name=''):
super(DiGraph, self).__init__(name=name, data=data)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def get_edge_data(self, u, v, default=None):
"""Returns a *copy* of the edge attribute dictionary between (u, v).
NOTE(harlowja): this differs from the networkx get_edge_data() as that
function does not return a copy (but returns a reference to the actual
edge data).
"""
try:
return dict(self.adj[u][v])
except KeyError:
return default
def topological_sort(self):
"""Return a list of nodes in this graph in topological sort order."""
return nx.topological_sort(self)
def pformat(self):
"""Pretty formats your graph into a string.
This pretty formatted string representation includes many useful
        details about your graph, including: name, type, frozenness, node count,
nodes, edge count, edges, graph density and graph cycles (if any).
"""
lines = _common_format(self, "->")
cycles = list(nx.cycles.recursive_simple_cycles(self))
lines.append("Cycles: %s" % len(cycles))
for cycle in cycles:
buf = six.StringIO()
buf.write("%s" % (cycle[0]))
for i in range(1, len(cycle)):
buf.write(" --> %s" % (cycle[i]))
buf.write(" --> %s" % (cycle[0]))
lines.append(" %s" % buf.getvalue())
return os.linesep.join(lines)
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx.to_pydot(self).to_string()
def is_directed_acyclic(self):
"""Returns if this graph is a DAG or not."""
return nx.is_directed_acyclic_graph(self)
def no_successors_iter(self):
"""Returns an iterator for all nodes with no successors."""
for n in self.nodes_iter():
if not len(self.successors(n)):
yield n
def no_predecessors_iter(self):
"""Returns an iterator for all nodes with no predecessors."""
for n in self.nodes_iter():
if not len(self.predecessors(n)):
yield n
def bfs_predecessors_iter(self, n):
"""Iterates breadth first over *all* predecessors of a given node.
This will go through the nodes predecessors, then the predecessor nodes
predecessors and so on until no more predecessors are found.
NOTE(harlowja): predecessor cycles (if they exist) will not be iterated
over more than once (this prevents infinite iteration).
"""
visited = set([n])
queue = collections.deque(self.predecessors_iter(n))
while queue:
pred = queue.popleft()
if pred not in visited:
yield pred
visited.add(pred)
for pred_pred in self.predecessors_iter(pred):
if pred_pred not in visited:
queue.append(pred_pred)
class OrderedDiGraph(DiGraph):
"""A directed graph subclass with useful utility functions.
    This derivative retains node, edge, insertion and iteration
    ordering (so that the iteration order matches the insertion
order).
"""
node_dict_factory = collections.OrderedDict
adjlist_dict_factory = collections.OrderedDict
edge_attr_dict_factory = collections.OrderedDict
def merge_graphs(graph, *graphs, **kwargs):
"""Merges a bunch of graphs into a new graph.
If no additional graphs are provided the first graph is
    returned unmodified; otherwise the merged graph is returned.
"""
tmp_graph = graph
allow_overlaps = kwargs.get('allow_overlaps', False)
overlap_detector = kwargs.get('overlap_detector')
if overlap_detector is not None and not six.callable(overlap_detector):
raise ValueError("Overlap detection callback expected to be callable")
elif overlap_detector is None:
overlap_detector = (lambda to_graph, from_graph:
len(to_graph.subgraph(from_graph.nodes_iter())))
for g in graphs:
# This should ensure that the nodes to be merged do not already exist
# in the graph that is to be merged into. This could be problematic if
# there are duplicates.
if not allow_overlaps:
            # Attempt to induce a subgraph using the to-be-merged graph's nodes
# and see if any graph results.
overlaps = overlap_detector(graph, g)
if overlaps:
raise ValueError("Can not merge graph %s into %s since there "
"are %s overlapping nodes (and we do not "
"support merging nodes)" % (g, graph,
overlaps))
graph = nx.algorithms.compose(graph, g)
# Keep the first graphs name.
if graphs:
graph.name = tmp_graph.name
return graph
|
Python
| 0.000294
|
@@ -702,16 +702,54 @@
x as nx%0A
+from networkx.drawing import nx_pydot%0A
import s
@@ -2003,32 +2003,38 @@
return nx
+_pydot
.to_pydot(self).
|
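The fix reflects a networkx packaging change: the top-level nx.to_pydot alias was dropped, and the function now has to be imported from networkx.drawing.nx_pydot. A minimal usage sketch (requires the pydot package to be installed):

import networkx as nx
from networkx.drawing import nx_pydot  # the nx.to_pydot alias was removed

g = nx.DiGraph(name='example')
g.add_edge('a', 'b')
print(nx_pydot.to_pydot(g).to_string())  # graph serialized in dot format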
bd32faf934bd26957a16a0aa2ac092c5759d2342
|
annotate new test
|
python/ql/test/experimental/dataflow/fieldflow/test.py
|
python/ql/test/experimental/dataflow/fieldflow/test.py
|
# These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):
return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j
def SINK(x):
if is_source(x):
print("OK")
else:
print("Unexpected flow", x)
def SINK_F(x):
if is_source(x):
print("Unexpected flow", x)
else:
print("OK")
# Preamble
class MyObj(object):
def __init__(self, foo):
self.foo = foo
def setFoo(self, foo):
self.foo = foo
class NestedObj(object):
def __init__(self):
self.obj = MyObj("OK")
def getObj(self):
return self.obj
def setFoo(obj, x):
SINK_F(obj.foo)
obj.foo = x
def test_example1():
myobj = MyObj("OK")
setFoo(myobj, SOURCE)
SINK(myobj.foo)
def test_example1_method():
myobj = MyObj("OK")
myobj.setFoo(SOURCE)
SINK(myobj.foo)
def test_example2():
x = SOURCE
a = NestedObj()
a.obj.foo = x
SINK(a.obj.foo)
def test_example2_method():
x = SOURCE
a = NestedObj()
a.getObj().foo = x
SINK(a.obj.foo) # Flow missing
def test_example3():
obj = MyObj(SOURCE)
SINK(obj.foo)
def test_example3_kw():
obj = MyObj(foo=SOURCE)
SINK(obj.foo)
def fields_with_local_flow(x):
obj = MyObj(x)
a = obj.foo
return a
def test_fields():
SINK(fields_with_local_flow(SOURCE))
|
Python
| 0.004804
|
@@ -911,32 +911,50 @@
SINK(myobj.foo)
+ # Flow not found
%0A%0A%0Adef test_exam
|
091ebd935c6145ac233c03bedeb52c65634939f4
|
Include the version-detecting code to allow PyXML to override the "standard" xml package. Require at least PyXML 0.6.1.
|
Lib/xml/__init__.py
|
Lib/xml/__init__.py
|
"""Core XML support for Python.
This package contains three sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
"""
try:
import _xmlplus
except ImportError:
pass
else:
import sys
sys.modules[__name__] = _xmlplus
|
Python
| 0
|
@@ -409,68 +409,374 @@
%22%0A%0A%0A
-try:%0A import _xmlplus%0Aexcept ImportError:%0A pass%0Aelse:%0A
+__all__ = %5B%22dom%22, %22parsers%22, %22sax%22%5D%0A%0A__version__ = %22$Revision$%22%5B1:-1%5D.split()%5B1%5D%0A%0A%0A_MINIMUM_XMLPLUS_VERSION = (0, 6, 1)%0A%0A%0Atry:%0A import _xmlplus%0Aexcept ImportError:%0A pass%0Aelse:%0A try:%0A v = _xmlplus.version_info%0A except AttributeError:%0A # _xmlplue is too old; ignore it%0A pass%0A else:%0A if v %3E= _MINIMUM_XMLPLUS_VERSION:%0A
@@ -786,16 +786,24 @@
ort sys%0A
+
sys.
@@ -823,16 +823,48 @@
e__%5D = _xmlplus%0A
+ else:%0A del v%0A
|
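The gating added above relies on Python's lexicographic tuple comparison, so a version triple can be compared directly against the minimum. A one-screen illustration:

_MINIMUM_XMLPLUS_VERSION = (0, 6, 1)

for candidate in [(0, 5, 2), (0, 6, 0), (0, 6, 1), (1, 0, 0)]:
    # Tuples compare element by element, left to right
    print(candidate, candidate >= _MINIMUM_XMLPLUS_VERSION)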
3f0fc980629f0645acb813b2ef8ed5d91761cbcc
|
add missing pkgconfig dependency and fix boost version range (#9835)
|
var/spack/repos/builtin/packages/wt/package.py
|
var/spack/repos/builtin/packages/wt/package.py
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Wt(CMakePackage):
"""Wt, C++ Web Toolkit.
Wt is a C++ library for developing web applications."""
homepage = "http://www.webtoolkit.eu/wt"
url = "https://github.com/emweb/wt/archive/3.3.7.tar.gz"
git = "https://github.com/emweb/wt.git"
version('master', branch='master')
version('3.3.7', '09858901f2dcf5c3d36a9237daba3e3f')
# wt builds in parallel, but requires more than 5 GByte RAM per -j <njob>
    # which most machines do not provide, crashing the build
parallel = False
variant('openssl', default=True,
description='SSL and WebSockets support in the built-in httpd, '
'the HTTP(S) client, and additional cryptographic '
'hashes in the authentication module')
variant('libharu', default=True, description='painting to PDF')
# variant('graphicsmagick', default=True,
# description='painting to PNG, GIF')
variant('sqlite', default=False, description='create SQLite3 DBO')
variant('mariadb', default=False, description='create MariaDB/MySQL DBO')
variant('postgresql', default=False, description='create PostgreSQL DBO')
# variant('firebird', default=False, description='create Firebird DBO')
variant('pango', default=True,
description='improved font support in PDF and raster image '
'painting')
variant('zlib', default=True,
description='compression in the built-in httpd')
# variant('fastcgi', default=False,
# description='FastCGI connector via libfcgi++')
depends_on('boost@1.46.1:')
depends_on('openssl', when='+openssl')
depends_on('libharu', when='+libharu')
depends_on('sqlite', when='+sqlite')
depends_on('mariadb', when='+mariadb')
depends_on('postgresql', when='+postgresql')
depends_on('pango', when='+pango')
depends_on('zlib', when='+zlib')
def cmake_args(self):
spec = self.spec
cmake_args = [
'-DBUILD_EXAMPLES:BOOL=OFF',
'-DCONNECTOR_FCGI:BOOL=OFF',
'-DENABLE_OPENGL:BOOL=OFF',
'-DENABLE_QT4:BOOL=OFF'
]
cmake_args.extend([
'-DENABLE_SSL:BOOL={0}'.format((
'ON' if '+openssl' in spec else 'OFF')),
'-DENABLE_HARU:BOOL={0}'.format((
'ON' if '+libharu' in spec else 'OFF')),
'-DENABLE_PANGO:BOOL={0}'.format((
'ON' if '+pango' in spec else 'OFF')),
'-DENABLE_SQLITE:BOOL={0}'.format((
'ON' if '+sqlite' in spec else 'OFF')),
'-DENABLE_MYSQL:BOOL={0}'.format((
'ON' if '+mariadb' in spec else 'OFF')),
'-DENABLE_POSTGRES:BOOL={0}'.format((
'ON' if '+postgres' in spec else 'OFF'))
])
return cmake_args
|
Python
| 0
|
@@ -1803,24 +1803,66 @@
ibfcgi++')%0A%0A
+ depends_on('pkgconfig', type='build')%0A
depends_
@@ -1878,16 +1878,20 @@
@1.46.1:
+1.65
')%0A d
|
308b3f9b2b8a4f2be9bfc09f0c026b54880ec94c
|
Remove unwanted print statement
|
gemdeps/views.py
|
gemdeps/views.py
|
import json
import os
from flask import Markup, render_template, request
from gemdeps import app
@app.route('/', methods=['GET', 'POST'])
def index():
completedeplist = {}
gemnames = []
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
for app in ['diaspora', 'gitlab', 'asciinema']:
appname = app + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appname)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
completedeplist[app] = deps
gemnames += [str(x['name']) for x in deps]
gemnames = list(set(gemnames))
gemnames = Markup(gemnames)
print completedeplist
if request.method == 'GET':
return render_template('index.html', gemnames=gemnames)
else:
apps = request.form.getlist('appname')
gemname = request.form.get('gemname')
gems = {}
flag = 0
for app in apps:
gem = [x for x in completedeplist[app] if x['name'] == gemname]
if gem:
flag = 1
gems[app] = gem
return render_template('index.html',
gemnames=gemnames,
gemname=gemname,
gemlist=gems,
flag=flag)
@app.route('/status/<appname>')
def status(appname):
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
appfilename = appname + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appfilename)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
packaged_count = 0
unpackaged_count = 0
itp_count = 0
total = 0
for n in deps:
if n['status'] == 'Packaged' or n['status'] == 'NEW':
packaged_count += 1
elif n['status'] == 'ITP':
itp_count += 1
else:
unpackaged_count += 1
total = len(deps)
percent_complete = (packaged_count * 100) / total
return render_template('status.html',
appname=appname.title(),
deps=deps,
packaged_count=packaged_count,
unpackaged_count=unpackaged_count,
itp_count=itp_count,
total=total,
percent_complete=percent_complete
)
|
Python
| 0.000034
|
@@ -708,34 +708,8 @@
es)%0A
- print completedeplist%0A
|
3fe92a109ce6a21e98ee3c6a6c604d3e59cc2854
|
Update export script
|
src/scripts/export_data.py
|
src/scripts/export_data.py
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import os
PIPELINE_DIR = os.path.join(os.path.dirname(__file__), '../../', 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import args_utils
import license_utils
import export_utils
import doc_utils
import config
import path_utils
args = args_utils.get_parser().parse_args()
path_utils.root_dir = args.publish_dir
if not args.allowlist:
logging.warning('RUNNING WITHOUT THE ALLOWLIST! DO NOT MAKE A PULL REQUEST WITH THE OUTPUT!')
sources_all = config.read_config(
cc_by=True, cc_by_sa=True, cc_by_nc=True, google_tos=True, filter_not_approved=args.allowlist)
sources_cc_by = config.read_config(
cc_by=True, cc_by_sa=False, cc_by_nc=False, google_tos=False, filter_not_approved=args.allowlist)
sources_cc_by_sa = config.read_config(
cc_by=True, cc_by_sa=True, cc_by_nc=False, google_tos=False, filter_not_approved=args.allowlist)
sources_cc_by_nc = config.read_config(
cc_by=True, cc_by_sa=False, cc_by_nc=True, google_tos=False, filter_not_approved=args.allowlist)
sources_google_tos = config.read_config(
cc_by=False, cc_by_sa=False, cc_by_nc=False, google_tos=True, filter_not_approved=args.allowlist)
google_search_source = {'search_trends_symptoms_dataset': sources_google_tos['search_trends_symptoms_dataset']}
google_mobility_source = {'google_mobility_reports': sources_google_tos['google_mobility_reports']}
# Step 1: Write source docs
# sources_md contains every source, used to create the README.
doc_utils.write_sources(sources_all, path_utils.path_to('sources_md'))
# sources_cc_by_md is used to create aggregated license for cc-by.
doc_utils.write_sources(sources_cc_by, path_utils.path_to('sources_cc_by_md'))
# sources_cc_by_sa_md is used to create aggregated license for cc-by-sa.
doc_utils.write_sources(sources_cc_by_sa, path_utils.path_to('sources_cc_by_sa_md'))
# sources_cc_by_nc_md is used to create aggregated license for cc-by-nc.
doc_utils.write_sources(sources_cc_by_nc, path_utils.path_to('sources_cc_by_nc_md'))
# Step 2: Write the README (needs to happen after writing the source docs)
with open(path_utils.path_to('readme_md'), 'w') as outfile:
with open(path_utils.path_to('about_md'), 'r') as infile:
outfile.write(infile.read())
outfile.write('\n\n## Data Sources\n')
with open(path_utils.path_to('sources_md'), 'r') as infile:
outfile.write(infile.read())
# Step 3: Export aggregated license files
cc_by_header = ('''The file `aggregated_cc_by.csv` is licensed under Creative Commons Attribution'''
''' 4.0 International.\n\nIt includes content under the following licenses:\n\n''')
cc_by_sa_header = ('''The file `aggregated_cc_by_sa.csv` is licensed under Creative Commons Attribution-ShareAlike'''
''' 4.0 International.\n\nIt includes content under the following licenses:\n\n''')
cc_by_nc_header = ('''The file `aggregated_cc_by_nc.csv` is licensed under Creative Commons Attribution-NonCommercial'''
''' 4.0 International.\n\nIt includes content under the following licenses:\n\n''')
all_license_files_cc_by = license_utils.get_license_files(sources_cc_by,
required_licenses=['docs/license_files/cc-by-4.0'])
all_license_files_cc_by_sa = license_utils.get_license_files(sources_cc_by_sa,
required_licenses=['docs/license_files/cc-by-sa-4.0'])
all_license_files_cc_by_nc = license_utils.get_license_files(sources_cc_by_nc,
required_licenses=[
'docs/license_files/cc-by-nc-4.0',
'docs/license_files/nytimes'])
license_utils.export_aggregated_license(path_utils.path_to('export_cc_by_license'),
path_utils.path_to('sources_cc_by_md'),
all_license_files_cc_by,
cc_by_header)
license_utils.export_aggregated_license(path_utils.path_to('export_cc_by_sa_license'),
path_utils.path_to('sources_cc_by_sa_md'),
all_license_files_cc_by_sa,
cc_by_sa_header)
license_utils.export_aggregated_license(path_utils.path_to('export_cc_by_nc_license'),
path_utils.path_to('sources_cc_by_nc_md'),
all_license_files_cc_by_nc,
cc_by_nc_header)
# Step 4: Export data files
export_utils.export_data(config_dict=sources_cc_by, export_path=path_utils.path_to('export_cc_by_csv'))
print('Done exporting cc by data.')
export_utils.export_data(config_dict=sources_cc_by_sa, export_path=path_utils.path_to('export_cc_by_sa_csv'))
print('Done exporting cc by-sa data.')
export_utils.export_data(config_dict=sources_cc_by_nc, export_path=path_utils.path_to('export_cc_by_nc_csv'))
print('Done exporting cc by-nc data.')
export_utils.export_data(config_dict=google_mobility_source, export_path=path_utils.path_to('export_mobility'))
print('Done exporting Google Mobility data.')
# export_utils.export_data(config_dict=google_search_source, export_path=path_utils.path_to('export_search'))
# print('Done exporting Google Search data.')
|
Python
| 0
|
@@ -1578,32 +1578,34 @@
args.allowlist)%0A
+#
sources_google_t
@@ -1621,32 +1621,34 @@
ig.read_config(%0A
+#
cc_by=False,
@@ -1733,16 +1733,18 @@
owlist)%0A
+#
google_s
@@ -1847,16 +1847,18 @@
aset'%5D%7D%0A
+#
google_m
@@ -5721,32 +5721,34 @@
by-nc data.')%0A%0A
+#
export_utils.exp
@@ -5839,24 +5839,26 @@
mobility'))%0A
+#
print('Done
|
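Decoded, every hunk above simply prepends a comment marker, disabling the Google ToS sources and the mobility export without deleting them:

```python
# Lines after the decoded diff: the Google ToS config and the mobility
# export are commented out, not removed.
# sources_google_tos = config.read_config(
#     cc_by=False, cc_by_sa=False, cc_by_nc=False, google_tos=True, filter_not_approved=args.allowlist)
# google_search_source = {'search_trends_symptoms_dataset': sources_google_tos['search_trends_symptoms_dataset']}
# google_mobility_source = {'google_mobility_reports': sources_google_tos['google_mobility_reports']}
# export_utils.export_data(config_dict=google_mobility_source, export_path=path_utils.path_to('export_mobility'))
# print('Done exporting Google Mobility data.')
```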
49fafd2107719f0d0c588e85bb8c37a9d60a0845
|
Fix PEP8 and remove pdb
|
sponsorship_tracking/wizard/sub_sponsorship_wizard.py
|
sponsorship_tracking/wizard/sub_sponsorship_wizard.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, exceptions, _
import pdb
class sub_sponsorship_wizard(models.TransientModel):
_name = "sds.subsponsorship.wizard"
state = fields.Selection([
('sub', 'sub'),
('no_sub', 'no_sub')])
child_id = fields.Many2one(
'compassion.child', 'Child')
channel = fields.Selection('_get_channels')
no_sub_default_reasons = fields.Selection(
'_get_no_sub_reasons', 'No sub reason')
no_sub_reason = fields.Char('No sub reason')
def _get_no_sub_reasons(self):
return [
('other_sponsorship', _('Sponsors other children')),
('financial', _('Financial reasons')),
('old', _('Is too old to sponsor another child')),
('other_support', _('Wants to support with fund donations')),
('other_organization', _('Supports another organization')),
('not_now', _("Doesn't want to take another child right now")),
('not_given', _('Not given')),
('other', _('Other...'))
]
def _get_channels(self):
"""Returns the available channel through the new sponsor
reached Compassion.
"""
return self.env['recurring.contract']._get_channels()
@api.multi
def create_subsponsorship(self):
""" Creates a subsponsorship. """
self.ensure_one()
child = self.child_id
if not child:
raise exceptions.Warning(
_("No child selected"),
_("Please select a child"))
sponsorship_id = self.env.context.get('active_id')
contract_obj = self.env['recurring.contract']
contract = contract_obj.browse(sponsorship_id)
origin_obj = self.env['recurring.contract.origin']
sub_origin_id = origin_obj.search([('type', '=', 'sub')], limit=1).id
pdb.set_trace()
sub_contract = contract.copy({
'parent_id': sponsorship_id,
'origin_id': sub_origin_id,
'channel': self.channel,
})
sub_contract.write({'child_id': child.id})
sub_contract.signal_workflow('contract_validated')
return True
@api.multi
def no_sub(self):
""" No SUB for the sponsorship. """
self.ensure_one()
sponsorship_id = self.env.context.get('active_id')
contract = self.env['recurring.contract'].browse(sponsorship_id)
default_reason = self.no_sub_default_reasons
reason = False
if default_reason == 'other':
reason = self.no_sub_reason
else:
reason = dict(self._get_no_sub_reasons()).get(default_reason)
contract.write({'no_sub_reason': reason})
contract.signal_workflow('no_sub')
return True
|
Python
| 0.000001
|
@@ -466,18 +466,8 @@
_%0D%0A
-import pdb
%0D%0A%0D%0A
@@ -1475,20 +1475,16 @@
%5D%0D%0A
-
%0D%0A de
@@ -2299,33 +2299,8 @@
id%0D%0A
- pdb.set_trace()%0D%0A
|
48f593bae26e1a587789a41aa82f9f984271bb4c
|
add check mode to dhcp_server
|
library/mt_dhcp_server.py
|
library/mt_dhcp_server.py
|
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
module: mt_dhcp_server.py
author:
- "Valentin Gurmeza"
version_added: "2.4"
short_description: Manage mikrotik dhcp-server endpoints
requirements:
- mt_api
description:
- Mikrotik dhcp-server generic module
options:
hostname:
description:
      - hostname of mikrotik router
required: True
username:
description:
- username used to connect to mikrotik router
required: True
password:
description:
- password used for authentication to mikrotik router
required: True
parameter:
description:
- sub endpoint for mikrotik tool
required: True
options:
      - network
      - option
      - dhcp-server
settings:
description:
- All Mikrotik compatible parameters for this particular endpoint.
Any yes/no values must be enclosed in double quotes
state:
description:
- absent or present
'''
EXAMPLES = '''
- mt_dhcp_server:
hostname: "{{ inventory_hostname }}"
username: "{{ mt_user }}"
password: "{{ mt_pass }}"
parameter: network
settings:
address: 192.168.1.0/24
dns: 192.168.1.20
'''
from mt_common import clean_params, MikrotikIdempotent
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec = dict(
hostname = dict(required=True),
username = dict(required=True),
password = dict(required=True),
settings = dict(required=False, type='dict'),
parameter = dict(
required = True,
choices = ['network', 'option', 'dhcp-server'],
type = 'str'
),
state = dict(
required = False,
choices = ['present', 'absent'],
type = 'str'
),
)
)
idempotent_parameter = None
params = module.params
if params['parameter'] == 'network':
idempotent_parameter = 'address'
params['parameter'] = "dhcp-server/network"
if params['parameter'] == 'option':
idempotent_parameter = 'name'
params['parameter'] = "dhcp-server/option"
if params['parameter'] == 'dhcp-server':
idempotent_parameter = 'name'
mt_obj = MikrotikIdempotent(
hostname = params['hostname'],
username = params['username'],
password = params['password'],
state = params['state'],
desired_params = params['settings'],
idempotent_param = idempotent_parameter,
api_path = '/ip/' + str(params['parameter']),
)
mt_obj.sync_state()
if mt_obj.failed:
module.fail_json(
msg = mt_obj.failed_msg
)
elif mt_obj.changed:
module.exit_json(
failed=False,
changed=True,
msg=mt_obj.changed_msg,
diff={ "prepared": {
"old": mt_obj.old_params,
"new": mt_obj.new_params,
}},
)
else:
module.exit_json(
failed=False,
changed=False,
#msg='',
msg=params['settings'],
)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1871,24 +1871,54 @@
),%0A )
+,%0A supports_check_mode=True
%0A )%0A%0A
@@ -2687,16 +2687,62 @@
eter'%5D),
+%0A check_mode = module.check_mode,
%0A%0A )%0A
|
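Decoded, the two hunks above wire Ansible's check mode through the module. A sketch of the two affected spots, with the unchanged arguments elided:

```python
# Check-mode support decoded from the diff; elided kwargs are unchanged.
module = AnsibleModule(
    argument_spec=dict(
        # ... arguments as in the original ...
    ),
    supports_check_mode=True,
)

mt_obj = MikrotikIdempotent(
    # ... credentials, state and settings as in the original ...
    api_path='/ip/' + str(params['parameter']),
    check_mode=module.check_mode,  # new: forwarded to the helper
)
```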
7cebbd615544dc165d6711833747bc978c3bd2d6
|
fix call
|
dihedral_mutinf.py
|
dihedral_mutinf.py
|
import numpy as np
import mdtraj as md
import argparse
import cPickle
import time
from multiprocessing import Pool
from itertools import combinations_with_replacement as combinations
from sklearn.metrics import mutual_info_score
from contextlib import closing
class timing(object):
"Context manager for printing performance"
def __init__(self, iter):
self.iter = iter
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
print("Round %s : %0.3f seconds" %
(self.iter, end-self.start))
return False
def rbins(n=30):
return np.linspace(-np.pi, np.pi, n+3)[1:-1]
def mi(X, Y, r=rbins()):
H = np.histogram2d(X, Y, [r, r])[0]
return mutual_info_score(None, None, contingency=H)
def dihedrals(traj):
kinds = [md.compute_phi,
md.compute_psi]
return [kind(traj)[1].T for kind in kinds]
class f(object):
def __class__(self, i):
return sum([mi(d[0][i[0]], d[1][i[1]])
for d in combinations(self.D, 2)])
def __init__(self, D):
self.D = D
def run(traj, iter, N):
D = dihedrals(traj)
n = D[0].shape[0]
R = []
for i in range(iter+1):
r = np.zeros((n, n))
g = f(D)
with timing(i):
with closing(Pool(processes=N)) as pool:
r[np.triu_indices(n)] = pool.map(g, combinations(range(n), 2))
pool.terminate()
r[np.triu_indices(n)[::-1]] = r[np.triu_indices(n)]
R.append(r)
[np.random.shuffle(d) for d in D]
return R[0] - np.mean(R[1:], axis=0)
def parse_cmdln():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--input', dest='traj',
help='File containing trajectory.')
parser.add_argument('-s', '--shuffle-iter', dest='iter',
help='Number of shuffle iterations.',
default=100, type=int)
parser.add_argument('-t', '--topology', dest='top',
help='File containing topology.', default=None)
parser.add_argument('-n', '--n-proc', dest='N',
help='Number of processors to be used.',
default=4, type=int)
parser.add_argument('-o', '--output', dest='out',
help='Name of output file.', default='mutinf.pkl')
args = parser.parse_args()
return args
if __name__ == "__main__":
options = parse_cmdln()
traj = md.load(options.traj, top=options.top)
M = run(traj, options.iter, options.N)
cPickle.dump(M, open(options.out, 'wb'))
|
Python
| 0.000001
|
@@ -958,20 +958,19 @@
def __c
-lass
+all
__(self,
|
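The one-token fix decoded above renames `__class__` to `__call__`, which is what makes `pool.map(g, ...)` work: `g` must be callable. The corrected class, using `mi` and `combinations` from the same script:

```python
# After the fix, f instances are callable, as multiprocessing.Pool.map needs.
class f(object):
    def __init__(self, D):
        self.D = D

    def __call__(self, i):
        # sum mutual information over dihedral-kind pairs, as in the original
        return sum(mi(d[0][i[0]], d[1][i[1]])
                   for d in combinations(self.D, 2))
```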
c7a657f0ab2c39a66ed35056a8df976c913ea3c9
|
Allow SparseBasis to take a specific op basis
|
src/pysme/sparse_system_builder.py
|
src/pysme/sparse_system_builder.py
|
"""Construct integrators using sparse arrays.
"""
import numpy as np
import sparse
from sparse import COO
import pysme.gellmann as gm
def sparse_real(sparse_array):
"""`numpy.conjugate` returns a sparse array, but numpy.real does not, so
use this function to get a sparse real part.
"""
return (sparse_array + np.conj(sparse_array)) / 2
def sparse_imag(sparse_array):
"""`numpy.conjugate` returns a sparse array, but numpy.imag does not, so
use this function to get a sparse imaginary part.
"""
return (sparse_array - np.conj(sparse_array)) / 2.j
class SparseBasis:
def __init__(self, dim):
self.dim = dim
self.basis = COO.from_numpy(np.array(gm.get_basis(dim)))
self.sq_norms = COO.from_numpy(np.einsum('jmn,jnm->j',
self.basis.todense(),
self.basis.todense()))
sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
self.dual = self.basis * sq_norms_inv[:,None,None]
self.struct = sparse.tensordot(sparse.tensordot(self.basis, self.basis,
([2], [1])),
self.dual, ([1, 3], [2, 1]))
if type(self.struct) == np.ndarray:
# Sometimes sparse.tensordot returns numpy arrays. We want to force
# it to be sparse, since sparse.tensordot fails when passed two
# numpy arrays.
self.struct = COO.from_numpy(self.struct)
def vectorize(self, op):
sparse_op = COO.from_numpy(op)
result = sparse.tensordot(self.dual, sparse_op, ([1,2], [1,0]))
if type(result) == np.ndarray:
# I want the result stored in a sparse format even if it isn't
# sparse.
result = COO.from_numpy(result)
return result
def dualize(self, op):
return np.conj(self.vectorize(op)) * self.sq_norms
def matrize(self, vec):
"""Take a (sparse) vectorized operator and return it in matrix form.
"""
return sparse.tensordot(self.basis, vec, ([0], [0]))
def make_real_sand_matrix(self, x, y):
r"""Make the superoperator matrix representation of
N[X,Y](rho) = (1/2) ( X rho Y† + Y rho X† )
In the basis {Λ_j}, N[X,Y](rho) = N(x,y)_jk rho_k Λ_j where
N(x,y)_jk = Re[x_m (D_mlj + iF_mlj) (y*)_n (D_knl + iF_knl) ]
Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l
x and y are vectorized representations of the operators X and Y stored
in sparse format.
`sparse.tensordot` might decide to return something dense, so the user
should be aware of that.
"""
result_A = sparse.tensordot(x, self.struct, ([0], [0]))
result_B = sparse.tensordot(np.conj(y), self.struct, ([0], [1]))
# sparse.tensordot fails if both arguments are numpy ndarrays, so we
# force the intermediate arrays to be sparse
if type(result_B) == np.ndarray:
result_B = COO.from_numpy(result_B)
if type(result_A) == np.ndarray:
result_A = COO.from_numpy(result_A)
result = sparse_real(sparse.tensordot(result_A, result_B, ([0], [1])))
# We want our result to be dense, to make things predictable from the
# outside.
if type(result) == sparse.coo.COO:
result = result.todense()
return result.real
def make_real_comm_matrix(self, x, y):
r"""Make the superoperator matrix representation of
M[X,Y](rho) = (1/2) ( [X rho, Y†] + [Y, rho X†] )
In the basis {Λ_j}, M[X,Y](rho) = M(x,y)_jk rho_k Λ_j where
M(x,y)_jk = -2 Im[ (y*)_n F_lnj x_m (D_mkl + iF_mkl) ]
Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l
x and y are vectorized representations of the operators X and Y stored
in sparse format.
`sparse.tensordot` might decide to return something dense, so the user
should be aware of that.
"""
struct_imag = sparse_imag(self.struct)
# sparse.tensordot fails if both arguments are numpy ndarrays, so we
# force the intermediate arrays to be sparse
result_A = sparse.tensordot(np.conj(y), struct_imag, ([0], [1]))
result_B = sparse.tensordot(x, self.struct, ([0], [0]))
if type(result_B) == np.ndarray:
result_B = COO.from_numpy(result_B)
if type(result_A) == np.ndarray:
result_A = COO.from_numpy(result_A)
result = -2 * sparse_imag(sparse.tensordot(result_A, result_B,
([0], [1])))
# We want our result to be dense, to make things predictable from the
# outside.
if type(result) == sparse.coo.COO:
result = result.todense()
return result.real
def make_diff_op_matrix(self, x):
"""Make the superoperator matrix representation of
X rho X† - (1/2) ( X† X rho + rho X† X )
x is the vectorized representation of the operator X stored in sparse
format.
`sparse.tensordot` might decide to return something dense, so the user
should be aware of that.
"""
return self.make_real_comm_matrix(x, x)
def make_hamil_comm_matrix(self, h):
"""Make the superoperator matrix representation of
-i[H,rho]
h is the vectorized representation of the Hamiltonian H stored in sparse
format.
Returns a dense matrix.
"""
struct_imag = sparse_imag(self.struct)
result = 2 * sparse.tensordot(struct_imag, h, ([0], [0])).T
if type(result) == sparse.coo.COO:
result = result.todense()
return result.real
def make_wiener_linear_matrix(self, x):
Id_vec = self.vectorize(np.eye(self.dim))
return 2 * self.make_real_sand_matrix(x, Id_vec)
|
Python
| 0
|
@@ -629,33 +629,168 @@
dim
-):%0A self.dim = dim
+, basis=None):%0A if basis is None:%0A self.dim = dim%0A basis = gm.get_basis(dim)%0A else:%0A self.dim = basis%5B0%5D.shape%5B0%5D
%0A
@@ -831,33 +831,21 @@
p.array(
-gm.get_basis(dim)
+basis
))%0A
|
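Decoded, the hunks rewrite the constructor head so a caller can hand in a specific operator basis; when none is given, the Gell-Mann basis for `dim` is built as before:

```python
# Constructor head decoded from the diff; the rest of __init__ is unchanged.
def __init__(self, dim, basis=None):
    if basis is None:
        self.dim = dim
        basis = gm.get_basis(dim)
    else:
        # infer the dimension from the supplied basis matrices
        self.dim = basis[0].shape[0]
    self.basis = COO.from_numpy(np.array(basis))
    # ... sq_norms, dual and struct computed as in the original ...
```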
05916ed32ec9206baff2dd917440569bbae8c106
|
Update tutorials/future/tf2/cifar10_tutorial.py
|
tutorials/future/tf2/cifar10_tutorial.py
|
tutorials/future/tf2/cifar10_tutorial.py
|
import numpy as np
import math
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app, flags
from easydict import EasyDict
from tensorflow.keras import Model
from tensorflow.keras.layers import AveragePooling2D, Dense, Flatten, Conv2D, MaxPool2D
from cleverhans.future.tf2.attacks import projected_gradient_descent, fast_gradient_method
FLAGS = flags.FLAGS
class CNN(Model):
def __init__(self, nb_filters=64):
super(CNN, self).__init__()
img_size = 32
log_resolution = int(round(math.log(img_size) / math.log(2)))
conv_args = dict(
activation=tf.nn.leaky_relu,
kernel_size=3,
padding='same')
self.layers_obj = []
for scale in range(log_resolution - 2):
conv1 = Conv2D(nb_filters << scale, **conv_args)
conv2 = Conv2D(nb_filters << (scale + 1), **conv_args)
pool = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))
self.layers_obj.append(conv1)
self.layers_obj.append(conv2)
self.layers_obj.append(pool)
conv = Conv2D(10, **conv_args)
self.layers_obj.append(conv)
def call(self, x):
for layer in self.layers_obj:
x = layer(x)
return tf.reduce_mean(x, [1, 2])
def ld_cifar10():
"""Load training and test data."""
def convert_types(image, label):
image = tf.cast(image, tf.float32)
image /= 127.5
image -= 1.
return image, label
dataset, info = tfds.load('cifar10',
with_info=True,
as_supervised=True)
mnist_train, mnist_test = dataset['train'], dataset['test']
mnist_train = mnist_train.map(convert_types).shuffle(10000).batch(128)
mnist_test = mnist_test.map(convert_types).batch(128)
return EasyDict(train=mnist_train, test=mnist_test)
def main(_):
# Load training and test data
data = ld_cifar10()
model = CNN()
loss_object = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.optimizers.Adam(learning_rate=0.001)
# Metrics to track the different accuracies.
train_loss = tf.metrics.Mean(name='train_loss')
test_acc_clean = tf.metrics.SparseCategoricalAccuracy()
test_acc_fgsm = tf.metrics.SparseCategoricalAccuracy()
test_acc_pgd = tf.metrics.SparseCategoricalAccuracy()
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
predictions = model(x)
loss = loss_object(y, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
# Train model with adversarial training
for epoch in range(FLAGS.nb_epochs):
# keras like display of progress
progress_bar_train = tf.keras.utils.Progbar(50000)
for (x, y) in data.train:
if FLAGS.adv_train:
# Replace clean example with adversarial example for adversarial training
x = projected_gradient_descent(model, x, FLAGS.eps, 0.01, 40, np.inf)
train_step(x, y)
progress_bar_train.add(x.shape[0], values=[('loss', train_loss.result())])
# Evaluate on clean and adversarial data
progress_bar_test = tf.keras.utils.Progbar(10000)
for x, y in data.test:
y_pred = model(x)
test_acc_clean(y, y_pred)
x_fgm = fast_gradient_method(model, x, FLAGS.eps, np.inf)
y_pred_fgm = model(x_fgm)
test_acc_fgsm(y, y_pred_fgm)
x_pgd = projected_gradient_descent(model, x, FLAGS.eps, 0.01, 40, np.inf)
y_pred_pgd = model(x_pgd)
test_acc_pgd(y, y_pred_pgd)
progress_bar_test.add(x.shape[0])
print('test acc on clean examples (%): {:.3f}'.format(test_acc_clean.result() * 100))
print('test acc on FGM adversarial examples (%): {:.3f}'.format(test_acc_fgsm.result() * 100))
print('test acc on PGD adversarial examples (%): {:.3f}'.format(test_acc_pgd.result() * 100))
if __name__ == '__main__':
flags.DEFINE_integer('nb_epochs', 200, 'Number of epochs.')
flags.DEFINE_float('eps', 0.05, 'Total epsilon for FGM and PGD attacks.')
flags.DEFINE_bool('adv_train', False, 'Use adversarial training (on PGD adversarial examples).')
app.run(main)
|
Python
| 0
|
@@ -1549,69 +1549,417 @@
ue)%0A
- mnist_train, mnist_test = dataset%5B'train'%5D, dataset%5B'test'%5D
+%0A def augment_mirror(x):%0A return tf.image.random_flip_left_right(x)%0A%0A def augment_shift(x, w=4):%0A y = tf.pad(x, %5B%5Bw%5D * 2, %5Bw%5D * 2, %5B0%5D * 2%5D, mode='REFLECT')%0A return tf.random_crop(y, tf.shape(x))%0A%0A mnist_train, mnist_test = dataset%5B'train'%5D, dataset%5B'test'%5D%0A # Augmentation helps a lot in CIFAR10%0A mnist_train = mnist_train.map(lambda x, y: (augment_mirror(augment_shift(x)), y))
%0A m
|
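The large hunk decodes to two augmentation helpers defined inside `ld_cifar10()` and applied to the training split only:

```python
# Verbatim decode of the hunk above (TF1-era APIs, matching the tutorial).
def augment_mirror(x):
    return tf.image.random_flip_left_right(x)

def augment_shift(x, w=4):
    # reflect-pad w pixels on each spatial side, then crop back to the
    # input shape
    y = tf.pad(x, [[w] * 2, [w] * 2, [0] * 2], mode='REFLECT')
    return tf.random_crop(y, tf.shape(x))

mnist_train, mnist_test = dataset['train'], dataset['test']
# Augmentation helps a lot in CIFAR10
mnist_train = mnist_train.map(lambda x, y: (augment_mirror(augment_shift(x)), y))
```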
3a27568211c07cf614aa9865a2f08d2a9b9bfb71
|
Return errors in json only
|
dinosaurs/views.py
|
dinosaurs/views.py
|
import os
import json
import httplib as http
import tornado.web
import tornado.ioloop
from dinosaurs import api
from dinosaurs import settings
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
class DomainAPIHandler(tornado.web.RequestHandler):
def get(self):
self.write({
'availableDomains': settings.DOMAINS.keys()
})
class EmailAPIHandler(tornado.web.RequestHandler):
def post(self):
try:
req_json = json.loads(self.request.body)
except ValueError:
raise tornado.web.HTTPError(http.BAD_REQUEST)
email = req_json.get('email')
domain = req_json.get('domain')
connection = api.get_connection(domain)
if not email or not domain or not connection:
raise tornado.web.HTTPError(http.BAD_REQUEST)
ret, passwd = api.create_email(connection, email)
self.write({
'password': passwd,
'email': ret['login'],
'domain': ret['domain']
})
self.set_status(http.CREATED)
|
Python
| 0.000002
|
@@ -663,32 +663,186 @@
equestHandler):%0A
+ def write_error(self, status_code, **kwargs):%0A self.finish(%7B%0A %22code%22: status_code,%0A %22message%22: self._reason,%0A %7D)%0A%0A
def post(sel
@@ -1230,32 +1230,49 @@
p.BAD_REQUEST)%0A%0A
+ try:%0A
ret, pas
@@ -1312,16 +1312,202 @@
, email)
+%0A except api.YandexException as e:%0A if e.message != 'occupied':%0A raise%0A self.write(%7B%7D)%0A raise tornado.web.HTTPError(http.FORBIDDEN)
%0A%0A
|
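Decoded, the hunks make the handler emit JSON on every error path: a `write_error` override serializes the status code and reason, and an already-taken address is turned into a 403. A sketch with the untouched parsing elided:

```python
# Decoded from the hunks; api.YandexException is named in the diff itself.
class EmailAPIHandler(tornado.web.RequestHandler):
    def write_error(self, status_code, **kwargs):
        # every HTTPError now renders as JSON instead of an HTML page
        self.finish({
            "code": status_code,
            "message": self._reason,
        })

    def post(self):
        # ... request parsing and validation as in the original ...
        try:
            ret, passwd = api.create_email(connection, email)
        except api.YandexException as e:
            if e.message != 'occupied':
                raise
            self.write({})
            raise tornado.web.HTTPError(http.FORBIDDEN)
        # ... success response as in the original ...
```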
be912178ca6a15a69f6479f71a9561a08bc3d301
|
use hostnames in cbcollectinfo archives
|
perfrunner/tests/__init__.py
|
perfrunner/tests/__init__.py
|
import exceptions as exc
import os
import shutil
import time
from logger import logger
from perfrunner.helpers.cbmonitor import CbAgent
from perfrunner.helpers.experiments import ExperimentHelper
from perfrunner.helpers.memcached import MemcachedHelper
from perfrunner.helpers.metrics import MetricHelper
from perfrunner.helpers.misc import log_phase, target_hash
from perfrunner.helpers.monitor import Monitor
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.reporter import Reporter
from perfrunner.helpers.rest import RestHelper
from perfrunner.helpers.worker import WorkerManager
from perfrunner.settings import TargetSettings
class TargetIterator(object):
def __init__(self, cluster_spec, test_config, prefix=None):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.prefix = prefix
def __iter__(self):
username, password = self.cluster_spec.rest_credentials
prefix = self.prefix
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
if self.prefix is None:
prefix = target_hash(master.split(':')[0])
yield TargetSettings(master, bucket, username, password, prefix)
class PerfTest(object):
COLLECTORS = {}
def __init__(self, cluster_spec, test_config, experiment=None):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.target_iterator = TargetIterator(self.cluster_spec,
self.test_config)
self.memcached = MemcachedHelper(cluster_spec)
self.monitor = Monitor(cluster_spec)
self.rest = RestHelper(cluster_spec)
self.remote = RemoteHelper(cluster_spec)
if experiment:
self.experiment = ExperimentHelper(experiment,
cluster_spec, test_config)
self.master_node = cluster_spec.yield_masters().next()
self.build = self.rest.get_version(self.master_node)
self.cbagent = CbAgent(self)
self.metric_helper = MetricHelper(self)
self.reporter = Reporter(self)
self.reports = {}
self.snapshots = []
self.worker_manager = WorkerManager(cluster_spec, test_config)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.worker_manager.terminate(self.cluster_spec, self.test_config)
if exc_type != exc.KeyboardInterrupt:
self.debug()
for master in self.cluster_spec.yield_masters():
num_failovers = self.rest.get_failover_counter(master)
if num_failovers:
logger.interrupt(
'Failover happened {} time(s)'.format(num_failovers)
)
def compact_bucket(self):
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
self.rest.trigger_bucket_compaction(master, bucket)
for master in self.cluster_spec.yield_masters():
self.monitor.monitor_task(master, 'bucket_compaction')
def wait_for_persistence(self):
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
self.monitor.monitor_disk_queue(master, bucket)
self.monitor.monitor_tap_replication(master, bucket)
def load(self):
load_settings = self.test_config.load_settings
log_phase('load phase', load_settings)
self.worker_manager.run_workload(load_settings, self.target_iterator)
self.worker_manager.wait_for_workers()
def hot_load(self):
hot_load_settings = self.test_config.hot_load_settings
if '2.0.0' < self.build < '2.1.0':
log_phase('hot load phase', hot_load_settings)
self.worker_manager.run_workload(hot_load_settings,
self.target_iterator)
self.worker_manager.wait_for_workers()
hot_load_settings.seq_updates = False
log_phase('hot load phase', hot_load_settings)
self.worker_manager.run_workload(hot_load_settings,
self.target_iterator)
self.worker_manager.wait_for_workers()
def access(self):
access_settings = self.test_config.access_settings
log_phase('access phase', access_settings)
self.worker_manager.run_workload(access_settings, self.target_iterator)
self.worker_manager.wait_for_workers()
def access_bg(self):
access_settings = self.test_config.access_settings
log_phase('access in background', access_settings)
self.worker_manager.run_workload(access_settings, self.target_iterator,
timer=access_settings.time)
def access_bg_with_ddocs(self):
access_settings = self.test_config.access_settings
log_phase('access phase', access_settings)
self.worker_manager.run_workload(access_settings, self.target_iterator,
timer=access_settings.time,
ddocs=self.ddocs)
def timer(self):
access_settings = self.test_config.access_settings
logger.info('Running phase for {} seconds'.format(access_settings.time))
time.sleep(access_settings.time)
def debug(self):
self.remote.collect_info()
self.reporter.save_web_logs()
for root, _, files in os.walk('.'):
for f in files:
if f.endswith('.zip'):
shutil.move(os.path.join(root, f), '.')
|
Python
| 0
|
@@ -25,18 +25,20 @@
%0Aimport
-os
+glob
%0Aimport
@@ -5532,46 +5532,70 @@
-self.reporter.save_web_logs()%0A
+for hostname in self.cluster_spec.yield_hostnames():%0A
for
@@ -5594,167 +5594,160 @@
+
for
-root, _, files in os.walk('.'):%0A for f in files:%0A if f.endswith('.zip'):%0A shutil.move(os.path.join(root, f), '.'
+fname in glob.glob('%7B%7D/*.zip'.format(hostname)):%0A shutil.move(fname, '%7B%7D.zip'.format(hostname))%0A self.reporter.save_web_logs(
)%0A
|
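Decoded, the hunks swap the `os.walk` sweep for per-host globbing, so each cbcollectinfo archive is renamed after the host it came from, and `save_web_logs()` moves after the loop:

```python
# debug() decoded from the diff; yield_hostnames() is part of the
# surrounding cluster_spec helper API.
import glob
import shutil

def debug(self):
    self.remote.collect_info()
    for hostname in self.cluster_spec.yield_hostnames():
        for fname in glob.glob('{}/*.zip'.format(hostname)):
            shutil.move(fname, '{}.zip'.format(hostname))
    self.reporter.save_web_logs()
```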
c9f25b7fb983c3d635ab7f13f350a53422059a8c
|
Handle errors in reloaded code
|
cpp/pineal-run.py
|
cpp/pineal-run.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
from time import sleep
from sys import argv
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import hy
from pineal.hy_utils import run_hy_code
def update_file(file_name, ns, history):
"Update running code, saving in the history"
print("Updating file") # TODO logging
with open(file_name) as f:
code = f.read()
history.append(code)
try:
run_hy_code(code, ns)
except e:
print(e)
history.pop() # TODO test and debug this
def watch_file(file_name, action, *args, **kwargs):
"Return a watchdog observer, it will call the action callback"
def on_modified(event):
"File-changed event"
print("File changed") # TODO logging
if event.src_path == file_name:
action(file_name, *args, **kwargs)
handler = FileSystemEventHandler()
handler.on_modified = on_modified
observer = Observer()
base_path = os.path.split(file_name)[0]
observer.schedule(handler, path=base_path)
observer.start()
return observer
def main(file_name):
"Main function"
ns = {} # namespace
history = [] # handle old versions of code
update_file(file_name, ns, history)
watcher = watch_file(file_name, update_file, ns, history)
try:
while True:
ns["loop"]()
sleep(1.0/120)
except KeyboardInterrupt:
watcher.stop()
watcher.join()
if __name__ == "__main__":
if argv[1:]:
main(argv[1])
else:
print("Usage: ", argv[0], "filename")
|
Python
| 0.000001
|
@@ -107,17 +107,31 @@
rt argv%0A
+import logging
%0A
-
from wat
@@ -265,16 +265,519 @@
y_code%0A%0A
+logger = logging.getLogger(%22pineal-run%22)%0Alogger.setLevel(logging.INFO)%0Alogger.addHandler(logging.StreamHandler())%0A%0A%0Adef run_code(ns, history):%0A %22Run last code in the history, if available%22%0A if history:%0A try:%0A run_hy_code(history%5B-1%5D, ns)%0A except Exception as e:%0A logger.info(%22Error evaluating code%22)%0A logger.error(e)%0A history.pop()%0A run_code(ns, history)%0A else:%0A logger.error(%22Empty history, there is no valid code%22)%0A%0A
%0Adef upd
@@ -863,21 +863,27 @@
ry%22%0A
-print
+logger.info
(%22Updati
@@ -891,32 +891,16 @@
g file%22)
- # TODO logging
%0A%0A wi
@@ -978,29 +978,16 @@
de)%0A
- try:%0A
run_
hy_c
@@ -986,106 +986,25 @@
run_
-hy_
code(
-code, ns)%0A except e:%0A print(e)%0A history.pop() # TODO test and debug this
+ns, history)
%0A%0A%0Ad
@@ -1191,45 +1191,42 @@
-print(%22File changed%22) # TODO logging
+logger.info(file_name, %22 changed%22)
%0A
@@ -1797,16 +1797,37 @@
e True:%0A
+ try:%0A
@@ -1843,16 +1843,151 @@
oop%22%5D()%0A
+ except Exception as e:%0A logger.error(e)%0A history.pop()%0A run_code(ns, history)%0A
|
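Decoded, the hunks replace the prints with a logger and add a `run_code` helper that walks back through the history until some saved version of the file evaluates without raising:

```python
# run_code decoded from the diff: on failure, pop the bad version and retry
# with the previous one; run_hy_code comes from pineal.hy_utils.
import logging

logger = logging.getLogger("pineal-run")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

def run_code(ns, history):
    "Run last code in the history, if available"
    if history:
        try:
            run_hy_code(history[-1], ns)
        except Exception as e:
            logger.info("Error evaluating code")
            logger.error(e)
            history.pop()
            run_code(ns, history)
    else:
        logger.error("Empty history, there is no valid code")
```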
ae241ec7ecc75e9b66c2f47a97d7786f984e2470
|
Fix prices query
|
db.py
|
db.py
|
import os
import psycopg2
import urlparse # import urllib.parse for python 3+
class model:
def __init__(self, uri):
parsed = urlparse.urlparse(uri)
username = parsed.username
password = parsed.password
database = parsed.path[1:]
hostname = parsed.hostname
self.conn = psycopg2.connect(
database = database,
user = username,
password = password,
host = hostname
)
def getNearestAirports(self, lat, lon):
cur = self.conn.cursor()
cur.execute("""
SELECT *
FROM
(SELECT airports.iata as iata,
airports.name as name,
airport_rankings.rank as rank,
St_distance(airport_spatial.location, St_makepoint(%(lat)s, %(lon)s)::geography)::int as distance
FROM airports
INNER JOIN airport_spatial
ON airports.id = airport_spatial.id
JOIN airport_rankings
ON airports.IATA = airport_rankings.IATA
WHERE airports.IATA != '' AND St_distance(airport_spatial.location, St_makepoint(%(lat)s, %(lon)s)::geography)::int < 300000
ORDER BY distance
LIMIT 5) as t1
UNION
(SELECT airports.iata as iata,
airports.name as name,
airport_rankings.rank as rank,
St_distance(airport_spatial.location, St_makepoint(%(lat)s, %(lon)s)::geography)::int as distance
FROM airports
INNER JOIN airport_spatial
ON airports.id = airport_spatial.id
LEFT JOIN airport_rankings
ON airports.IATA = airport_rankings.IATA
WHERE airports.IATA != ''
ORDER BY distance
LIMIT 5)
ORDER BY distance;
""", {'lat': lat, 'lon': lon})
rows = cur.fetchall()
res = []
for row in rows:
it = {}
it['IATA'] = row[0]
it['name'] = row[1]
it['ranking'] = row[2]
it['distance'] = row[3]
res.append(it)
return res
def get_airline_data(self, iata):
cur = self.conn.cursor()
cur.execute("""
SELECT name, alias, iata, icao, callsign, country, active
FROM airlines
WHERE iata = %(iata)s
LIMIT 1;
""", {'iata':iata})
rows = cur.fetchall()
row = map(lambda x: unicode(str(x), 'utf-8'), rows[0])
it = {}
it['name'] = row[0]
it['alias'] = row[1]
it['iata'] = row[2]
it['icao'] = row[3]
it['callsign'] = row[4]
it['country'] = row[5]
it['active'] = row[6]
return it
def get_airline_reviews(self, iata):
cur = self.conn.cursor()
cur.execute("""
SELECT content, helpful_percentage, rating, name, url
FROM flightdiary_airline_comments
WHERE airline_id = (SELECT id FROM flightdiary_airlines WHERE iata = %(iata)s LIMIT 1)
ORDER BY helpful_percentage DESC
LIMIT 10;
""", {'iata':iata})
rows = cur.fetchall()
res = []
for row in rows:
row = map(lambda x: unicode(str(x), 'utf-8'), row)
it = {}
it['content'] = row[0]
it['helpful_percentage'] = row[1]
it['rating'] = row[2]
it['name'] = row[3]
it['from'] = 'flightdiary'
it['url'] = row[4]
res.append(it)
return res
def getAirlinesCoveringAirports(self, iatas):
cur = self.conn.cursor()
cur.execute("""
SELECT airline, count(*) as num_routes, (count(*) * 1.0) / sum(count(*)) over() as p
FROM routes
INNER JOIN (
SELECT src, dest FROM routes_unique
WHERE src = ANY(%(iatas)s) OR dest = ANY(%(iatas)s)
) AS my_routes
ON (routes.src_airport = my_routes.src AND routes.dest_airport = my_routes.dest)
GROUP BY airline
ORDER BY p DESC;
""", {'iatas': iatas})
rows = cur.fetchall()
res = []
for row in rows:
it = {}
it['iata'] = row[0]
it['num_routes'] = row[1]
it['p'] = str(row[2])
res.append(it)
return res
def get_airport_locations(self, iatas):
cur = self.conn.cursor()
cur.execute("""
SELECT iata, latitude as lat, longitude as lon
FROM airports
WHERE iata IN (%s)
""" % ','.join(map(lambda x: "'"+x+"'", iatas))) # UNSAFE
locations = {}
for row in cur.fetchall():
locations[row[0]] = {'lat': row[1], 'lng': row[2]}
return locations
def getPricesCoveringAirports(self, iatas):
cur = self.conn.cursor()
# The last join condition is really long because skyscanner prices
cur.execute("""
SELECT DISTINCT prices.origin, prices.destination, prices.minprice, airlines.name
FROM routes
INNER JOIN (
SELECT src, dest FROM routes_unique
WHERE src = ANY(%(iatas)s) OR dest = ANY(%(iatas)s)
) AS my_routes
ON (routes.src_airport = my_routes.src AND routes.dest_airport = my_routes.dest)
INNER JOIN airlines
ON routes.airline = airlines.iata
INNER JOIN sky_open_join
ON sky_open_join.open_name = airlines.name
INNER JOIN prices
ON (
prices.origin = routes.src_airport
AND prices.destination = routes.dest_airport
AND prices.outboundcarrier = sky_open_join.sky_name)
OR (
prices.origin = routes.dest_airport
AND prices.destination = routes.src_airport
AND prices.inboundcarrier = sky_open_join.sky_name);
""", {'iatas': iatas})
rows = cur.fetchall()
res = []
for row in rows:
it = {}
            it['origin'] = row[0]
            it['destination'] = row[1]
            it['minprice'] = str(row[2])
            it['airline'] = row[3]
res.append(it)
return res
|
Python
| 0.999991
|
@@ -4491,16 +4491,262 @@
r prices
+ are for return tickets%0A # which may have different outbound and inbound carriers%0A # therefore we need to match both directions of a quote separately, and use DISTINCT to deduplicate%0A # when they happen to be the same carrier
%0A
|
f574e19b14ff861c45f6c66c64a2570bdb0e3a3c
|
Apply change of file name
|
crawl_comments.py
|
crawl_comments.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
main_crawl.py [--sqlite <sqlite>] [--csv <csv>]
Options:
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
sqlite_path = args['--sqlite']
csv_path = args['--csv']
ncrawler = NicoCrawler()
ncrawler.connect_sqlite(sqlite_path)
url = 'http://ch.nicovideo.jp/2016winter_anime'
df = ncrawler.get_all_video_url_of_season(url)
ncrawler.initialize_csv_from_db(csv_path)
# # デイリーランキング1~300位の動画を取得する
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, csv_path, max_page=3)
# ncrawler.get_all_comments_of_csv(csv_path, max_n_iter=1)
|
Python
| 0.000001
|
@@ -103,18 +103,22 @@
-main_
crawl
+_comments
.py
|
3bc4fa33c3ec9272fed565260677518dcf5957fe
|
change version to 0.10.0.dev0
|
csaps/_version.py
|
csaps/_version.py
|
# -*- coding: utf-8 -*-
__version__ = '0.9.0'
|
Python
| 0.000006
|
@@ -39,9 +39,15 @@
'0.
-9.
+10.0.dev
0'%0A
|
214d4de14324e0097bad9bfb8b3b0fb009368abd
|
Add a docstring for EmptyInterval class
|
src/tempo/interval.py
|
src/tempo/interval.py
|
# coding=utf-8
"""Provides Interval class and EmptyInterval singleton value."""
class Interval(object):
"""
Interval(stop)
Interval(start, stop)
Represents an interval between two numbers.
Parameters
----------
start : decimal.Decimal or float or int
Start of the interval.
stop : decimal.Decimal or float or int
Inclusive stop of the interval.
Examples
--------
>>> interval = Interval(5)
>>> interval
... Interval(start=0.0, stop=5.0)
>>> 3 in interval
... True
>>> 10 in interval
... False
"""
__slots__ = ('start', 'stop')
def __init__(self, *args, **kwargs):
try:
self.stop = kwargs.get('stop', args[1])
self.start = kwargs.get('start', args[0])
except IndexError:
self.start = 0
self.stop = kwargs.get('start', args[0])
def __str__(self):
return ('Interval(start={start}, stop={stop})'
.format(start=repr(self.start),
stop=repr(self.stop)))
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash((self.start, self.stop))
def __contains__(self, item):
"""Containment test.
Parameters
----------
item : int or float or decimal.Decimal
Number to test.
Returns
-------
bool
"""
return self.start <= item <= self.stop
def __eq__(self, other):
return (self.start == other.start and
self.stop == other.stop)
def __gt__(self, other):
return self.start < other.start and self.stop > other.stop
def __ge__(self, other):
return self.start <= other.start and self.stop >= other.stop
def __lt__(self, other):
return self.start > other.start and self.stop < other.stop
def __le__(self, other):
return self.start >= other.start and self.stop <= other.stop
def isoverlap(self, other):
"""Is this interval overlaps with other one?"""
if other is EmptyInterval:
return False
return not (self.stop < other.start or self.start > other.stop)
def overlap(self, other):
"""Returns a new instance of Interval, that represents overlap
between this interval and a given one.
If intervals does not overlap, `EmptyInterval` is returned.
"""
if not self.isoverlap(other):
return EmptyInterval
elif self <= other:
return self.__class__(other.start, other.stop)
elif self > other:
return self.__class__(self.start, self.stop)
else:
_, start, stop, _ = sorted((self.start, self.stop,
other.start, other.stop))
return self.__class__(start, stop)
def combine(self, other):
"""If two intervals intersect, returns a new interval,
that cover space of both. Otherwise - returns `EmptyInterval`.
"""
if not self.isoverlap(other):
return EmptyInterval
start, _, _, stop = sorted((self.start, self.stop,
other.start, other.stop))
return self.__class__(start, stop)
class EmptyIntervalType(Interval):
__slots__ = ()
def __new__(cls, *args, **kwargs):
global EmptyInterval
if EmptyInterval is None:
return super(EmptyIntervalType, cls).__new__(cls, *args, **kwargs)
else:
raise TypeError('Can not create new instances.')
def __init__(self):
pass
def __str__(self):
return 'EmptyInterval'
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if isinstance(other, type(self)):
return True
else:
return False
def __hash__(self):
return 185453665
def __contains__(self, item):
return False
def __gt__(self, other):
return False
def __ge__(self, other):
return self.__eq__(other)
def __lt__(self, other):
return not self.__eq__(other)
def __le__(self, other):
return True
def isoverlap(self, other):
return False
def overlap(self, other):
return self
def combine(self, other):
return other
EmptyInterval = None
EmptyInterval = EmptyIntervalType()
|
Python
| 0.000001
|
@@ -3310,16 +3310,68 @@
terval):
+%0A %22%22%22A class of EmptyInterval singleton value.%22%22%22
%0A%0A __
|
3bb9c0aacdfff372e41d7a8d4c43e71535bff930
|
Remove perf regression in not yet finished size estimation code
|
sdks/python/google/cloud/dataflow/worker/opcounters.py
|
sdks/python/google/cloud/dataflow/worker/opcounters.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counters collect the progress of the Worker for reporting to the service."""
from __future__ import absolute_import
from google.cloud.dataflow.utils.counters import Accumulator
from google.cloud.dataflow.utils.counters import Counter
class OperationCounters(object):
"""The set of basic counters to attach to an Operation."""
def __init__(self, counter_factory, step_name, coder, output_index):
self.element_counter = counter_factory.get_counter(
'%s-out%d-ElementCount' % (step_name, output_index), Counter.SUM)
self.mean_byte_counter = counter_factory.get_counter(
'%s-out%d-MeanByteCount' % (step_name, output_index), Counter.MEAN)
self.coder = coder
self._active_accumulators = []
def update_from(self, windowed_value, coder=None):
"""Add one value to this counter."""
self.element_counter.update(1)
byte_size_accumulator = Accumulator(self.mean_byte_counter.name)
self._active_accumulators.append(byte_size_accumulator)
# TODO(gildea):
# Actually compute the encoded size of this value.
# In spirit, something like this:
# if coder is None:
# coder = self.coder
# coder.store_estimated_size(windowed_value, byte_size_accumulator)
# but will need to do sampling.
def update_collect(self):
"""Collects the accumulated size estimates.
Now that the element has been processed, we ask our accumulator
for the total and store the result in a counter.
"""
for pending in self._active_accumulators:
self.mean_byte_counter.update(pending.total)
self._active_accumulators = []
def __str__(self):
return '<%s [%s]>' % (self.__class__.__name__,
', '.join([str(x) for x in self.__iter__()]))
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__,
[x for x in self.__iter__()], hex(id(self)))
|
Python
| 0.000001
|
@@ -716,69 +716,8 @@
rt%0A%0A
-from google.cloud.dataflow.utils.counters import Accumulator%0A
from
@@ -769,16 +769,16 @@
Counter%0A
+
%0A%0Aclass
@@ -1227,43 +1227,8 @@
oder
-%0A self._active_accumulators = %5B%5D
%0A%0A
@@ -1362,132 +1362,59 @@
-byte_size_accumulator = Accumulator(self.mean_byte_counter.name)%0A self._active_accumulators.append(byte_size_accumulator)
+# TODO(silviuc): Implement estimated size sampling.
%0A
@@ -1909,135 +1909,68 @@
-for pending in self._active_accumulators:%0A self.mean_byte_counter.update(pending.total)%0A self._active_accumulators = %5B%5D
+# TODO(silviuc): Implement estimated size sampling.%0A pass
%0A%0A
|
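Decoded, the hunks drop the `Accumulator` import and the per-element accumulator bookkeeping that regressed performance, leaving stubs until sampling is implemented:

```python
# Methods after the decoded diff; counters are unchanged and the original
# docstring of update_collect is elided here.
def update_from(self, windowed_value, coder=None):
    """Add one value to this counter."""
    self.element_counter.update(1)
    # TODO(silviuc): Implement estimated size sampling.

def update_collect(self):
    # TODO(silviuc): Implement estimated size sampling.
    pass
```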
7a456cda2c39280f6facf273edb647badb78cc1e
|
Fix docstring
|
src/adhocracy_frontend/adhocracy_frontend/tests/acceptance/shared.py
|
src/adhocracy_frontend/adhocracy_frontend/tests/acceptance/shared.py
|
"""Shared acceptance test functions."""
from random import choice
from time import sleep
import requests
import json
from splinter.driver.webdriver import WebDriverElement
from adhocracy_core.testing import god_login
from adhocracy_core.testing import god_password
from adhocracy_core.testing import annotator_password
from adhocracy_core.testing import annotator_login
from selenium.common.exceptions import NoSuchElementException
# FIXME: root_uri must be constructed from etc/*.ini, not hard-coded here!
root_uri = 'http://localhost:6542'
verbose = False
ALPHABET = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
WHITESPACE = ' '
def get_random_string(n=10, whitespace=False) -> str:
"""Return autogenerated string."""
alphabet = ALPHABET + WHITESPACE if whitespace else ALPHABET
return ''.join(choice(alphabet) for i in range(n))
def wait(condition, step=0.1, max_steps=10) -> bool:
"""Wait for a condition to become true."""
for i in range(max_steps - 1):
try:
result = condition()
if hasattr(result, 'visible'):
if result.visible:
return True
else:
sleep(step)
else:
if result:
return True
else:
sleep(step)
except ValueError:
pass
except NoSuchElementException:
pass
return condition()
def login(browser, name_or_email, password,
expect_success=True,
visit_root=True):
"""Login user with name and password."""
if is_logged_in(browser):
return
login_url = browser.app_url + 'login'
browser.visit(login_url)
fill_input(browser, '.login [name="nameOrEmail"]', name_or_email)
fill_input(browser, '.login [name="password"]', password)
click_button(browser, '.login [type="submit"]')
if expect_success and not browser.wait_for_condition(is_logged_in, 20):
raise Exception('login failed.')
if visit_root:
browser.visit(browser.root_url)
def login_god(browser, **kwargs):
"""Login god user."""
login(browser, god_login, god_password, **kwargs)
def login_annotator(browser, **kwargs):
"""Login god user."""
login(browser, annotator_login, annotator_password, **kwargs)
def logout(browser):
"""Logout user."""
if is_logged_in(browser):
click_button(browser, '.user-indicator-logout')
browser.wait_for_condition(is_logged_out, 30)
def is_logged_in(browser):
"""Check if user is logged in."""
return browser.is_element_present_by_css('.user-indicator-logout')
def is_logged_out(browser):
"""Check if user is logged out."""
return browser.is_element_not_present_by_css(
'.user-indicator-logout')
def fill_input(browser, css_selector, value):
"""Find `css_selector` and fill value."""
element = browser.find_by_css(css_selector).first
element.fill(value)
def click_button(browser, css_selector):
"""Find `css_selector` and click."""
element = browser.find_by_css(css_selector).first
element.click()
def title_is_in_listing(listing, title: str) -> bool:
"""Check that a listing element with text == `title` exists."""
for element in listing.find_by_css('.listing-element'):
wait(lambda: element.text, max_steps=5)
if element.text == title:
return True
def get_listing_create_form(listing) -> WebDriverElement:
"""Open and return the create form of a listing."""
return listing.find_by_css('.listing-create-form').first
def get_column_listing(browser, column_name: str) -> WebDriverElement:
"""Return the listing in the content column ."""
column = browser.find_by_css('.moving-column-' + column_name)
listing = column.first.find_by_css('.listing')
return listing
def get_list_element(listing, text, descendant=None, max_steps=20):
"""Return list element with text == `text`."""
for element in listing.find_by_css('.listing-element'):
wait(lambda: element.text, max_steps=max_steps)
if descendant is None:
element_text = element.text
else:
element_text = element.find_by_css(descendant).first.text
if element_text == text:
return element
def api_login(name_or_email: str, password: str) -> dict:
"""Login user and return user token and path."""
uri = root_uri + '/login_username'
headers = {
'Content-Type': 'application/json;charset=UTF-8',
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip,deflate',
'Connection': 'keep-alive',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)',
'Content-Length': '36'
}
body = json.dumps({
'name': name_or_email,
'password': password
})
response = requests.post(uri, headers=headers, data=body)
if verbose:
print('\n')
print(uri)
print(headers)
print(body)
print(response)
print(response.text)
assert response.status_code == 200
data = response.json()
assert data['status'] == 'success'
return {'user_token': data['user_token'],
'user_path': data['user_path']}
def api_login_god() -> dict:
"""Login in as god and return user token and path."""
return api_login(god_login, god_password)
|
Python
| 0.000002
|
@@ -2254,35 +2254,41 @@
):%0A %22%22%22Login
-god
+annotator
user.%22%22%22%0A lo
|
6c57985a4f214978af59479ade330d829a02f457
|
Add Event.tags
|
src/sentry/models/event.py
|
src/sentry/models/event.py
|
"""
sentry.models.event
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import warnings
from django.db import models
from django.utils import timezone
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
Model, NodeField, BoundedIntegerField, BoundedPositiveIntegerField,
BaseManager, sane_repr
)
from sentry.interfaces.base import get_interface
from sentry.utils.cache import memoize
from sentry.utils.safe import safe_execute
from sentry.utils.strings import truncatechars, strip
class Event(Model):
"""
An individual event.
"""
group = models.ForeignKey('sentry.Group', blank=True, null=True, related_name="event_set")
event_id = models.CharField(max_length=32, null=True, db_column="message_id")
project = models.ForeignKey('sentry.Project', null=True)
message = models.TextField()
checksum = models.CharField(max_length=32, db_index=True)
num_comments = BoundedPositiveIntegerField(default=0, null=True)
platform = models.CharField(max_length=64, null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
time_spent = BoundedIntegerField(null=True)
data = NodeField(blank=True, null=True)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_message'
verbose_name = _('message')
verbose_name_plural = _('messages')
unique_together = (('project', 'event_id'),)
index_together = (('group', 'datetime'),)
__repr__ = sane_repr('project_id', 'group_id', 'checksum')
def error(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 100)
return message
error.short_description = _('error')
def has_two_part_message(self):
message = strip(self.message)
return '\n' in message or len(message) > 100
@property
def message_short(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 100)
return message
@property
def team(self):
return self.project.team
@property
def organization(self):
return self.project.organization
@property
def version(self):
return self.data.get('version', '5')
@memoize
def ip_address(self):
http_data = self.data.get('sentry.interfaces.Http')
if http_data and 'env' in http_data:
value = http_data['env'].get('REMOTE_ADDR')
if value:
return value
user_data = self.data.get('sentry.interfaces.User')
if user_data:
value = user_data.get('ip_address')
if value:
return value
return None
@memoize
def user_ident(self):
"""
The identifier from a user is considered from several interfaces.
In order:
- User.id
- User.email
- User.username
- Http.env.REMOTE_ADDR
"""
user_data = self.data.get('sentry.interfaces.User', self.data.get('user'))
if user_data:
ident = user_data.get('id')
if ident:
return 'id:%s' % (ident,)
ident = user_data.get('email')
if ident:
return 'email:%s' % (ident,)
ident = user_data.get('username')
if ident:
return 'username:%s' % (ident,)
ident = self.ip_address
if ident:
return 'ip:%s' % (ident,)
return None
@memoize
def interfaces(self):
result = []
for key, data in self.data.iteritems():
try:
cls = get_interface(key)
except ValueError:
continue
value = safe_execute(cls.to_python, data)
if not value:
continue
result.append((key, value))
return SortedDict((k, v) for k, v in sorted(result, key=lambda x: x[1].get_score(), reverse=True))
def get_tags(self, with_internal=True):
try:
return [
(t, v) for t, v in self.data.get('tags') or ()
if with_internal or not t.startswith('sentry:')
]
except ValueError:
# at one point Sentry allowed invalid tag sets such as (foo, bar)
# vs ((tag, foo), (tag, bar))
return []
def as_dict(self):
# We use a SortedDict to keep elements ordered for a potential JSON serializer
data = SortedDict()
data['id'] = self.event_id
data['culprit'] = self.group.culprit
data['message'] = self.message
data['checksum'] = self.checksum
data['project'] = self.project.slug
data['datetime'] = self.datetime
data['time_spent'] = self.time_spent
for k, v in sorted(self.data.iteritems()):
data[k] = v
return data
@property
def size(self):
data_len = len(self.message)
for value in self.data.itervalues():
data_len += len(repr(value))
return data_len
# XXX(dcramer): compatibility with plugins
def get_level_display(self):
warnings.warn('Event.get_level_display is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.group.get_level_display()
@property
def level(self):
warnings.warn('Event.level is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.group.level
@property
def logger(self):
warnings.warn('Event.logger is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.tags.get('logger')
@property
def site(self):
warnings.warn('Event.site is deprecated. Use Event.tags instead.',
DeprecationWarning)
return self.tags.get('site')
@property
def server_name(self):
warnings.warn('Event.server_name is deprecated. Use Event.tags instead.')
return self.tags.get('server_name')
@property
def culprit(self):
warnings.warn('Event.culprit is deprecated. Use Event.tags instead.')
return self.tags.get('culprit')
|
Python
| 0.000001
|
@@ -4764,16 +4764,47 @@
urn %5B%5D%0A%0A
+ tags = property(get_tags)%0A%0A
def
|
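The decoded hunk is a single line placed right after `get_tags`; it is what lets the deprecation shims below it (`Event.logger`, `Event.site`, `Event.server_name`, `Event.culprit`) resolve `self.tags`:

```python
# Decoded addition: expose get_tags as a read-only property.
tags = property(get_tags)
```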
4920391c4e6d690264ebc0bb829ad9b9a374917d
|
math is hard
|
services/extract-entities/entityextractor/aggregate.py
|
services/extract-entities/entityextractor/aggregate.py
|
import logging
from banal import ensure_list
from collections import Counter
from alephclient.services.entityextract_pb2 import ExtractedEntity
from entityextractor.extract import extract_polyglot, extract_spacy
from entityextractor.patterns import extract_patterns
from entityextractor.cluster import Cluster
log = logging.getLogger(__name__)
class EntityAggregator(object):
MAX_COUNTRIES = 3
CUTOFF = 0.01
def __init__(self):
self.clusters = []
self._countries = Counter()
self.record = 0
def extract(self, text, languages):
self.record += 1
for result in extract_polyglot(self, text, languages):
self.add(result)
for result in extract_spacy(self, text, languages):
self.add(result)
for result in extract_patterns(self, text):
self.add(result)
def add(self, result):
countries = [c.lower() for c in ensure_list(result.countries)]
self._countries.update(countries)
if not result.valid:
return
# TODO: make a hash?
for cluster in self.clusters:
if cluster.match(result):
return cluster.add(result)
self.clusters.append(Cluster(result))
@property
def countries(self):
cs = self._countries.most_common(n=self.MAX_COUNTRIES)
return [c for (c, n) in cs]
@property
def entities(self):
total_weight = sum([c.weight for c in self.clusters if c.strict])
for cluster in self.clusters:
# only using locations for country detection at the moment:
if cluster.category == ExtractedEntity.LOCATION:
continue
# skip entities that do not meet a threshold of relevance:
if not cluster.strict:
if (cluster.weight / total_weight) < self.CUTOFF:
continue
# log.info('%s: %s: %s', group.label, group.category, group.weight)
yield cluster.label, cluster.category, cluster.weight
for (country, weight) in self._countries.items():
yield country, ExtractedEntity.COUNTRY, weight
def __len__(self):
return len(self.clusters)
|
Python
| 0.998297
|
@@ -1478,16 +1478,20 @@
ters if
+not
c.strict
@@ -1493,16 +1493,67 @@
trict%5D)%0A
+ total_weight = float(max(1, total_weight))%0A
|
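Decoded from the diff, the "math is hard" fix rewrites the denominator used by the relevance cutoff in the entities property: it sums the weights of the non-strict clusters (the ones the cutoff actually applies to) and clamps the sum so the later division cluster.weight / total_weight can never hit zero. A sketch, placement approximate:

total_weight = sum([c.weight for c in self.clusters if not c.strict])
# guard against an empty or zero sum before it is used as a divisor
total_weight = float(max(1, total_weight))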
aab7c01c94088594258e33e3074f76d8735b8c2e
|
Add default config and config schema
|
mopidy/frontends/mpd/__init__.py
|
mopidy/frontends/mpd/__init__.py
|
from __future__ import unicode_literals
import mopidy
from mopidy import ext
__doc__ = """The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
The frontend is enabled by default.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
class Extension(ext.Extension):
name = 'Mopidy-MPD'
version = mopidy.__version__
def get_default_config(self):
return '[ext.mpd]'
def validate_config(self, config):
pass
def validate_environment(self):
pass
def get_frontend_classes(self):
from .actor import MpdFrontend
return [MpdFrontend]
|
Python
| 0
|
@@ -71,16 +71,804 @@
ort ext%0A
+from mopidy.utils import config, formatting%0A%0A%0Adefault_config = %22%22%22%0A%5Bext.mpd%5D%0A%0A# If the MPD extension should be enabled or not%0Aenabled = true%0A%0A# Which address the MPD server should bind to%0A#%0A# 127.0.0.1%0A# Listens only on the IPv4 loopback interface%0A# ::1%0A# Listens only on the IPv6 loopback interface%0A# 0.0.0.0%0A# Listens on all IPv4 interfaces%0A# ::%0A# Listens on all interfaces, both IPv4 and IPv6%0Ahostname = 127.0.0.1%0A%0A# Which TCP port the MPD server should listen to%0Aport = 6600%0A%0A# The password required for connecting to the MPD server%0Apassword =%0A%0A# The maximum number of concurrent connections the MPD server will accept%0Amax_connections = 20%0A%0A# Number of seconds an MPD client can stay inactive before the connection is%0A# closed by the server%0Aconnection_timeout = 60%0A%22%22%22
%0A%0A__doc_
@@ -1113,154 +1113,58 @@
%0A%0A**
-Settings:**%0A%0A- :attr:%60mopidy.settings.MPD_SERVER_HOSTNAME%60%0A- :attr:%60mopidy.settings.MPD_SERVER_PORT%60%0A- :attr:%60mopidy.settings.MPD_SERVER_PASSWORD%60
+Default config:**%0A%0A.. code-block:: ini%0A%0A%25(config)s
%0A%0A**
@@ -2050,16 +2050,64 @@
rted%0A%22%22%22
+ %25 %7B'config': formatting.indent(default_config)%7D
%0A%0A%0Aclass
@@ -2245,72 +2245,410 @@
urn
-'%5Bext.mpd%5D'%0A%0A def validate_config(self, config):%0A pass
+default_config%0A%0A def get_config_schema(self):%0A schema = config.ExtensionConfigSchema()%0A schema%5B'hostname'%5D = config.Hostname()%0A schema%5B'port'%5D = config.Port()%0A schema%5B'password'%5D = config.String(optional=True, secret=True)%0A schema%5B'max_connections'%5D = config.Integer(minimum=1)%0A schema%5B'connection_timeout'%5D = config.Integer(minimum=1)%0A return schema
%0A%0A
|
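Hand-decoding the diff: validate_config disappears, get_default_config now returns a module-level default_config string, and a new get_config_schema builds the schema from validators imported from mopidy.utils. A condensed sketch (the explanatory ini comments inside default_config are trimmed here):

from mopidy.utils import config, formatting

default_config = """
[ext.mpd]
enabled = true
hostname = 127.0.0.1
port = 6600
password =
max_connections = 20
connection_timeout = 60
"""

class Extension(ext.Extension):
    name = 'Mopidy-MPD'
    version = mopidy.__version__

    def get_default_config(self):
        return default_config

    def get_config_schema(self):
        schema = config.ExtensionConfigSchema()
        schema['hostname'] = config.Hostname()
        schema['port'] = config.Port()
        schema['password'] = config.String(optional=True, secret=True)
        schema['max_connections'] = config.Integer(minimum=1)
        schema['connection_timeout'] = config.Integer(minimum=1)
        return schema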
43653246bfdcf78e76bb41846fbf80ac2e5dc0f2
|
Use declared_attr for ColorMixin columns
|
indico/core/db/sqlalchemy/colors.py
|
indico/core/db/sqlalchemy/colors.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import string
from collections import namedtuple
from indico.core.db import db
class ColorTuple(namedtuple('ColorTuple', ('text', 'background'))):
"""A tuple that contains text and background color.
Both colors are unified to 'rrggbb' notation (in case 'rgb' is
passed) and leading ``#`` is stripped.
When a text/background color is specified, the other color needs
to be specified too. If no color is specified, the ColorTuple
is falsy.
"""
def __new__(cls, text, background):
colors = [text, background]
for i, color in enumerate(colors):
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
color = ''.join(x * 2 for x in color)
colors[i] = color.lower()
if any(colors):
if not all(colors):
raise ValueError('Both colors must be specified')
if not all(len(x) == 6 for x in colors):
raise ValueError('Colors must be `rgb` or `rrggbb`')
if not all(c in string.hexdigits for color in colors for c in color):
raise ValueError('Colors must only use hex digits')
return super(ColorTuple, cls).__new__(cls, *colors)
def __nonzero__(self):
return all(self)
class ColorMixin(object):
"""Mixin to store text+background colors in a model.
For convenience (e.g. for WTForms integrations when selecting both
colors at the same time from a palette or in a compound field) it
provides a `colors` property which returns/accepts a `ColorTuple`
holding text color and background color.
"""
text_color = db.Column(
db.String,
nullable=False,
default=''
)
background_color = db.Column(
db.String,
nullable=False,
default=''
)
@property
def colors(self):
return ColorTuple(self.text_color, self.background_color)
@colors.setter
def colors(self, value):
if value is None:
value = '', ''
self.text_color, self.background_color = value
|
Python
| 0
|
@@ -776,17 +776,16 @@
string%0A
-%0A
from col
@@ -812,16 +812,70 @@
dtuple%0A%0A
+from sqlalchemy.ext.declarative import declared_attr%0A%0A
from ind
@@ -2476,20 +2476,62 @@
-text_color =
+@declared_attr%0A def text_color(cls):%0A return
db.
@@ -2538,32 +2538,36 @@
Column(%0A
+
+
db.String,%0A
@@ -2553,32 +2553,36 @@
db.String,%0A
+
nullable
@@ -2589,32 +2589,36 @@
=False,%0A
+
+
default=''%0A )
@@ -2620,32 +2620,79 @@
-)%0A background_color =
+ )%0A%0A @declared_attr%0A def background_color(cls):%0A return
db.
@@ -2699,32 +2699,36 @@
Column(%0A
+
+
db.String,%0A
@@ -2714,32 +2714,36 @@
db.String,%0A
+
nullable
@@ -2750,32 +2750,36 @@
=False,%0A
+
+
default=''%0A )
@@ -2769,24 +2769,28 @@
default=''%0A
+
)%0A%0A @
|
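Decoded, the diff wraps both mixin columns in SQLAlchemy's declared_attr, which defers Column construction until each inheriting model is configured, presumably so every subclass gets its own freshly built Column object. The patched mixin, reconstructed straight from the diff:

from sqlalchemy.ext.declarative import declared_attr

class ColorMixin(object):
    @declared_attr
    def text_color(cls):
        return db.Column(
            db.String,
            nullable=False,
            default=''
        )

    @declared_attr
    def background_color(cls):
        return db.Column(
            db.String,
            nullable=False,
            default=''
        )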
e92a177124400c932e58e3980371837b82d64e20
|
Clean up
|
maxfilter_from_db.py
|
maxfilter_from_db.py
|
"""
Doc string here.
@author: cjb
@author: mje
last edited: Wed Sep 9 2015
"""
from __future__ import print_function
import os
import sys
import subprocess
# import subprocess
# import numpy as np
# from mne.preprocessing.maxfilter import apply_maxfilter
CLOBBER = False
FAKE = False
VERBOSE = True
# ENH: install "official" version of stormdb on isis/hyades
path_to_stormdb = '/usr/local/common/meeg-cfin/stormdb'
sys.path.append(path_to_stormdb)
from stormdb.access import Query
from stormdb.process import Maxfilter
# MAXFILTER PARAMS #
mf_params = dict(origin='0 0 40',
frame='head',
autobad="on",
st=True,
st_buflen=30,
st_corr=0.95,
movecomp=True,
# cal=cal,
# ctc=ctc,
mx_args='',
maxfilter_bin='maxfilter',
force=True
)
# path to submit_to_isis
cmd = "/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis"
proj_code = "MINDLAB2015_MEG-CorticalAlphaAttention"
db = Query(proj_code)
proj_folder = os.path.join('/projects', proj_code)
scratch_folder = os.path.join(proj_folder, 'scratch/maxfiltered/')
subjects_dir = os.path.join(scratch_folder, 'fs_subjects_dir')
script_dir = proj_folder + '/scripts/'
included_subjects = db.get_subjects()
# just test with first one!
included_subjects = [included_subjects[3]]
for sub in included_subjects:
# this is an example of getting the DICOM files as a list
# sequence_name='t1_mprage_3D_sag'
MEG_study = db.get_studies(sub, modality='MEG')
if len(MEG_study) > 0:
# This is a 2D list with [series_name, series_number]
series = db.get_series(sub, MEG_study[0], 'MEG')
series = series["data"] # only use the "data" series for now.
# Change this to be more elegant: check whether any item in series
# matches sequence_name
in_name = db.get_files(sub, MEG_study[0], 'MEG', series)
out_name = "%s_%s_mc_raw_tsss.fif" % (sub[:4], "data")
# print(out_name)
# if len(in_name) > 1:
for j, in_file in enumerate(in_name):
if j == 0:
out_fname = scratch_folder + out_name
else:
out_fname = scratch_folder\
+ out_name[:-4] + "-%d.fif" % j
# print(out_fname)
tsss_mc_log = out_fname[:-3] + "log"
headpos_log = out_fname[:-4] + "_hp.log"
# print(tsss_mc_log)
# print(headpos_log)
mf_params["logfile"] = tsss_mc_log
mf_params["mv_hp"] = headpos_log
mf=Maxfilter(proj_code)
mf.build_maxfilter_cmd(in_file, out_fname, **mf_params)
print(mf.cmd)
subprocess.call([cmd, "2", mf.cmd])
# apply_maxfilter(in_fname=in_file,
# out_fname=out_fname,
# frame='head',
# # origin= "0 0 40",
# autobad="on",
# st=True,
# st_buflen=30,
# st_corr=0.95,
# mv_comp=True,
# mv_hp=headpos_log,
# # cal=cal,
# # ctc=ctc,
# overwrite=True,
# mx_args=' -v > %s' % tsss_mc_log)
|
Python
| 0.000002
|
@@ -1029,19 +1029,17 @@
to_isis%22
-
%0A
+
proj_cod
@@ -1220,16 +1220,21 @@
filtered
+_data
/')%0A%0Asub
@@ -1419,17 +1419,16 @@
jects =
-%5B
included
@@ -1438,17 +1438,17 @@
bjects%5B3
-%5D
+:
%5D%0A%0Afor s
@@ -2070,24 +2070,25 @@
%22data%22)%0A
-#
+
+#
print(o
@@ -2096,20 +2096,16 @@
t_name)%0A
-
%0A
@@ -2373,22 +2373,17 @@
if%22 %25 j%0A
- %0A#
+%0A
@@ -2381,24 +2381,26 @@
%0A
+ #
print(out_f
@@ -2507,22 +2507,17 @@
hp.log%22%0A
- %0A#
+%0A
@@ -2515,24 +2515,26 @@
%0A
+ #
print(tsss_
@@ -2541,17 +2541,17 @@
mc_log)%0A
-#
+
@@ -2549,24 +2549,25 @@
+#
print(headp
@@ -2574,20 +2574,16 @@
os_log)%0A
-
%0A
@@ -2681,17 +2681,19 @@
mf
-=
+ =
Maxfilte
@@ -2773,29 +2773,17 @@
params)%0A
-
%0A
+
@@ -2848,28 +2848,16 @@
f.cmd%5D)%0A
-
%0A#
|
2b50fd475829aa25889b49da4d4a2dcdcece9893
|
Remove unused imports.
|
src/Products/UserAndGroupSelectionWidget/at/widget.py
|
src/Products/UserAndGroupSelectionWidget/at/widget.py
|
import types
from zope.component import ComponentLookupError
from Globals import InitializeClass
from AccessControl import ClassSecurityInfo
from Products.Archetypes.Widget import TypesWidget
from Products.Archetypes.Registry import registerWidget
from Products.Archetypes.utils import shasattr
from Products.UserAndGroupSelectionWidget.interfaces import IGenericGroupTranslation
class UserAndGroupSelectionWidget(TypesWidget):
_properties = TypesWidget._properties.copy()
_properties.update({
'macro' : "userandgroupselect",
'helper_js' : ('userandgroupselect.js',),
'size' : 7, # size of form-element taking the users
'groupName' : '', # takes the given group as default,
# a group id
'usersOnly' : False, # only allow user selection
'groupsOnly' : False, # allow only group selection
'groupIdFilter' : '*', # allow all groups
'searchableProperties' : (), # which properties you want to search as well
# eg. ('email', 'fullname', 'location')
})
security = ClassSecurityInfo()
security.declarePublic('process_form')
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=None,):
"""process the form data and return it."""
result = TypesWidget.process_form (self, instance, field, form,
empty_marker, emptyReturnsMarker, )
if result is empty_marker:
return result
value, kwargs = result
# The widget always returns a empty item (strange) when we use the
# multival option.
# Remove the empty items manually
if type(value) is types.ListType:
value = [item for item in value if item]
return value, kwargs
security.declarePublic('getGroupId')
def getGroupId(self, instance):
groupid = self.groupName
try:
translator = IGenericGroupTranslation(instance)
except ComponentLookupError:
pass
except TypeError, e:
if e[0] == 'Could not adapt':
pass
else:
raise
else:
groupid = translator.translateToRealGroupId(self.groupName)
return groupid
registerWidget(
UserAndGroupSelectionWidget,
title='User and Group Selection Widget',
description=('You can select users searched from a popup window.'),
used_for=('Products.Archetypes.Field.LinesField',
'Products.Archetypes.Field.StringField', ))
|
Python
| 0
|
@@ -6,17 +6,16 @@
t types%0A
-%0A
from zop
@@ -57,45 +57,8 @@
rror
-%0A%0Afrom Globals import InitializeClass
%0Afro
@@ -209,56 +209,8 @@
get%0A
-from Products.Archetypes.utils import shasattr%0A%0A
from
@@ -291,17 +291,16 @@
lation%0A%0A
-%0A
class Us
@@ -2643,8 +2643,9 @@
ld', ))%0A
+%0A
|
317926c18ac2e139d2018acd767d10b4f53428f3
|
Remove unneeded post method from CreateEnvProfile view
|
installer/installer_config/views.py
|
installer/installer_config/views.py
|
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.views.generic import CreateView, UpdateView, DeleteView
from installer_config.models import EnvironmentProfile, UserChoice, Step
from installer_config.forms import EnvironmentForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
class CreateEnvironmentProfile(CreateView):
model = EnvironmentProfile
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
def form_valid(self, form):
form.instance.user = self.request.user
return super(CreateEnvironmentProfile, self).form_valid(form)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = form_class(request.POST)
if form.is_valid():
config_profile = form.save(commit=False)
config_profile.user = request.user
config_profile.save()
return HttpResponseRedirect(reverse('profile:profile'))
return self.render_to_response({'form': form})
class UpdateEnvironmentProfile(UpdateView):
model = EnvironmentProfile
context_object_name = 'profile'
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
class DeleteEnvironmentProfile(DeleteView):
model = EnvironmentProfile
success_url = '/profile'
def download_profile_view(request, **kwargs):
choices = UserChoice.objects.filter(profiles=kwargs['pk']).all()
# import pdb; pdb.set_trace()
response = render_to_response('installer_template.py', {'choices': choices},
content_type='application')
response['Content-Disposition'] = 'attachment; filename=something.py'
return response
|
Python
| 0
|
@@ -318,53 +318,9 @@
rse%0A
-from django.http import HttpResponseRedirect
%0A
+
%0Acla
@@ -653,423 +653,8 @@
)%0A%0A%0A
- def post(self, request, *args, **kwargs):%0A form_class = self.get_form_class()%0A form = form_class(request.POST)%0A if form.is_valid():%0A config_profile = form.save(commit=False)%0A config_profile.user = request.user%0A config_profile.save()%0A return HttpResponseRedirect(reverse('profile:profile'))%0A return self.render_to_response(%7B'form': form%7D)%0A%0A
clas
@@ -1093,42 +1093,8 @@
l()%0A
- # import pdb; pdb.set_trace()%0A
@@ -1170,16 +1170,42 @@
oices%7D,%0A
+
|
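The removal works because Django's CreateView already implements post(): it validates the form and dispatches to form_valid()/form_invalid(), and the existing form_valid() override attaches the user before saving, making the manual override redundant. The class after the diff, reconstructed:

class CreateEnvironmentProfile(CreateView):
    model = EnvironmentProfile
    template_name = 'env_profile_form.html'
    form_class = EnvironmentForm
    success_url = '/profile'

    def form_valid(self, form):
        # runs inside CreateView's stock post() -> validation flow
        form.instance.user = self.request.user
        return super(CreateEnvironmentProfile, self).form_valid(form)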
c24dbc2d4d8b59a62a68f326edb350b3c633ea25
|
Change the comment of InterleavingMethod.evaluate
|
interleaving/interleaving_method.py
|
interleaving/interleaving_method.py
|
class InterleavingMethod(object):
'''
Interleaving
'''
def interleave(self, k, a, b):
'''
k: the maximum length of resultant interleaving
a: a list of document IDs
b: a list of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def multileave(self, k, *lists):
'''
k: the maximum length of resultant multileaving
*lists: lists of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def evaluate(self, ranking, clicks):
'''
ranking: an instance of Ranking generated by Balanced.interleave
clicks: a list of indices clicked by a user
Return one of the following tuples:
- (1, 0): Ranking 'a' won
- (0, 1): Ranking 'b' won
- (0, 0): Tie
'''
raise NotImplementedError()
|
Python
| 0.000001
|
@@ -748,36 +748,41 @@
urn
-one of the following tupl
+a list of pairs of ranker indic
es
-:
%0A
@@ -790,89 +790,186 @@
-- (1, 0): Ranking 'a' won%0A - (0, 1): Ranking 'b' won%0A - (0, 0): Tie
+in which element (i, j) indicates i won j.%0A%0A e.g. a result %5B(1, 0), (2, 1), (2, 0)%5D indicates%0A ranker 1 won ranker 0, and ranker 2 won ranker 0 as well as ranker 1.
%0A
|
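The diff only rewrites the docstring; decoded, evaluate is now specified to return pairwise wins for an arbitrary number of rankers instead of a fixed a/b tuple:

def evaluate(self, ranking, clicks):
    '''
    ranking: an instance of Ranking generated by Balanced.interleave
    clicks: a list of indices clicked by a user

    Return a list of pairs of ranker indices
    in which element (i, j) indicates i won j.

    e.g. a result [(1, 0), (2, 1), (2, 0)] indicates
    ranker 1 won ranker 0, and ranker 2 won ranker 0 as well as ranker 1.
    '''
    raise NotImplementedError()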
0e36a18ced74b3d7e1195994702305c26ac57dfa
|
Change gift and link it to correspondent
|
sponsorship_compassion/wizards/generate_gift_wizard.py
|
sponsorship_compassion/wizards/generate_gift_wizard.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, fields, models, _
from dateutil.relativedelta import relativedelta
from ..models.product import GIFT_NAMES
import logging
logger = logging.getLogger(__name__)
class GenerateGiftWizard(models.TransientModel):
""" This wizard generates a Gift Invoice for a given contract. """
_name = 'generate.gift.wizard'
amount = fields.Float("Gift Amount", required=True)
product_id = fields.Many2one(
'product.product', "Gift Type", required=True)
invoice_date = fields.Date(default=fields.Date.today())
description = fields.Char("Additional comments", size=200)
force = fields.Boolean(
'Force creation', help="Creates the gift even if one was already "
"made the same year.")
@api.multi
def generate_invoice(self):
# Read data in english
self.ensure_one()
self = self.with_context(lang='en_US')
if not self.description:
self.description = self.product_id.display_name
invoice_ids = list()
gen_states = self.env['recurring.contract.group']._get_gen_states()
# Ids of contracts are stored in context
for contract in self.env['recurring.contract'].browse(
self.env.context.get('active_ids', list())).filtered(
lambda c: 'S' in c.type and c.state in gen_states
):
if self.product_id.name == GIFT_NAMES[0]:
# Birthday Gift
if not contract.child_id.birthdate:
logger.error(
'The birthdate of the child is missing!')
continue
invoice_date = self.compute_date_birthday_invoice(
contract.child_id.birthdate,
self.invoice_date)
begin_year = fields.Date.from_string(
self.invoice_date).replace(month=1, day=1)
end_year = begin_year.replace(month=12, day=31)
# If a gift was already made for the year, abort
invoice_line_ids = self.env[
'account.invoice.line'].search([
('product_id', '=', self.product_id.id),
('due_date', '>=', fields.Date.to_string(
begin_year)),
('due_date', '<=', fields.Date.to_string(
end_year)),
('contract_id', '=', contract.id),
('state', '!=', 'cancel')])
if invoice_line_ids and not self.force:
continue
else:
invoice_date = self.invoice_date
inv_data = self._setup_invoice(contract, invoice_date)
invoice = self.env['account.invoice'].create(inv_data)
# Commit at each invoice creation. This does not break
# the state
self.env.cr.commit() # pylint: disable=invalid-commit
invoice_ids.append(invoice.id)
return {
'name': _('Generated Invoices'),
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'account.invoice',
'domain': [('id', 'in', invoice_ids)],
'context': {'form_view_ref': 'account.invoice_form'},
'type': 'ir.actions.act_window',
}
@api.multi
def _setup_invoice(self, contract, invoice_date):
journal_id = self.env['account.journal'].search([
('type', '=', 'sale'),
('company_id', '=', 1)], limit=1).id
return {
'type': 'out_invoice',
'partner_id': contract.partner_id.id,
'journal_id': journal_id,
'date_invoice': invoice_date,
'payment_mode_id': contract.payment_mode_id.id,
'recurring_invoicer_id': self.env.context.get(
'recurring_invoicer_id', False),
'invoice_line_ids': [(0, 0, self.with_context(
journal_id=journal_id)._setup_invoice_line(contract))]
}
@api.multi
def _setup_invoice_line(self, contract):
self.ensure_one()
product = self.product_id
account = product.property_account_income_id.id or self.env[
'account.invoice.line']._default_account()
inv_line_data = {
'name': self.description,
'account_id': account,
'price_unit': self.amount,
'quantity': 1,
'product_id': product.id,
'contract_id': contract.id,
}
# Define analytic journal
analytic = self.env['account.analytic.default'].account_get(
product.id, contract.partner_id.id, date=fields.Date.today())
if analytic and analytic.analytic_id:
inv_line_data['account_analytic_id'] = analytic.analytic_id.id
return inv_line_data
@api.model
def compute_date_birthday_invoice(self, child_birthdate, payment_date):
"""Set date of invoice two months before child's birthdate"""
inv_date = fields.Date.from_string(payment_date)
birthdate = fields.Date.from_string(child_birthdate)
new_date = inv_date
if birthdate.month >= inv_date.month + 2:
new_date = inv_date.replace(day=28, month=birthdate.month-2)
elif birthdate.month + 3 < inv_date.month:
new_date = birthdate.replace(
day=28, year=inv_date.year+1) + relativedelta(months=-2)
new_date = max(new_date, inv_date)
return fields.Date.to_string(new_date)
|
Python
| 0
|
@@ -4392,31 +4392,37 @@
': contract.
-partner
+correspondant
_id.id,%0D%0A
|
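A one-line decode of the diff: inside _setup_invoice, the invoice is now billed to the contract's correspondant rather than its partner, which is what "link it to correspondent" refers to:

'partner_id': contract.correspondant_id.id,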
e94af78bbeae26933d987494e628b18e201f8da2
|
fix logger error message
|
spotseeker_server/management/commands/sync_techloan.py
|
spotseeker_server/management/commands/sync_techloan.py
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from schema import Schema
from .techloan.techloan import Techloan
from .techloan.spotseeker import Spots
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Sync techloan data from the cte"
_settings_scheme = Schema({
'server_host': str,
'oauth_key': str,
'oauth_secret': str,
'oauth_user': str,
})
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
try:
self._settings_scheme.validate(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
except Exception as ex:
logger.error("Settings misconfigured: ", ex)
return
techloan = self.get_techloan()
spots = self.get_spots()
self.sync_techloan_to_spots(techloan, spots)
def get_techloan(self):
return Techloan.from_cte_api()
def get_spots(self):
return Spots.from_spotseeker_server(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
def sync_techloan_to_spots(self, techloan, spots):
spots.sync_with_techloan(techloan)
spots.upload_data()
|
Python
| 0.000005
|
@@ -858,16 +858,17 @@
r.error(
+f
%22Setting
@@ -888,13 +888,18 @@
ed:
-%22, ex
+%7Bstr(ex)%7D%22
)%0A
|
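Decoded, the fix swaps the broken two-argument call for an f-string. In logger.error("Settings misconfigured: ", ex), the stray ex is treated as a %-format parameter that the message never references, so the exception text is lost; the patched line interpolates it explicitly:

logger.error(f"Settings misconfigured: {str(ex)}")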
a9bcbe8bf69403dbf7780843fe362cf8e1f02c95
|
update tree topo
|
mininet/tree/tree.py
|
mininet/tree/tree.py
|
#!/usr/bin/env python
from mininet.cli import CLI
from mininet.link import Link
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.term import makeTerm
def ofp_version(switch, protocols):
protocols_str = ','.join(protocols)
command = 'ovs-vsctl set Bridge %s protocols=%s' % (switch, protocols)
switch.cmd(command.split(' '))
if '__main__' == __name__:
net = Mininet(controller=RemoteController)
c0 = net.addController('c0')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
s3 = net.addSwitch('s3')
s4 = net.addSwitch('s4')
s5 = net.addSwitch('s5')
s6 = net.addSwitch('s6')
h1 = net.addHost('h1')
h2 = net.addHost('h2')
h3 = net.addHost('h3')
h4 = net.addHost('h4')
Link(s1, h1)
Link(s2, h2)
Link(s5, h3)
Link(s6, h4)
Link(s1, s2)
Link(s2, s3)
Link(s2, s4)
Link(s4, s5)
Link(s4, s6)
net.build()
c0.start()
s1.start([c0])
s2.start([c0])
s3.start([c0])
s4.start([c0])
s5.start([c0])
s6.start([c0])
ofp_version(s1, ['OpenFlow13'])
ofp_version(s2, ['OpenFlow13'])
ofp_version(s3, ['OpenFlow13'])
ofp_version(s4, ['OpenFlow13'])
ofp_version(s5, ['OpenFlow13'])
ofp_version(s6, ['OpenFlow13'])
CLI(net)
net.stop()
|
Python
| 0.000001
|
@@ -61,12 +61,12 @@
net.
-link
+node
imp
@@ -182,16 +182,47 @@
akeTerm%0A
+from functools import partial%0A%0A
def ofp_
@@ -448,16 +448,25 @@
troller=
+partial(
RemoteCo
@@ -473,16 +473,46 @@
ntroller
+, ip='10.42.0.27', port=6633 )
)%0A c0 =
@@ -798,24 +798,31 @@
st('h4')%0A%0A
+net.add
Link(s1, h1)
@@ -820,24 +820,31 @@
k(s1, h1)%0A
+net.add
Link(s2, h2)
@@ -842,24 +842,31 @@
k(s2, h2)%0A
+net.add
Link(s5, h3)
@@ -864,24 +864,31 @@
k(s5, h3)%0A
+net.add
Link(s6, h4)
@@ -891,16 +891,23 @@
h4)%0A%0A
+net.add
Link(s1,
@@ -909,24 +909,31 @@
k(s1, s2)%0A
+net.add
Link(s2, s3)
@@ -931,24 +931,31 @@
k(s2, s3)%0A
+net.add
Link(s2, s4)
@@ -953,24 +953,31 @@
k(s2, s4)%0A
+net.add
Link(s4, s5)
@@ -979,16 +979,23 @@
, s5)%0A
+net.add
Link(s4,
|
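The bulk of the diff, decoded: the controller is now built with functools.partial so Mininet can instantiate a RemoteController pointed at a fixed address, and every free-standing Link(...) call becomes net.addLink(...) so the links are registered on the network object. A sketch of the changed lines (the full diff repeats addLink for each link):

from functools import partial

net = Mininet(controller=partial(RemoteController, ip='10.42.0.27', port=6633))
...
net.addLink(s1, h1)
net.addLink(s2, h2)
net.addLink(s1, s2)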
e2e57a89b63943857eb2954d0c5bdcf8e2191ff4
|
simplify logic for player count requirement
|
mk2/plugins/alert.py
|
mk2/plugins/alert.py
|
import os
import random
from mk2.plugins import Plugin
from mk2.events import Hook, StatPlayerCount
class Alert(Plugin):
interval = Plugin.Property(default=200)
command = Plugin.Property(default="say {message}")
path = Plugin.Property(default="alerts.txt")
min_pcount = Plugin.Property(default=0)
messages = []
def setup(self):
self.register(self.count_check, StatPlayerCount)
if self.path and os.path.exists(self.path):
f = open(self.path, 'r')
for l in f:
l = l.strip()
if l:
self.messages.append(l)
f.close()
def count_check(self, event):
if event.players_current >= self.min_pcount:
self.requirements_met = True
else:
self.requirements_met = False
def server_started(self, event):
if self.messages:
self.repeating_task(self.repeater, self.interval)
def repeater(self, event):
if self.requirements_met:
self.send_format(self.command, message=random.choice(self.messages))
|
Python
| 0.000011
|
@@ -694,18 +694,39 @@
-if
+self.requirements_met =
event.p
@@ -762,106 +762,8 @@
ount
-:%0A self.requirements_met = True%0A else:%0A self.requirements_met = False
%0A%0A
|
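Decoded, the four-line if/else collapses into a direct boolean assignment, which is the whole of the "simplify" commit:

def count_check(self, event):
    self.requirements_met = event.players_current >= self.min_pcount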
cf79b0f8c1d2099927db9b1021d3e5263c591709
|
Create RooArgList of RooConstVars from list in rf316_llratioplot
|
tutorials/roofit/rf316_llratioplot.py
|
tutorials/roofit/rf316_llratioplot.py
|
## \file
## \ingroup tutorial_roofit
## \notebook
## Multidimensional models: using the likelihood ratio technique to construct a signal
## enhanced one-dimensional projection of a multi-dimensional pdf
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Create 3D pdf and data
# -------------------------------------------
# Create observables
x = ROOT.RooRealVar("x", "x", -5, 5)
y = ROOT.RooRealVar("y", "y", -5, 5)
z = ROOT.RooRealVar("z", "z", -5, 5)
# Create signal pdf gauss(x)*gauss(y)*gauss(z)
gx = ROOT.RooGaussian("gx", "gx", x, ROOT.RooFit.RooConst(0), ROOT.RooFit.RooConst(1))
gy = ROOT.RooGaussian("gy", "gy", y, ROOT.RooFit.RooConst(0), ROOT.RooFit.RooConst(1))
gz = ROOT.RooGaussian("gz", "gz", z, ROOT.RooFit.RooConst(0), ROOT.RooFit.RooConst(1))
sig = ROOT.RooProdPdf("sig", "sig", ROOT.RooArgList(gx, gy, gz))
# Create background pdf poly(x)*poly(y)*poly(z)
px = ROOT.RooPolynomial("px", "px", x, ROOT.RooArgList(ROOT.RooFit.RooConst(-0.1), ROOT.RooFit.RooConst(0.004)))
py = ROOT.RooPolynomial("py", "py", y, ROOT.RooArgList(ROOT.RooFit.RooConst(0.1), ROOT.RooFit.RooConst(-0.004)))
pz = ROOT.RooPolynomial("pz", "pz", z)
bkg = ROOT.RooProdPdf("bkg", "bkg", ROOT.RooArgList(px, py, pz))
# Create composite pdf sig+bkg
fsig = ROOT.RooRealVar("fsig", "signal fraction", 0.1, 0.0, 1.0)
model = ROOT.RooAddPdf("model", "model", ROOT.RooArgList(sig, bkg), ROOT.RooArgList(fsig))
data = model.generate(ROOT.RooArgSet(x, y, z), 20000)
# Project pdf and data on x
# -------------------------------------------------
# Make plain projection of data and pdf on x observable
frame = x.frame(Title="Projection of 3D data and pdf on X", Bins=40)
data.plotOn(frame)
model.plotOn(frame)
# Define projected signal likelihood ratio
# ----------------------------------------------------------------------------------
# Calculate projection of signal and total likelihood on (y,z) observables
# i.e. integrate signal and composite model over x
sigyz = sig.createProjection(ROOT.RooArgSet(x))
totyz = model.createProjection(ROOT.RooArgSet(x))
# Construct the log of the signal / signal+background probability
llratio_func = ROOT.RooFormulaVar("llratio", "log10(@0)-log10(@1)", ROOT.RooArgList(sigyz, totyz))
# Plot data with a LL ratio cut
# -------------------------------------------------------
# Calculate the llratio value for each event in the dataset
data.addColumn(llratio_func)
# Extract the subset of data with large signal likelihood
dataSel = data.reduce(Cut="llratio>0.7")
# Make plot frame
frame2 = x.frame(Title="Same projection on X with LLratio(y,z)>0.7", Bins=40)
# Plot select data on frame
dataSel.plotOn(frame2)
# Make MC projection of pdf with same LL ratio cut
# ---------------------------------------------------------------------------------------------
# Generate large number of events for MC integration of pdf projection
mcprojData = model.generate(ROOT.RooArgSet(x, y, z), 10000)
# Calculate LL ratio for each generated event and select MC events with
# llratio>0.7
mcprojData.addColumn(llratio_func)
mcprojDataSel = mcprojData.reduce(Cut="llratio>0.7")
# Project model on x, projected observables (y,z) with Monte Carlo technique
# on set of events with the same llratio cut as was applied to data
model.plotOn(frame2, ProjWData=mcprojDataSel)
c = ROOT.TCanvas("rf316_llratioplot", "rf316_llratioplot", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
frame.GetYaxis().SetTitleOffset(1.4)
frame.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
frame2.GetYaxis().SetTitleOffset(1.4)
frame2.Draw()
c.SaveAs("rf316_llratioplot.png")
|
Python
| 0
|
@@ -981,80 +981,21 @@
x,
-ROOT.RooArgList(ROOT.RooFit.RooConst(
+%5B
-0.1
-)
,
-ROOT.RooFit.RooConst(
0.004
-))
+%5D
)%0Apy
@@ -1035,80 +1035,21 @@
y,
-ROOT.RooArgList(ROOT.RooFit.RooConst(
+%5B
0.1
-)
,
-ROOT.RooFit.RooConst(
-0.004
-))
+%5D
)%0Apz
|
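Decoded, the diff drops the verbose wrappers: as the commit subject indicates, a plain Python list of numbers passed to RooPolynomial is converted by PyROOT into a RooArgList of RooConstVars automatically, so the two constructors shrink to:

px = ROOT.RooPolynomial("px", "px", x, [-0.1, 0.004])
py = ROOT.RooPolynomial("py", "py", y, [0.1, -0.004])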
7aca118be8db36370ff793b6cbedecb050dc869d
|
Change control plugin name to 'control'.
|
twisted/plugins/automatron_control.py
|
twisted/plugins/automatron_control.py
|
from twisted.internet import defer
from zope.interface import classProvides, implements
from automatron.command import IAutomatronCommandHandler
from automatron.plugin import IAutomatronPluginFactory, STOP
class AutomatronControlPlugin(object):
classProvides(IAutomatronPluginFactory)
implements(IAutomatronCommandHandler)
name = 'notify_control'
priority = 100
def __init__(self, controller):
self.controller = controller
command_map = {
#command: (help, min_args, max_args, permission)
'join': ('<channel> [key]', 1, 2, 'channel'),
'leave': ('<channel> [reason]', 1, 2, 'channel'),
'say': ('<channel> <message>', 2, 2, 'say'),
'nickname': ('<nickname>', 1, 1, 'admin'),
'identify': ('[channel]', 0, 1, None),
}
def on_command(self, client, user, command, args):
if command in self.command_map:
self._on_command(client, user, command, args)
return STOP
@defer.inlineCallbacks
def _on_command(self, client, user, command, args):
config = self.command_map[command]
if config[3] is not None:
if not (yield self.controller.config.has_permission(client.server, None, user, config[3])):
client.msg(user, 'You\'re not authorized to do that.')
return
if not config[1] <= len(args) <= config[2]:
client.msg(user, 'Invalid syntax. Use: %s %s' % (command, config[0]))
return
getattr(self, '_on_command_%s' % command)(client, user, *args)
def _on_command_join(self, client, user, channel, key=None):
if key is not None:
self.controller.config.update_value('channel', client.server, channel, 'key', key)
client.join(channel, key)
else:
d = self.controller.config.get_value('channel', client.server, channel, 'key')
d.addCallback(lambda (channel_key, _): client.join(channel, channel_key))
def _on_command_leave(self, client, user, channel, reason='Leaving...'):
client.leave(channel, reason)
def _on_command_say(self, client, user, channel, message):
client.msg(channel, message)
def _on_command_nickname(self, client, user, nickname):
client.setNick(nickname)
@defer.inlineCallbacks
def _on_command_identify(self, client, user, channel=None):
username, username_relevance = yield self.controller.config.get_username_by_hostmask(client.server, user)
if username is not None:
if username_relevance == 0:
identity = 'You are globally known as %s' % username
else:
identity = 'You are known as %s' % username
role, role_relevance = yield self.controller.config.get_role_by_username(client.server, channel, username)
if role_relevance is not None and role_relevance < username_relevance:
role = role_relevance = None
if role_relevance is None:
client.msg(user, identity)
elif role_relevance in (2, 3):
client.msg(user, '%s and your role in %s is %s' % (identity, channel, role))
else:
client.msg(user, '%s and your role is %s' % (identity, role))
else:
client.msg(user, 'I don\'t know you...')
|
Python
| 0
|
@@ -343,15 +343,8 @@
= '
-notify_
cont
|
9c2fe0b1d894e2423a2efe750415c2284ee0488f
|
Fix representative choice.
|
ueberwachungspaket/representatives.py
|
ueberwachungspaket/representatives.py
|
from json import load
from random import choice
class Representatives():
def __init__(self):
self.parties = load_parties()
self.teams = load_teams()
self.representatives = load_representatives("representatives.json", self.parties, self.teams)
self.government = load_representatives("government.json", self.parties, self.teams)
def get_representative_by_id(self, id):
representatives = self.representatives + self.government
if id == "00000":
return choice(self.representatives)
try:
rep = [rep for rep in representatives if rep.id == id][0]
except IndexError:
rep = None
return rep
def get_representative_by_name(self, prettyname):
representatives = self.representatives + self.government
try:
rep = [rep for rep in representatives if rep.name.prettyname == prettyname][0]
except IndexError:
rep = None
return rep
def get_party(self, shortname):
return self.parties[shortname]
class Contact():
def __init__(self, mail, phone, facebook, twitter):
self.mail = mail
self.phone = phone
self.facebook = facebook
self.twitter = twitter
class Party():
def __init__(self, name, shortname, prettyname, color, contact):
self.name = name
self.shortname = shortname
self.prettyname = prettyname
self.color = color
self.contact = contact
class Name():
def __init__(self, firstname, lastname, prettyname, prefix, suffix):
self.firstname = firstname
self.lastname = lastname
self.prettyname = prettyname
self.prefix = prefix
self.suffix = suffix
class Image():
def __init__(self, url, copyright):
self.url = url
self.copyright = copyright
class Team():
def __init__(self, name, prettyname):
self.name = name
self.prettyname = prettyname
def __repr__(self):
return self.name
class Representative():
def __init__(self, id, name, contact, image, party, team, sex, state):
self.id = id
self.name = name
self.contact = contact
self.image = image
self.party = party
self.team = team
self.sex = sex
self.state = state
if not self.contact.mail:
self.contact.mail = party.contact.mail
if not self.contact.phone:
self.contact.phone = party.contact.phone
if not self.contact.facebook:
self.contact.facebook = party.contact.facebook
if not self.contact.twitter:
self.contact.twitter = party.contact.twitter
def __repr__(self):
return self.name.firstname + " " + self.name.lastname
def fullname(self):
return (self.name.prefix + " " if self.name.prefix else "") + self.name.firstname + " " + self.name.lastname + (" " + self.name.suffix if self.name.suffix else "")
def load_parties():
parties = {}
with open("ueberwachungspaket/data/parties.json", "r") as f:
lparties = load(f)
for prettyname in lparties:
lparty = lparties[prettyname]
lcontact = lparty["contact"]
contact = Contact(lcontact["mail"], lcontact["phone"], lcontact["facebook"], lcontact["twitter"])
party = Party(lparty["name"], lparty["shortname"], prettyname, lparty["color"], contact)
parties[prettyname] = party
return parties
def load_teams():
teams = {}
with open("ueberwachungspaket/data/teams.json", "r") as f:
lteams = load(f)
for prettyname in lteams:
lteam = lteams[prettyname]
team = Team(lteam["name"], prettyname)
teams[prettyname] = team
return teams
def load_representatives(filename, parties, teams):
representatives = []
with open("ueberwachungspaket/data/" + filename, "r") as f:
lrepresentatives = load(f)
for lrep in lrepresentatives:
lname = lrep["name"]
name = Name(lname["firstname"], lname["lastname"], lname["prettyname"], lname["prefix"], lname["suffix"])
lcontact = lrep["contact"]
contact = Contact(lcontact["mail"], lcontact["phone"], lcontact["facebook"], lcontact["twitter"])
image = Image(lrep["image"]["url"], lrep["image"]["copyright"])
party = parties[lrep["party"]]
team = teams[lrep["team"]]
representative = Representative(lrep["id"], name, contact, image, party, team, lrep["sex"], lrep["state"])
representatives.append(representative)
return representatives
|
Python
| 0.000002
|
@@ -519,16 +519,32 @@
choice(
+%5Brep for rep in
self.rep
@@ -555,16 +555,38 @@
ntatives
+ if rep.team == %22spy%22%5D
)%0A%0A
|
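Decoded, the fix constrains the random pick: when the magic id "00000" comes in, choice() now draws only from representatives whose team is "spy" (the hard-coded team name comes verbatim from the diff):

if id == "00000":
    return choice([rep for rep in self.representatives
                   if rep.team == "spy"])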
1519d9dc2f483671aee0f92252dd839a4d7af9c3
|
Add About page TemplateView
|
painindex_app/urls.py
|
painindex_app/urls.py
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView, FormView
from painindex_app import views
urlpatterns = patterns('',
url(r'^$', views.homepage, name='homepage'),
url(r'^painsource/(?P<painsource_id>\d+)/$', views.painsource_detail, name='painsource_detail'),
# url(r'^painreport/new/$', views.painreport_form, name='painreport'),
url(r'^painreport/new/$', views.PainReportView.as_view(), name='painreport'),
)
|
Python
| 0
|
@@ -198,24 +198,124 @@
homepage'),%0A
+ url(r'%5Eabout/$', TemplateView.as_view(template_name='painindex_app/about.html'), name='about'),%0A
url(r'%5Ep
@@ -555,25 +555,24 @@
ainreport'),
-%0A
%0A
|
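Decoded, the new route wires a class-based TemplateView directly into the URLconf, so no view function is needed for the static About page:

url(r'^about/$',
    TemplateView.as_view(template_name='painindex_app/about.html'),
    name='about'),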
09c8e9b11c584dcc03518fb5655d040db8ad63a6
|
Add an extra cylinder to raw HD images to work around grub problem. (RBL-8292)
|
jobslave/generators/raw_hd_image.py
|
jobslave/generators/raw_hd_image.py
|
#
# Copyright (c) 2011 rPath, Inc.
#
# All Rights Reserved
#
import logging
import os
from jobslave import lvm
from jobslave.generators import bootable_image, constants
from jobslave.geometry import FSTYPE_LINUX, FSTYPE_LINUX_LVM
from jobslave.util import logCall, divCeil
from conary.lib import util
log = logging.getLogger(__name__)
class HDDContainer(object):
def __init__(self, image, geometry, totalSize=None):
self.image = image
self.geometry = geometry
if totalSize is None:
totalSize = os.stat(image).st_size
self.totalSize = totalSize
def create(self):
# create the raw file
# NB: blocksize is unrelated to the one in constants.py, and is
# completely arbitrary.
blocksize = 512
seek = (self.totalSize - 1) / blocksize
logCall('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' % (
self.image, max(seek, 0), blocksize))
def destroy(self):
if self.image:
util.remove(self.image)
self.image = None
def partition(self, partitions):
# Extended partitions are not supported since we're either using a
# single partition for non-LVM or two for LVM (/boot + one PV)
assert len(partitions) <= 4
fObj = open(self.image, 'r+b')
fObj.seek(440)
fObj.write(os.urandom(4)) # disk signature
fObj.write('\0\0')
numParts = 0
for start, size, fsType, bootable in partitions:
numParts += 1
log.info("Partition %d: start %d size %d flags %02x type %02x",
numParts, start, size, bootable, fsType)
fObj.write(self.geometry.makePart(start, size, bootable, fsType))
assert numParts <= 4
while numParts < 4:
fObj.write('\0' * 16)
numParts += 1
fObj.write('\x55\xAA') # MBR signature
assert fObj.tell() == 512
fObj.close()
class RawHdImage(bootable_image.BootableImage):
def makeHDImage(self, image):
_, realSizes = self.getImageSize()
lvmContainer = None
# Align to the next cylinder
def align(size):
alignTo = self.geometry.bytesPerCylinder
return divCeil(size, alignTo) * alignTo
if os.path.exists(image):
util.rmtree(image)
util.mkdirChain(os.path.split(image)[0])
if '/boot' in realSizes:
rootPart = '/boot'
else:
rootPart = '/'
# Align root partition to nearest cylinder, but add in the
# partition offset so that the *end* of the root will be on a
# cylinder boundary.
rootStart = self.geometry.offsetBytes
rootEnd = align(rootStart + realSizes[rootPart])
rootSize = rootEnd - rootStart
# Collect sizes of all non-boot partitions, pad 10% for LVM
# overhead, and realign to the nearest cylinder.
lvmSize = sum(x[1] for x in realSizes.items() if x[0] != rootPart)
lvmSize += int(lvmSize * 0.10)
lvmSize = align(lvmSize)
totalSize = rootEnd + lvmSize
container = HDDContainer(image, self.geometry, totalSize)
container.create()
# Calculate the offsets and sizes of the root and LVM partitions.
# Note that the Start/Blocks variables are measured in blocks.
# NB: both of these sizes are already block-aligned.
rootStartBlock = rootStart / self.geometry.BLOCK
rootSizeBlock = rootSize / self.geometry.BLOCK
partitions = [(rootStartBlock, rootSizeBlock, FSTYPE_LINUX, True)]
if len(realSizes) > 1:
lvmStartBlock = rootStartBlock + rootSizeBlock
lvmSizeBlock = lvmSize / self.geometry.BLOCK
partitions.append((lvmStartBlock, lvmSizeBlock,
FSTYPE_LINUX_LVM, False))
lvmContainer = lvm.LVMContainer(lvmSize, image,
lvmStartBlock * self.geometry.BLOCK)
container.partition(partitions)
rootFs = bootable_image.Filesystem(image, self.mountDict[rootPart][2],
rootSize, offset=rootStart, fsLabel = rootPart)
rootFs.format()
self.addFilesystem(rootPart, rootFs)
for mountPoint, (reqSize, freeSpace, fsType) in self.mountDict.items():
if mountPoint == rootPart:
continue
# FIXME: this code is broken - fs is only set in a branch
# it only happens to work now because we only support one
# partition and the continue above gets hit
if lvmContainer:
fs = lvmContainer.addFilesystem(mountPoint, fsType, realSizes[mountPoint])
fs.format()
self.addFilesystem(mountPoint, fs)
self.mountAll()
# Install contents into image
root_dir = os.path.join(self.workDir, "root")
bootloader_installer = self.installFileTree(root_dir)
# Install bootloader's MBR onto the disk
# first bind mount the disk image into the root dir.
# this lets some bootloaders (like grub) write to the disk
diskpath = os.path.join(root_dir, 'disk.img')
f = open(diskpath, 'w')
f.close()
logCall('mount -n -obind %s %s' %(image, diskpath))
try:
bootloader_installer.install_mbr(root_dir, image, totalSize)
finally:
blkidtab = os.path.join(root_dir, "etc", "blkid.tab")
if os.path.exists(blkidtab):
os.unlink(blkidtab)
logCall('umount -n %s' % diskpath)
os.unlink(diskpath)
# Unmount and destroy LVM
try:
self.umountAll()
if lvmContainer:
lvmContainer.destroy()
except Exception, e:
log.warning("Error tearing down LVM setup: %s" % str(e))
return container
def write(self):
self.productName = 'Raw Hard Disk'
image = os.path.join(self.workDir, self.basefilename + '.hdd')
disk = self.makeHDImage(image)
self.capacity = disk.totalSize
finalImage = os.path.join(self.outputDir, self.basefilename + '.hdd.gz')
self.status('Compressing hard disk image')
outFile = self.gzip(image, finalImage)
if self.buildOVF10:
self.ovaPath = self.createOvf(self.basefilename,
self.jobData['description'], constants.RAWHD, finalImage,
self.capacity, True, self.workingDir,
self.outputDir)
self.outputFileList.append((self.ovaPath,
'Raw Hard Disk %s' % constants.OVFIMAGETAG))
self.outputFileList.append((finalImage, 'Raw Hard Disk Image'),)
self.postOutput(self.outputFileList)
|
Python
| 0
|
@@ -3079,24 +3079,223 @@
n(lvmSize)%0A%0A
+ # Add one cylinder to the disk to work around grub/VMware/BIOS bugs%0A # and/or a misunderstanding by this developer of how partition table%0A # sizes are calculated, see RBL-8292.%0A
tota
@@ -3298,24 +3298,30 @@
totalSize =
+align(
rootEnd + lv
@@ -3325,16 +3325,21 @@
lvmSize
+ + 1)
%0A
|
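Decoded, the fix pads totalSize by one byte before aligning; since align() rounds up to a whole number of cylinders, this bumps an already cylinder-aligned disk up by exactly one extra cylinder, which is the workaround the new comment describes:

# Add one cylinder to the disk to work around grub/VMware/BIOS bugs
# and/or a misunderstanding by this developer of how partition table
# sizes are calculated, see RBL-8292.
totalSize = align(rootEnd + lvmSize + 1)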
9e74874f3bc96f1d514ad6765e60910950ac9ee7
|
switch to paho-mqtt
|
mqtt_randompub/mqtt_randompub.py
|
mqtt_randompub/mqtt_randompub.py
|
# This file is part of mqtt-randompub
#
# Copyright (c) 2013-2014, Fabian Affolter <fabian@affolter-engineering.ch>
# Released under the MIT license. See LICENSE file for details.
#
import random
import time
import sys
import os
import signal
import itertools
import mosquitto
import opthandling
def send(broker, port, qos, number, interval, topic,
subtopic1, subtopic2, payload, random, timestamp, counter):
count = 1
mqttclient = mosquitto.Mosquitto("mqtt-randompub")
mqttclient.connect(broker, port=int(port))
if number == 0:
print 'Messages are published on topic %s/#... -> CTRL + C to shutdown' \
% topic
while True:
complete_topic = generate_topic(topic, subtopic1, subtopic2)
message = generate_message(payload, timestamp, random)
if counter:
mqttclient.publish(complete_topic, (str(count) + ' ' + message))
else:
mqttclient.publish(complete_topic, message, random)
time.sleep(interval)
count = count + 1
elif number == 1:
complete_topic = generate_topic(topic, subtopic1, subtopic2)
message = generate_message(payload, timestamp, random)
mqttclient.publish(complete_topic, message)
else:
for x in range(1, number + 1):
complete_topic = generate_topic(topic, subtopic1, subtopic2)
message = generate_message(payload, timestamp, random)
if counter:
mqttclient.publish(complete_topic, (str(count) + ' ' + message))
else:
mqttclient.publish(complete_topic, message)
count = count + 1
time.sleep(interval)
mqttclient.disconnect()
def generate_message(payload, timestamp, random):
if random:
generated_payload = generate_random_num()
else:
if type(payload) != list:
payload_lst = str2list(payload)
gen_payload = random_subtopic(payload_lst)
else:
gen_payload = random_subtopic(payload)
if timestamp:
generated_payload = gen_payload + ' ' + str(generate_timestamp())
else:
generated_payload = gen_payload
return generated_payload
def generate_topic(topic, subtopic1, subtopic2):
if type(subtopic1) != list:
stopic1_lst = str2list(subtopic1)
stopic1 = random_subtopic(stopic1_lst)
else:
stopic1 = random_subtopic(subtopic1)
if type(subtopic2) != list:
stopic2_lst = str2list(subtopic2)
stopic2 = random_subtopic(stopic2_lst)
else:
stopic2 = random_subtopic(subtopic2)
generated_topic = topic + '/' + str(stopic1) + '/' + str(stopic2)
return generated_topic
def random_subtopic(list):
return random.choice(list)
def str2list(string):
str_lst = string.split(',')
for i, s in enumerate(str_lst):
str_lst[i] = s.strip()
return str_lst
def generate_random_num():
return random.randrange(0, 100, 1)
def generate_timestamp():
timestamp = int(time.time())
return timestamp
def main(argv=None):
if argv is None:
argv = sys.argv
args = opthandling.argparsing()
if args.number:
send(args.broker, args.port, args.qos, int(args.number),
float(args.interval), args.topic, args.subtopic1, args.subtopic2,
args.load, args.random, args.timestamp, args.counter)
if __name__ == '__main__':
"""Main program entry point"""
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
sys.exit(main(sys.argv))
#sys.exit(main())
except KeyboardInterrupt:
print 'Interrupted, exiting...'
sys.exit(1)
|
Python
| 0.000005
|
@@ -258,24 +258,138 @@
ls%0A%0A
-import mosquitto
+try:%0A import paho.mqtt.client as mqtt%0Aexcept ImportError:%0A print 'Please install the paho-mqtt module to use mqtt-randompub'
%0A%0Aim
@@ -564,26 +564,18 @@
= m
-osquitto.Mosquitto
+qtt.Client
(%22mq
|
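Decoded, the migration is two edits: the import gains a guard with an install hint (still a Python 2 print statement, matching the rest of the file), and client construction moves to paho's API:

try:
    import paho.mqtt.client as mqtt
except ImportError:
    print 'Please install the paho-mqtt module to use mqtt-randompub'

...
mqttclient = mqtt.Client("mqtt-randompub")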
e2c0fbd32514147cbb98d184c89437885757c98a
|
Remove spurious print from filters processing.
|
nengo_spinnaker/utils/filters.py
|
nengo_spinnaker/utils/filters.py
|
import collections
import itertools
import numpy as np
from . import fixpoint as fp
from vertices import (region_pre_sizeof, region_sizeof, region_write,
region_pre_prepare, region_post_prepare)
FilterItem = collections.namedtuple('FilterItem', ['time_constant',
'accumulatory'])
FilterRoute = collections.namedtuple('FilterRoute', ['key', 'mask', 'index',
'dimension_mask'])
def with_filters(filter_id=14, routing_id=15):
"""Add input filtering to the given NengoVertex subclass.
:param filter_id: region ID to use for filters
:param routing_id: region ID to use for filter routing entries
"""
def cls_(cls):
cls.REGIONS.update({"FILTERS": filter_id,
"FILTER_ROUTING": routing_id})
cls._sizeof_region_filters = _sizeof_region_filters
cls._pre_sizeof_region_filter_routing = \
_pre_sizeof_region_filter_routing
cls._sizeof_region_filter_routing = _sizeof_region_filter_routing
cls._write_region_filters = _write_region_filters
cls._write_region_filter_routing = _write_region_filter_routing
cls._prep_region_filters = _pre_prepare_filters
cls._prep_region_filter_routing = _post_prepare_routing
return cls
return cls_
@region_pre_sizeof("FILTERS")
def _sizeof_region_filters(self, n_atoms):
# 3 words per filter + 1 for length
return 3 * len(self.__filters) + 1
@region_pre_sizeof("FILTER_ROUTING")
def _pre_sizeof_region_filter_routing(self, n_atoms):
return 4 * len(self.in_edges) * 5
@region_pre_prepare('FILTERS')
def _pre_prepare_filters(self):
"""Generate a list of filters from the incoming edges."""
self.__filters = list()
self.__filters_in = collections.defaultdict(list)
for edge in self.in_edges:
filter_item = FilterItem(edge.synapse, edge._filter_is_accumulatory)
if filter_item not in self.__filters:
self.__filters.append(filter_item)
self.__filters_in[self.__filters.index(filter_item)].append(edge)
self.n_filters = len(self.__filters)
@region_write("FILTERS")
def _write_region_filters(self, subvertex, spec):
spec.write(data=len(self.__filters))
for filter_item in self.__filters:
f = np.exp(-self.dt / filter_item.time_constant)
spec.write(data=fp.bitsk(f))
spec.write(data=fp.bitsk(1 - f))
spec.write(data=(0x0 if filter_item.accumulatory else 0xffffffff))
@region_post_prepare('FILTER_ROUTING')
def _post_prepare_routing(self):
# For each incoming subedge we write the key, mask and index of the
# filter to which it is connected. At some later point we can try
# to combine keys and masks to minimise the number of comparisons
# which are made in the SpiNNaker application.
# Mapping of subvertices to list of maps from keys and masks to filter
# indices (filter routing entries)
self.__subvertex_filter_keys = collections.defaultdict(list)
for (i, edges) in self.__filters_in.items():
for subvertex in self.subvertices:
subedges = itertools.chain(*[
filter(lambda se: se.postsubvertex == subvertex,
edge.subedges) for edge in edges]
)
kms = [(subedge.edge.prevertex.generate_routing_info(subedge),
subedge.edge.dimension_mask) for subedge in subedges]
# Add the key and mask entries to the filter keys list for this
# subvertex.
self.__subvertex_filter_keys[subvertex].extend(
[FilterRoute(km[0], km[1], i, dm) for (km, dm) in kms]
)
@region_sizeof("FILTER_ROUTING")
def _sizeof_region_filter_routing(self, subvertex):
# 4 words per entry, 1 entry per in_subedge + 1 for length
print self.__subvertex_filter_keys
return 4 * len(self.__subvertex_filter_keys[subvertex]) + 1
@region_write("FILTER_ROUTING")
def _write_region_filter_routing(self, subvertex, spec):
routes = self.__subvertex_filter_keys[subvertex]
spec.write(data=len(routes))
for route in routes:
spec.write(data=route.key)
spec.write(data=route.mask)
spec.write(data=route.index)
spec.write(data=route.dimension_mask)
|
Python
| 0
|
@@ -3904,47 +3904,8 @@
gth%0A
- print self.__subvertex_filter_keys%0A
|
40705a39292d0080126933b2318d20ef1a4499a2
|
Remove obsolete input.
|
lobster/cmssw/data/job.py
|
lobster/cmssw/data/job.py
|
#!/usr/bin/env python
import base64
import json
import os
import pickle
import shutil
import subprocess
import sys
fragment = """import FWCore.ParameterSet.Config as cms
process.source.fileNames = cms.untracked.vstring({input_files})
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})"""
def edit_process_source(cmssw_config_file, files, lumis):
with open(cmssw_config_file, 'a') as config:
frag = fragment.format(input_files=repr([str(f) for f in files]), lumis=[str(l) for l in lumis])
print "--- config file fragment:"
print frag
print "---"
config.write(frag)
(config, data) = sys.argv[1:]
with open(data, 'rb') as f:
(args, files, lumis) = pickle.load(f)
configfile = config.replace(".py", "_mod.py")
shutil.copy2(config, configfile)
env = os.environ
env['X509_USER_PROXY'] = 'proxy'
edit_process_source(configfile, files, lumis)
# exit_code = subprocess.call('python "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
sys.exit(exit_code)
|
Python
| 0.000183
|
@@ -20,22 +20,8 @@
on%0A%0A
-import base64%0A
impo
|
efe75d083093bf7a421234831e27d661ef93dfdb
|
add labels and kwargs
|
cupyx/time.py
|
cupyx/time.py
|
import time
import numpy
import cupy
from cupy import util
class _PerfCaseResult(object):
def __init__(self, name, ts):
assert ts.ndim == 2 and ts.shape[0] == 2 and ts.shape[1] > 0
self.name = name
self._ts = ts
@property
def cpu_times(self):
return self._ts[0]
@property
def gpu_times(self):
return self._ts[1]
@staticmethod
def _to_str_per_item(t):
assert t.size > 0
t *= 1e6
s = ' {:9.03f} us'.format(t.mean())
if t.size > 1:
s += ' +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us'.format(
t.std(), t.min(), t.max())
return s
def to_str(self, show_gpu=False):
ts = self._ts if show_gpu else self._ts[[0]]
return '{:<20s}:{}'.format(
self.name, ' '.join([self._to_str_per_item(t) for t in ts]))
def __str__(self):
return self.to_str(show_gpu=True)
def repeat(func, args=(), n=10000, *, name=None, n_warmup=10):
util.experimental('cupyx.time.repeat')
if name is None:
name = func.__name__
if not callable(func):
raise ValueError('`func` should be a callable object.')
if not isinstance(args, tuple):
raise ValueError('`args` should be of tuple type.')
if not isinstance(n, int):
raise ValueError('`n` should be an integer.')
if not isinstance(name, str):
raise ValueError('`str` should be a string.')
if not isinstance(n_warmup, int):
raise ValueError('`n_warmup` should be an integer.')
ts = numpy.empty((2, n,), dtype=numpy.float64)
ev1 = cupy.cuda.stream.Event()
ev2 = cupy.cuda.stream.Event()
for i in range(n_warmup):
func(*args)
ev1.record()
ev1.synchronize()
for i in range(n):
ev1.record()
t1 = time.perf_counter()
func(*args)
t2 = time.perf_counter()
ev2.record()
ev2.synchronize()
cpu_time = t2 - t1
gpu_time = cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3
ts[0, i] = cpu_time
ts[1, i] = gpu_time
return _PerfCaseResult(name, ts)
|
Python
| 0
|
@@ -757,16 +757,49 @@
ts%5B%5B0%5D%5D%0A
+ devices = %5B%22CPU%22, %22GPU%22%5D%0A
@@ -859,16 +859,52 @@
'.join(%5B
+devices%5Bi%5D + %22: %22%0A +
self._to
@@ -922,21 +922,37 @@
em(t
+s%5Bi%5D
) for
-t
+i
in
-ts
+range(len(ts))
%5D))%0A
@@ -1044,16 +1044,27 @@
args=(),
+ kwargs=%7B%7D,
n=10000
@@ -1370,24 +1370,121 @@
ple type.')%0A
+ if not isinstance(kwargs, dict):%0A raise ValueError('%60args%60 should be of tuple type.')%0A
if not i
@@ -1910,24 +1910,34 @@
func(*args
+, **kwargs
)%0A%0A ev1.r
@@ -2059,24 +2059,34 @@
func(*args
+, **kwargs
)%0A%0A t
|
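Decoded, the diff threads a kwargs dict through repeat() into every timed call and labels each row of the report with its device; note the copy-pasted validation message still says `args`, exactly as in the diff:

def repeat(func, args=(), kwargs={}, n=10000, *, name=None, n_warmup=10):
    ...
    if not isinstance(kwargs, dict):
        raise ValueError('`args` should be of tuple type.')
    ...
    func(*args, **kwargs)

and in _PerfCaseResult.to_str:

devices = ["CPU", "GPU"]
return '{:<20s}:{}'.format(
    self.name, ' '.join([devices[i] + ": "
                         + self._to_str_per_item(ts[i])
                         for i in range(len(ts))]))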
85769162560d83a58ccc92f818559ddd3dce2a09
|
Fix another bug in the authentication
|
pages/index.py
|
pages/index.py
|
import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course
#Index page
class IndexPage:
#Simply display the page
def GET(self):
if loginInstance.isLoggedIn():
userInput = web.input();
if "logoff" in userInput:
loginInstance.disconnect();
return renderer.index(False)
else:
courses = Course.GetAllCoursesIds()
return renderer.main(courses)
else:
return renderer.index(False)
#Try to log in
def POST(self):
userInput = web.input();
if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
return renderer.main()
else:
return renderer.index(True)
|
Python
| 0.000005
|
@@ -432,18 +432,29 @@
-courses =
+return renderer.main(
Cour
@@ -478,53 +478,8 @@
ds()
-%0A return renderer.main(courses
)%0A
@@ -759,16 +759,41 @@
er.main(
+Course.GetAllCoursesIds()
)%0A
|
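Decoded, "another bug" is that a successful POST login rendered renderer.main() with no argument while the GET path passed the course list; after the diff both paths pass the courses:

# GET, logged in:
return renderer.main(Course.GetAllCoursesIds())

# POST, after a successful login (previously renderer.main()):
return renderer.main(Course.GetAllCoursesIds())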
f7cfa7ec75243dbc4dc6cf75155d8083df504692
|
extend benchmark for lights
|
mpf/benchmarks/test_benchmark_light_shows.py
|
mpf/benchmarks/test_benchmark_light_shows.py
|
import time
from functools import partial
from mpf.core.logging import LogMixin
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class BenchmarkLightShows(MpfGameTestCase):
def getConfigFile(self):
return 'config.yaml'
def getMachinePath(self):
return 'benchmarks/machine_files/shows/'
def getOptions(self):
options = super().getOptions()
if self.unittest_verbosity() <= 1:
options["production"] = True
return options
def get_platform(self):
return 'virtual'
def setUp(self):
LogMixin.unit_test = False
super().setUp()
def _output(self, name, start, end, end2, num):
print("Duration {} {:.5f}ms Processing {:.5f}ms Total: {:5f}ms Per second: {:2f}".format(
name,
(1000 * (end - start) / num), ((end2 - end) * 1000) / num, (1000 * (end2 - start)) / num,
(num * 1) / (end2 - start)
))
def _benchmark(self, function, name, num=10000, iterations=10):
function(num, True)
total = 0
for i in range(iterations):
start, end, end2 = function(num, False)
total += (end2 - start) / num
self._output(name, start, end, end2, num)
print("Total average {:.5f}ms".format(total * 1000/ iterations))
return total/iterations
def testBenchmark(self):
baseline = self._benchmark(partial(self._event_and_run, "random_event", "random_event2"), "baseline")
minimal_show = self._benchmark(partial(self._event_and_run, "play_minimal_light_show", "stop_minimal_light_show"), "minimal_show")
all_leds = self._benchmark(partial(self._event_and_run, "play_single_step_tag_playfield", "stop_single_step_tag_playfield"), "all_leds_tag")
multi_step = self._benchmark(partial(self._event_and_run, "play_multi_step", "stop_multi_step"), "multi_step", num=500)
print("Baseline: {:.5f}ms One LED: +{:.5f}ms 30 LEDs: +{:.5f}ms Multi Step: +{:.5f}".format(
baseline * 1000,
(minimal_show - baseline) * 1000,
(all_leds - baseline) * 1000,
(multi_step - baseline) * 1000
))
def _event_and_run(self, event, event2, num, test):
start = time.time()
for i in range(num):
self.post_event(event)
self.advance_time_and_run(.01)
end = time.time()
self.advance_time_and_run()
end2 = time.time()
self.post_event(event2)
return start, end, end2
|
Python
| 0
|
@@ -2250,134 +2250,514 @@
-start = time.time()%0A for i in range(num):%0A self.post_event(event)%0A self.advance_time_and_run(.01)
+channel_list = %5B%5D%0A for light in self.machine.lights.values():%0A for color, channels in light.hw_drivers.items():%0A channel_list.extend(channels)%0A%0A start = time.time()%0A for i in range(num):%0A self.post_event(event)%0A for channel in channel_list:%0A brightness = channel.current_brightness%0A self.advance_time_and_run(.01)%0A for channel in channel_list:%0A brightness = channel.current_brightness%0A
%0A
|
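Decoded, the benchmark now reads every light channel's brightness inside and after the event loop, so the measurement includes the cost of pulling hardware-driver state rather than only posting events:

channel_list = []
for light in self.machine.lights.values():
    for color, channels in light.hw_drivers.items():
        channel_list.extend(channels)

start = time.time()
for i in range(num):
    self.post_event(event)
    for channel in channel_list:
        brightness = channel.current_brightness
    self.advance_time_and_run(.01)
for channel in channel_list:
    brightness = channel.current_brightness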