| commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k) |
|---|---|---|---|---|---|---|---|
f2de502608833dda82908a6bb4f639645f785c06
|
Change to support PEP variable naming scheme
|
boundary/hostgroup_update.py
|
boundary/hostgroup_update.py
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import HostgroupModify
class HostgroupUpdate(HostgroupModify):
def __init__(self):
HostgroupModify.__init__(self, True)
self.method = "PUT"
self.host_group_id = None
def add_arguments(self):
HostgroupModify.add_arguments(self)
self.parser.add_argument('-i', '--host-group-id', dest='host_group_id', action='store',
required=True, metavar='host_group_id', help='Host group id to update')
self.parser.add_argument('-n', '--host-group-name', dest='host_group_name', action='store', required=False,
metavar="host_group_name", help='Host group name')
self.parser.add_argument('-s', '--sources', dest='sources', action='store', required=True, metavar='sources',
help='Comma separated sources to add to the host group. If empty adds all hosts.')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
HostgroupModify.get_arguments(self)
if self.args.hostGroupId is not None:
self.hostGroupId = self.args.hostGroupId
self.path = "v1/hostgroup/" + str(self.hostGroupId)
def get_description(self):
return 'Updates host group definition in a {0} account'.format(self.product_name)
|
Python
| 0
|
@@ -1678,22 +1678,24 @@
rgs.host
-G
+_g
roup
-I
+_i
d is not
@@ -1722,22 +1722,24 @@
elf.host
-G
+_g
roup
-I
+_i
d = self
@@ -1748,22 +1748,24 @@
rgs.host
-G
+_g
roup
-I
+_i
d%0A%0A
@@ -1814,14 +1814,16 @@
host
-G
+_g
roup
-I
+_i
d)%0A%0A
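Decoded for readability (the diff cells store URL-encoded character-level hunks: %0A is a newline, %5B/%5D are brackets, %7B/%7D are braces, %09 is a tab), the hunks above appear to rename the camelCase names in get_arguments to match the snake_case attribute already set in __init__, yielding roughly:

        if self.args.host_group_id is not None:
            self.host_group_id = self.args.host_group_id

        self.path = "v1/hostgroup/" + str(self.host_group_id)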
|
9670454dbb7b2ec4ef61d60080a1bb798c3ace74
|
use integer amounts for payment capture
|
boxoffice/extapi/razorpay.py
|
boxoffice/extapi/razorpay.py
|
# -*- coding: utf-8 -*-
import requests
from coaster.utils import LabeledEnum
from baseframe import __
from boxoffice import app
# Don't use a trailing slash
base_url = 'https://api.razorpay.com/v1/payments'
__all__ = ['RAZORPAY_PAYMENT_STATUS', 'capture_payment']
class RAZORPAY_PAYMENT_STATUS(LabeledEnum):
"""
Reflects payment statuses as specified in
https://docs.razorpay.com/docs/return-objects
"""
CREATED = (0, __("Created"))
AUTHORIZED = (1, __("Authorized"))
CAPTURED = (2, __("Captured"))
# Only fully refunded payments.
REFUNDED = (3, __("Refunded"))
FAILED = (4, __("Failed"))
def capture_payment(paymentid, amount):
"""
    Attempts to capture the payment from Razorpay
"""
verify_https = False if app.config.get('VERIFY_RAZORPAY_HTTPS') is False else True
url = '{base_url}/{paymentid}/capture'.format(base_url=base_url, paymentid=paymentid)
# Razorpay requires the amount to be in paisa
resp = requests.post(url, data={'amount': amount*100},
auth=(app.config['RAZORPAY_KEY_ID'], app.config['RAZORPAY_KEY_SECRET']), verify=verify_https)
return resp
|
Python
| 0.000001
|
@@ -963,16 +963,36 @@
in paisa
+ and of type integer
%0A res
@@ -1030,16 +1030,20 @@
mount':
+int(
amount*1
@@ -1044,16 +1044,17 @@
ount*100
+)
%7D,%0A
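Decoded, the hunks extend the comment and coerce the paisa amount to an integer before posting, roughly:

    # Razorpay requires the amount to be in paisa and of type integer
    resp = requests.post(url, data={'amount': int(amount*100)},
                         auth=(app.config['RAZORPAY_KEY_ID'], app.config['RAZORPAY_KEY_SECRET']), verify=verify_https)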
|
c08493d92bef7428f900c9efded9f2e4999c8966
|
Support RSA, DSS and ECDSA key #11
|
brorig/connectivity.py
|
brorig/connectivity.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import base64
import subprocess
import os
import pymysql
import paramiko
import uuid
import brorig.log as log
import brorig.config as config
def db_connect_start():
dbconn = pymysql.connect(host=config.config['db']['host'],
port=3306,
user=config.config['db']['user'],
passwd=config.config['db']['passwd'],
db='smp')
log.info("DB connection started with %s" % config.config['db']['host'])
return dbconn
def db_connect_stop(dbconn):
dbconn.close()
    log.info("DB connection stopped")
class Connection:
def __init__(self, host, username=None, passwd=None, pkey_path=None):
self.host = host
self.username = username
self.passwd = passwd
self.pkey = None
if pkey_path:
self.pkey = paramiko.RSAKey.from_private_key_file(pkey_path)
def open_ssh_connexion(self):
self.connection = paramiko.SSHClient()
self.connection.load_system_host_keys()
self.connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connection.connect(self.host, port=22, username=self.username, password=self.passwd, pkey=self.pkey, timeout=10)
self.connect_trans = paramiko.Transport((self.host, 22))
self.connect_trans.connect(username=self.username, password=self.passwd, pkey=self.pkey)
self.transport = paramiko.SFTPClient.from_transport(self.connect_trans)
log.debug("SSH connection established with %s" % self.host)
def close_ssh_connexion(self):
self.connect_trans.close()
self.connection.close()
del self.connection
del self.connect_trans
del self.transport
log.debug("Remote connection (%s) closed" % self.host)
class Transfer:
def __init__(self, conn):
self.sftp = conn
def get(self, remote_path, local_path):
log.debug("Download file from server %s to local %s" % (remote_path, local_path))
self.sftp.get(remote_path, local_path)
def put(self, local_path, remote_path):
self.sftp.put(local_path, remote_path)
class Script:
def __init__(self, connection, exe_remote=True, code="", interpret="bash", sudo=False,
ignore_error=False):
self.connection = connection
self.interpret = interpret
self.exe_remote = exe_remote
self.code = code
self.args = {}
self.file_name = None
self.sudo = sudo
self.ignore_error = ignore_error
def __iadd__(self, other):
self.code += other + "\n"
return self
def file(self, path):
self.file_name = path
def exe(self):
# TODO fast remote execution (one line). Don't use remote transfer in tmp script
# Define script name
path_script = '/tmp/brorig_{0!s}'.format(base64.b32encode(uuid.uuid4().bytes)[:26])
# Create local script file
if not self.file_name:
f = open(path_script, 'w')
f.write(self.code)
f.close()
chan = None
# Transfer script to remote server if needed
if self.exe_remote:
self.connection.open_ssh_connexion()
chan = self.connection.connection.get_transport().open_session()
t = Transfer(self.connection.transport)
t.put(self.file_name if self.file_name else path_script, path_script)
# Script execution
cmd = '{sudo}{interpret} {script} {args}'.format(
sudo=("sudo " if self.sudo else ""),
interpret=self.interpret,
script=path_script,
args=" ".join([("-" if len(str(arg)) == 1 else "--") + str(arg) + " " + str(val) for arg, val in
self.args.iteritems()]))
log.info("{1} code execution: {0:.100}".format(self.code, "Remote" if self.exe_remote else "Local"))
log.debug("Launch {1} command: {0}".format(cmd, "remote" if self.exe_remote else "local"))
if self.exe_remote:
# Remote execution
chan.exec_command(cmd)
stdout = chan.makefile('r', -1)
stderr = chan.makefile_stderr('r', -1)
self.err = stderr.read()
self.out = stdout.read()
return_code = chan.recv_exit_status()
else:
# Local execution
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
self.out, self.err = p.communicate()
return_code = p.returncode
# Remove script
os.remove(path_script)
if self.exe_remote:
self.connection.connection.exec_command("rm -rf {}".format(path_script))
# Close remote connection
if self.exe_remote:
self.connection.close_ssh_connexion()
# Error handler
if return_code != 0 and not self.ignore_error:
raise Exception('{1} script execution error: {0}'.format(self.err, "Remote" if self.exe_remote else "Local"))
return self.out
@staticmethod
def remote_exe(connect, cmd=None, filename=None):
script = Script(connect, code=cmd, exe_remote=True)
if filename:
script.file(filename)
return script.exe()
|
Python
| 0
|
@@ -918,104 +918,23 @@
elf.
-p
key
- = None%0A if pkey_path:%0A self.pkey = paramiko.RSAKey.from_private_key_file(
+_filename =
pkey
@@ -934,25 +934,24 @@
= pkey_path
-)
%0A%0A def op
@@ -1193,332 +1193,319 @@
ost,
- port=22, username=self.username, password=self.passwd, pkey=self.pkey, timeout=10)%0A self.connect_trans = paramiko.Transport((self.host, 22))%0A self.connect_trans.connect(username=self.username, password=self.passwd, pkey=self.pkey)%0A self.transport = paramiko.SFTPClient.from_transport(self.connect_trans
+%0A port=22,%0A username=self.username,%0A password=self.passwd,%0A key_filename=self.key_filename,%0A timeout=10)%0A self.transport = self.connection.open_sftp(
)%0A
@@ -1619,29 +1619,25 @@
self.
-connect_
trans
+port
.close()
@@ -1701,39 +1701,8 @@
ion%0A
- del self.connect_trans%0A
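Decoded, the hunks appear to stop pre-loading a paramiko.RSAKey and instead hand the key path to paramiko, which can detect RSA, DSS and ECDSA keys itself, roughly:

        self.key_filename = pkey_path

    def open_ssh_connexion(self):
        self.connection = paramiko.SSHClient()
        # ... host-key setup unchanged ...
        self.connection.connect(self.host,
                                port=22,
                                username=self.username,
                                password=self.passwd,
                                key_filename=self.key_filename,
                                timeout=10)
        self.transport = self.connection.open_sftp()

with close_ssh_connexion() closing self.transport in place of the removed self.connect_trans.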
|
365c5a085d9fb877c76857741ea98a6a1294da0f
|
Disable spaceport for linux/android
|
tools/perf/benchmarks/spaceport.py
|
tools/perf/benchmarks/spaceport.py
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs spaceport.io's PerfMarks benchmark."""
import logging
import os
from core import perf_benchmark
from telemetry import benchmark
from telemetry.core import util
from telemetry import page as page_module
from telemetry.page import page_test
from telemetry import story
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
DESCRIPTIONS = {
'canvasDrawImageFullClear':
'Using a canvas element to render. Bitmaps are blitted to the canvas '
'using the "drawImage" function and the canvas is fully cleared at '
'the beginning of each frame.',
'canvasDrawImageFullClearAlign':
'Same as canvasDrawImageFullClear except all "x" and "y" values are '
      'rounded to the nearest integer. This can be more efficient for '
      'translation on certain browsers.',
'canvasDrawImagePartialClear':
'Using a canvas element to render. Bitmaps are blitted to the canvas '
'using the "drawImage" function and pixels drawn in the last frame '
'are cleared to the clear color at the beginning of each frame. '
'This is generally slower on hardware accelerated implementations, '
'but sometimes faster on CPU-based implementations.',
'canvasDrawImagePartialClearAlign':
'Same as canvasDrawImageFullClearAlign but only partially clearing '
'the canvas each frame.',
'css2dBackground':
'Using div elements that have a background image specified using CSS '
'styles. These div elements are translated, scaled, and rotated using '
'CSS-2D transforms.',
'css2dImg':
'Same as css2dBackground, but using img elements instead of div '
'elements.',
'css3dBackground':
'Same as css2dBackground, but using CSS-3D transforms.',
'css3dImg':
      'Same as css2dImg, but using CSS-3D transforms.',
}
class _SpaceportMeasurement(page_test.PageTest):
def __init__(self):
super(_SpaceportMeasurement, self).__init__()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--disable-gpu-vsync')
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'!document.getElementById("start-performance-tests").disabled', 60)
tab.ExecuteJavaScript("""
window.__results = {};
window.console.log = function(str) {
if (!str) return;
var key_val = str.split(': ');
if (!key_val.length == 2) return;
__results[key_val[0]] = key_val[1];
};
document.getElementById('start-performance-tests').click();
""")
num_results = 0
num_tests_in_spaceport = 24
while num_results < num_tests_in_spaceport:
tab.WaitForJavaScriptExpression(
'Object.keys(window.__results).length > %d' % num_results, 180)
num_results = tab.EvaluateJavaScript(
'Object.keys(window.__results).length')
logging.info('Completed test %d of %d' %
(num_results, num_tests_in_spaceport))
result_dict = eval(tab.EvaluateJavaScript(
'JSON.stringify(window.__results)'))
for key in result_dict:
chart, trace = key.split('.', 1)
results.AddValue(scalar.ScalarValue(
results.current_page, '%s.%s'% (chart, trace),
'objects (bigger is better)', float(result_dict[key]),
important=False, description=DESCRIPTIONS.get(chart)))
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, 'Score', 'objects (bigger is better)',
[float(x) for x in result_dict.values()],
description='Combined score for all parts of the spaceport benchmark.'))
# crbug.com/166703: This test frequently times out on Windows.
@benchmark.Disabled('mac', 'win')
class Spaceport(perf_benchmark.PerfBenchmark):
"""spaceport.io's PerfMarks benchmark.
http://spaceport.io/community/perfmarks
This test performs 3 animations (rotate, translate, scale) using a variety of
methods (css, webgl, canvas, etc) and reports the number of objects that can
be simultaneously animated while still achieving 30FPS.
"""
test = _SpaceportMeasurement
@classmethod
def Name(cls):
return 'spaceport'
def CreateStorySet(self, options):
spaceport_dir = os.path.join(util.GetChromiumSrcDir(), 'chrome', 'test',
'data', 'third_party', 'spaceport')
ps = story.StorySet(base_dir=spaceport_dir)
ps.AddStory(page_module.Page('file://index.html', ps, ps.base_dir))
return ps
|
Python
| 0
|
@@ -3964,17 +3964,76 @@
', 'win'
-)
+,%0A 'linux', 'android') # crbug.com/525112
%0Aclass S
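Decoded, the hunk widens the disabled-platform list on the class decorator, roughly:

@benchmark.Disabled('mac', 'win',
                    'linux', 'android')  # crbug.com/525112
class Spaceport(perf_benchmark.PerfBenchmark):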
|
2dc2ca7a24f9ba5ad7370901f02310725d4756a2
|
Fix the dhcp checks on centos
|
playbooks/roles/bifrost-test-dhcp/files/test-dhcp.py
|
playbooks/roles/bifrost-test-dhcp/files/test-dhcp.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import csv
import json
import os
import sys
def _load_data_from_csv(path):
with open(path) as csvfile:
csvdata = [row for row in csv.reader(csvfile)]
inventory = {}
# NOTE(pas-ha) convert to structure similar to JSON inventory
for entry in csvdata:
mac = entry[0]
hostname = entry[10]
ip = entry[11]
inventory[hostname] = {
'nics': [{'mac': mac}],
'name': hostname,
'ipv4_address': ip
}
return inventory
def _load_data_from_json(path):
with open(path) as jsonfile:
inventory = json.load(jsonfile)
return inventory
def main(argv):
# first item is the inventory_dhcp setting
# second item is the inventory_dhcp_static_ip setting
inventory_dhcp = (argv[0] == 'True' or argv[0] == 'true')
inventory_dhcp_static_ip = (argv[1] == 'True' or argv[1] == 'true')
if not inventory_dhcp:
# nothing to validate
sys.exit(0)
# load data from json file
if os.path.exists('/tmp/baremetal.json'):
inventory = _load_data_from_json('/tmp/baremetal.json')
# load data from csv file
elif os.path.exists('/tmp/baremetal.csv'):
try:
inventory = _load_data_from_csv('/tmp/baremetal.csv')
except Exception:
# try load *.csv as json for backward compatibility
inventory = _load_data_from_json('/tmp/baremetal.csv')
else:
print('ERROR: Inventory file has not been generated')
sys.exit(1)
# now check that we only have these entries in leases file
leases = []
if not os.path.exists('/var/lib/misc/dnsmasq.leases'):
print('ERROR: dnsmasq leases file has not been generated')
sys.exit(1)
with open('/var/lib/misc/dnsmasq.leases') as csvfile:
leases_reader = csv.reader(csvfile, delimiter=' ')
for row in leases_reader:
leases.append(row)
# first we test number of entries
if len(leases) != len(inventory):
print('ERROR: Number of entries do not match with inventory')
sys.exit(1)
# then we check that all macs and hostnames are present
for value in inventory.values():
# NOTE(pas-ha) supporting only single nic
mac = value['nics'][0]['mac']
hostname = value['name']
ip = value['ipv4_address']
# mac check
for lease_entry in leases:
if lease_entry[1] == mac:
break
else:
print('ERROR: No mac found in leases')
sys.exit(1)
# hostname check
for lease_entry in leases:
if lease_entry[3] == hostname:
# if we use static ip, we need to check that ip matches
# with hostname in leases
if inventory_dhcp_static_ip:
if lease_entry[2] != ip:
print('ERROR: IP does not match with inventory')
sys.exit(1)
break
else:
print('ERROR: No hostname found in leases')
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
|
Python
| 0.00016
|
@@ -2329,24 +2329,94 @@
q.leases'):%0A
+ if not os.path.exists('/var/lib/dnsmasq/dnsmasq.leases'):%0A
prin
@@ -2470,32 +2470,36 @@
rated')%0A
+
+
sys.exit(1)%0A%0A
@@ -2494,31 +2494,116 @@
exit(1)%0A
-%0A with open(
+ else:%0A dns_path = '/var/lib/dnsmasq/dnsmasq.leases'%0A else:%0A dns_path =
'/var/li
@@ -2624,16 +2624,40 @@
.leases'
+%0A%0A with open(dns_path
) as csv
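Decoded, the hunks appear to add a fallback to the CentOS lease path and open whichever file exists, roughly:

    if not os.path.exists('/var/lib/misc/dnsmasq.leases'):
        if not os.path.exists('/var/lib/dnsmasq/dnsmasq.leases'):
            print('ERROR: dnsmasq leases file has not been generated')
            sys.exit(1)
        else:
            dns_path = '/var/lib/dnsmasq/dnsmasq.leases'
    else:
        dns_path = '/var/lib/misc/dnsmasq.leases'

    with open(dns_path) as csvfile: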
|
f8944c0ac5a80d72852d9b2ea1dc1fc7d79a1891
|
Add test for deserialising
|
plugins/data/enumerated/test/test_enumerated_type.py
|
plugins/data/enumerated/test/test_enumerated_type.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Tests the enumerated type data type.
These tests are mostly interface-based, meaning that they will not test actual
output a lot, but tests the behaviour of the units instead.
"""
import enum #To define example enumerated types to test with.
import test.test_enum #Built-in enumerated types to test with.
import enumerated.enumerated_type #The module we're testing.
import luna.tests #For parametrised tests.
class Animal(enum.Enum):
"""
An example enumerated type to perform tests on.
"""
CAT = 0
DOG = 1
BIRD = 2
class EnumContainer:
"""
A class that contains a nested enum to test with.
"""
class Material(enum.Enum):
"""
A nested enumerated type inside another class.
We test with this because it has a different import path if it is
defined this way.
"""
IRON = 3
STONE = 4
WOOD = 5
class TestEnumeratedType(luna.tests.TestCase):
"""
Tests the behaviour of various functions belonging to the enumerated type.
In particular, it focuses on how these functions interact and integrate with
each other.
"""
@luna.tests.parametrise({
"module_local": {
"instance": Animal.CAT
},
"module_local2": { #Different module-local one that is not the first-defined entry.
"instance": Animal.BIRD
},
"builtins": {
"instance": test.test_enum.Fruit.tomato
},
"nested": {
"instance": EnumContainer.Material.STONE
}
})
def test_serialise(self, instance):
"""
Tests whether we can serialise enumerated types.
:param instance: The enumerated type instance to serialise.
"""
result = enumerated.enumerated_type.serialise(instance)
self.assertIsInstance(result, bytes, "The serialised enumerated type must be a byte sequence.")
|
Python
| 0
|
@@ -1475,16 +1475,612 @@
.%0A%09%22%22%22%0A%0A
+%09@luna.tests.parametrise(%7B%0A%09%09%22custom%22: %7B%0A%09%09%09%22serialised%22: b%22enumerated.test.Animal.CAT%22%0A%09%09%7D,%0A%09%09%22custom2%22: %7B%0A%09%09%09%22serialised%22: b%22enumerated.test.Animal.BIRD%22%0A%09%09%7D,%0A%09%09%22builtins%22: %7B%0A%09%09%09%22serialised%22: b%22test.test_enum.Fruit.tomato%22%0A%09%09%7D,%0A%09%09%22nested%22: %7B%0A%09%09%09%22serialised%22: b%22enumerated.test.EnumContainer.Material.STONE%22%0A%09%09%7D%0A%09%7D)%0A%09def test_deserialise(self, serialised):%0A%09%09%22%22%22%0A%09%09Tests whether we can deserialise enumerated types.%0A%0A%09%09:param serialised: The serialised form of some enumerated type.%0A%09%09%22%22%22%0A%09%09result = enumerated.enumerated_type.deserialise(serialised)%0A%09%09self.assertIsInstance(result, enum.Enum)%0A%0A
%09@luna.t
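Decoded (%09 tabs shown as four-space indents, parametrise cases condensed to one line each), the hunk adds a deserialisation counterpart to test_serialise, roughly:

    @luna.tests.parametrise({
        "custom": {"serialised": b"enumerated.test.Animal.CAT"},
        "custom2": {"serialised": b"enumerated.test.Animal.BIRD"},
        "builtins": {"serialised": b"test.test_enum.Fruit.tomato"},
        "nested": {"serialised": b"enumerated.test.EnumContainer.Material.STONE"}
    })
    def test_deserialise(self, serialised):
        """
        Tests whether we can deserialise enumerated types.

        :param serialised: The serialised form of some enumerated type.
        """
        result = enumerated.enumerated_type.deserialise(serialised)
        self.assertIsInstance(result, enum.Enum)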
|
9686b8165e0fcda8f64c6c6eadcc786c04d07803
|
make sure lsf post exec command sends exit code back
|
flow/shell_command/lsf/commands/post_exec.py
|
flow/shell_command/lsf/commands/post_exec.py
|
from flow import exit_codes
from flow.commands.base import CommandBase
from flow.configuration.inject.broker import BrokerConfiguration
from flow.configuration.inject.orchestrator import OrchestratorConfiguration
from flow.configuration.settings.injector import setting
from flow.util.exit import exit_process
from injector import inject
import flow.interfaces
import logging
import os
LOG = logging.getLogger(__name__)
@inject(orchestrator=flow.interfaces.IOrchestrator)
class LsfPostExecCommand(CommandBase):
injector_modules = [
BrokerConfiguration,
OrchestratorConfiguration,
]
@staticmethod
def annotate_parser(parser):
parser.add_argument('--color')
parser.add_argument('--color-group-idx')
parser.add_argument('--execute-failure', '-f')
parser.add_argument('--execute-success', '-s')
parser.add_argument('--net-key', '-n')
def _execute(self, parsed_arguments):
LOG.info("Begin LSF post exec")
info = os.environ.get('LSB_JOBEXIT_INFO', None)
stat = os.environ.get('LSB_JOBEXIT_STAT', None)
if stat is None:
LOG.critical("LSB_JOBEXIT_STAT environment variable wasn't "
"set... exiting!")
exit_process(exit_codes.EXECUTE_ERROR)
else:
stat = int(stat)
        # we don't currently do migrating/checkpointing/requeuing so we're not
        # going to check for those possibilities. Instead we will assume that
        # the job has failed.
if info is not None or stat != 0:
exit_code = stat >> 8
signal = stat & 255
            LOG.debug('Job exited with code (%s) and signal (%s)',
exit_code, signal)
deferred = self.orchestrator.create_token(
net_key=parsed_arguments.net_key,
place_idx=parsed_arguments.execute_failure,
color=parsed_arguments.color,
color_group_idx=parsed_arguments.color_group_idx)
else:
LOG.debug("Process exited normally")
deferred = self.orchestrator.create_token(
net_key=parsed_arguments.net_key,
place_idx=parsed_arguments.execute_success,
color=parsed_arguments.color,
color_group_idx=parsed_arguments.color_group_idx)
return deferred
def _teardown(self, parsed_arguments):
LOG.info('End LSF post exec')
|
Python
| 0
|
@@ -1616,16 +1616,23 @@
signal
+_number
= stat
@@ -1636,16 +1636,145 @@
at & 255
+%0A token_data = %7B%0A 'exit_code': exit_code,%0A 'signal_number': signal_number,%0A %7D
%0A%0A
@@ -1872,16 +1872,23 @@
, signal
+_number
)%0A
@@ -2096,32 +2096,32 @@
rguments.color,%0A
-
@@ -2164,32 +2164,69 @@
.color_group_idx
+,%0A data=token_data
)%0A%0A else:
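Decoded, the hunks rename signal to signal_number and attach the decoded exit status to the failure token, roughly:

            exit_code = stat >> 8
            signal_number = stat & 255
            token_data = {
                'exit_code': exit_code,
                'signal_number': signal_number,
            }
            LOG.debug('Job exited with code (%s) and signal (%s)',
                      exit_code, signal_number)
            deferred = self.orchestrator.create_token(
                net_key=parsed_arguments.net_key,
                place_idx=parsed_arguments.execute_failure,
                color=parsed_arguments.color,
                color_group_idx=parsed_arguments.color_group_idx,
                data=token_data)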
|
c7cea0167f8de6b6619c323a363b2ec1a9a5d65f
|
fix alembic foreign key naming
|
invenio_accounts/alembic/9848d0149abd_create_accounts_tables.py
|
invenio_accounts/alembic/9848d0149abd_create_accounts_tables.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Create accounts tables."""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '9848d0149abd'
down_revision = '843bc79c426f'
branch_labels = ()
depends_on = None
def upgrade():
"""Upgrade database."""
op.create_table(
'accounts_role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table(
'accounts_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(name='active'), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.Column('last_login_at', sa.DateTime(), nullable=True),
sa.Column('current_login_at', sa.DateTime(), nullable=True),
sa.Column('last_login_ip',
sqlalchemy_utils.types.ip_address.IPAddressType(),
nullable=True),
sa.Column('current_login_ip',
sqlalchemy_utils.types.ip_address.IPAddressType(),
nullable=True),
sa.Column('login_count', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table(
'accounts_user_session_activity',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('sid_s', sa.String(length=255), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['user_id'], [u'accounts_user.id'],
name='fk_accounts_session_activity_user_id',
),
sa.PrimaryKeyConstraint('sid_s')
)
op.create_table(
'accounts_userrole',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['role_id'], ['accounts_role.id'],
name='fk_accounts_userrole_role_id',
),
sa.ForeignKeyConstraint(
['user_id'], ['accounts_user.id'],
name='fk_accounts_userrole_user_id',
),
)
with op.batch_alter_table('transaction') as batch_op:
batch_op.add_column(sa.Column(
'user_id',
sa.Integer(),
sa.ForeignKey(
'accounts_user.id', name='fk_transaction_accounts_user_id'),
nullable=True,
))
batch_op.create_index(
op.f('ix_transaction_user_id'), ['user_id'], unique=False
)
def downgrade():
"""Downgrade database."""
ctx = op.get_context()
insp = Inspector.from_engine(ctx.connection.engine)
for fk in insp.get_foreign_keys('transaction'):
if fk['referred_table'] == 'accounts_user':
op.drop_constraint(
op.f(fk['name']), 'transaction', type_='foreignkey'
)
with op.batch_alter_table('transaction') as batch_op:
batch_op.drop_index(op.f('ix_transaction_user_id'))
batch_op.drop_column('user_id')
op.drop_table('accounts_userrole')
op.drop_table('accounts_user_session_activity')
op.drop_table('accounts_user')
op.drop_table('accounts_role')
|
Python
| 0
|
@@ -3685,80 +3685,23 @@
Key(
-%0A 'accounts_user.id', name='fk_transaction_accounts_user_
+'accounts_user.
id')
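Decoded, the hunk drops the explicit constraint name, presumably so the metadata naming convention supplies it (the subject says "fix alembic foreign key naming"), roughly:

        batch_op.add_column(sa.Column(
            'user_id',
            sa.Integer(),
            sa.ForeignKey('accounts_user.id'),
            nullable=True,
        ))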
|
2be23846aabae5307ef817561661783b44c43160
|
Move error-message logic into exception class
|
falcom/table.py
|
falcom/table.py
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
class Table:
class InputStrContainsCarriageReturn (RuntimeError):
pass
class InconsistentColumnCounts (RuntimeError):
pass
def __init__ (self, tab_separated_text = None):
self.text = tab_separated_text
self.__raise_error_if_carriage_returns()
self.__create_internal_structure()
@property
def rows (self):
return len(self)
@property
def cols (self):
return len(self.__rows[0]) if self else 0
def __len__ (self):
return len(self.__rows)
def __iter__ (self):
return iter(self.__rows)
def __getitem__ (self, key):
return self.__rows[key]
def __repr__ (self):
return "<{} {}>".format(self.__class__.__name__,
repr(self.text))
def __raise_error_if_carriage_returns (self):
if self.text and "\r" in self.text:
raise self.InputStrContainsCarriageReturn
def __create_internal_structure (self):
if self.text:
self.__set_to_list_of_rows_from_text()
else:
self.__rows = []
def __set_to_list_of_rows_from_text (self):
self.__rows = [self.__split_row(r)
for r in self.__rows_from_text()]
self.__raise_error_unless_col_counts_are_consistent()
def __split_row (self, row_text):
return tuple(row_text.split("\t"))
def __rows_from_text (self):
return self.text.rstrip("\n").split("\n")
def __raise_error_unless_col_counts_are_consistent (self):
rows = iter(self.__rows)
expected_len = len(next(rows))
for row in rows:
if len(row) != expected_len:
raise Table.InconsistentColumnCounts(
"Expected every row to have len={:d}: {}".format(
expected_len, repr(row)))
|
Python
| 0.000006
|
@@ -312,28 +312,288 @@
r):%0A
-pass
+def __init__ (self, expected_len, row):%0A self.expected_len = expected_len%0A self.row = row%0A%0A def __str__ (self):%0A return %22Expected every row to have len=%7B:d%7D: %7B%7D%22.format(%0A self.expected_len, repr(self.row))
%0A%0A def __
@@ -2175,103 +2175,8 @@
nts(
-%0A %22Expected every row to have len=%7B:d%7D: %7B%7D%22.format(%0A
expe
@@ -2190,15 +2190,8 @@
n, r
-epr(row))
+ow
)%0A
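Decoded, the hunks move the message formatting into the exception class, matching the commit subject, roughly:

    class InconsistentColumnCounts (RuntimeError):
        def __init__ (self, expected_len, row):
            self.expected_len = expected_len
            self.row = row

        def __str__ (self):
            return "Expected every row to have len={:d}: {}".format(
                    self.expected_len, repr(self.row))

with the raise site reduced to:

                raise Table.InconsistentColumnCounts(expected_len, row)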
|
4ce117b65d4a6f18d327f00866eaa4383f908094
|
Revert "meter import path."
|
faucet/meter.py
|
faucet/meter.py
|
"""Configure meters."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from conf import Conf
from valve_of import meteradd
except ImportError:
from faucet.conf import Conf
from faucet.valve_of import meteradd
class Meter(Conf):
"""Implement FAUCET configuration for an OpenFlow meter."""
name = None
entry_msg = None
defaults = {
'meter_id': None,
'entry': None,
}
defaults_type = {
'entry': dict,
'meter_id': int,
}
def __init__(self, _id, conf):
super(Meter, self).__init__(_id, conf)
assert conf['entry']
assert conf['entry']['flags']
assert conf['entry']['bands']
conf['entry']['meter_id'] = self.meter_id
self.entry_msg = meteradd(self.entry)
|
Python
| 0
|
@@ -883,23 +883,16 @@
from
-faucet.
valve_of
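Decoded, the single hunk drops the faucet. package prefix from one valve_of import (this record reverts an earlier import-path change):

    from valve_of import meteradd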
|
db8b991600ab0a812e1d9af1a6e4bb7be25b5bd4
|
fix apply_tote_contents_hint
|
jsk_2016_01_baxter_apc/node_scripts/apply_tote_contents_hint.py
|
jsk_2016_01_baxter_apc/node_scripts/apply_tote_contents_hint.py
|
#!/usr/bin/env python
import numpy as np
from jsk_2015_05_baxter_apc.msg import ObjectRecognition
import jsk_apc2016_common
from jsk_recognition_msgs.msg import ClassificationResult
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import jsk_logwarn
import rospy
class ApplyToteContentsHint(ConnectionBasedTransport):
"""Use tote contents info to improve object recognition"""
def __init__(self):
super(self.__class__, self).__init__()
json_file = rospy.get_param('~json')
self.tote_contents = jsk_apc2016_common.get_tote_contents(json_file)
self.pub = self.advertise('~output', ObjectRecognition, queue_size=1)
def subscribe(self):
self.sub = rospy.Subscriber('~input', ClassificationResult,
self._apply)
def unsubscribe(self):
self.sub.unregister()
def _apply(self, msg):
# get candidates probabilities
candidates = self.tote_contents
candidates.append('no_object')
label_to_proba = dict(zip(msg.target_names, msg.probabilities))
candidates_proba = [label_to_proba[label] for label in candidates]
candidates_proba = np.array(candidates_proba)
candidates_proba = candidates_proba / candidates_proba.sum()
# compose output message
top_index = np.argmax(candidates_proba)
out_msg = ObjectRecognition(
header=msg.header,
matched=candidates[top_index],
probability=candidates_proba[top_index],
candidates=candidates,
probabilities=candidates_proba,
)
self.pub.publish(out_msg)
if __name__ == '__main__':
rospy.init_node('apply_tote_contents_hint')
app = ApplyToteContentsHint()
rospy.spin()
|
Python
| 0.000002
|
@@ -974,16 +974,32 @@
idates =
+ %5B'no_object'%5D +
self.to
@@ -1014,47 +1014,8 @@
nts%0A
- candidates.append('no_object')%0A
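Decoded, the hunks build the candidate list by prepending rather than appending, which stops 'no_object' from being added to the shared self.tote_contents list on every message, roughly:

        candidates = ['no_object'] + self.tote_contents
        label_to_proba = dict(zip(msg.target_names, msg.probabilities))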
|
6654d6956982c31bbfe6eb22aae557049ea613ba
|
Fix incorrect formatting call in azure module (#33561)
|
lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py
|
lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
required: false
default: null
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except CloudError as exc:
self.fail("Failed to list all items - {1}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -3712,17 +3712,17 @@
tems - %7B
-1
+0
%7D%22.forma
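Decoded, the hunk fixes the out-of-range positional index in the format string (only one argument is supplied):

            self.fail("Failed to list all items - {0}".format(str(exc)))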
|
02efde47b5cf20b7385eacaa3f21454ffa636ad7
|
Update CodeStarConnections::Connection per 2020-07-23 update
|
troposphere/codestarconnections.py
|
troposphere/codestarconnections.py
|
# Copyright (c) 2012-2020, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, Tags
VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')
def validate_connection_providertype(connection_providertype):
"""Validate ProviderType for Connection"""
if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE:
raise ValueError("Connection ProviderType must be one of: %s" %
", ".join(VALID_CONNECTION_PROVIDERTYPE))
return connection_providertype
class Connection(AWSObject):
resource_type = "AWS::CodeStarConnections::Connection"
props = {
'ConnectionName': (basestring, True),
'ProviderType': (validate_connection_providertype, True),
'Tags': (Tags, False),
}
|
Python
| 0
|
@@ -687,32 +687,72 @@
estring, True),%0A
+ 'HostArn': (basestring, False),%0A
'Provide
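Decoded, the hunk adds the optional HostArn property, so the props table becomes roughly:

    props = {
        'ConnectionName': (basestring, True),
        'HostArn': (basestring, False),
        'ProviderType': (validate_connection_providertype, True),
        'Tags': (Tags, False),
    }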
|
f89869e095bc6c5d99c59042f815b82b1dcd19dc
|
Update two_armed_bandit.py
|
twoarmedbandit/two_armed_bandit.py
|
twoarmedbandit/two_armed_bandit.py
|
#!/usr/bin/env python
# =============================================================================
# Two armed bandit
#
# @description: Program for computing the greedy choice, optimal reward,
# optimal choice or greedy reward for a two-armed bandit
# @author: Elisha Lai
# =============================================================================
import sys
# Returns the expectation for a win on the next pull of an arm
def expectation(a, b):
return (a + 1) / float(a + b + 2)
# Returns the greedy strategy's choice for the next pull
def greedy_choice(a1, b1, a2, b2, d):
expectation1 = expectation(a1, b1)
expectation2 = expectation(a2, b2)
if expectation1 >= expectation2:
return 1
else:
return 2
# Returns the expectation for the total reward (sum of the rewards
# obtained over the next d pulls) if the optimal strategy is followed
def optimal_reward(a1, b1, a2, b2, d):
optimal_rewards = [[[[0 for h in range(d + 1)] for g in range(d + 1)]
for f in range(d + 1)] for e in range(d + 1)]
for i in range(d + 1):
for e in range(d - i + 1):
for f in range(d - (i + e) + 1):
for g in range(d - (i + e + f) + 1):
h = d - (i + e + f + g)
if i == 0:
optimal_rewards[e][f][g][h] = 0
else:
expectation1 = expectation(a1 + e, b1 + f)
reward1 = \
expectation1 * (1 + optimal_rewards[e + 1][f][g][h]) + \
(1 - expectation1) * optimal_rewards[e][f + 1][g][h]
expectation2 = expectation(a2 + g, b2 + h)
reward2 = \
expectation2 * (1 + optimal_rewards[e][f][g + 1][h]) + \
(1 - expectation2) * optimal_rewards[e][f][g][h + 1]
optimal_rewards[e][f][g][h] = max(reward1, reward2)
return optimal_rewards[0][0][0][0]
# Returns the optimal strategy's choice for the next pull
def optimal_choice(a1, b1, a2, b2, d):
expectation1 = expectation(a1, b1)
reward1 = \
expectation1 * (1 + optimal_reward(a1 + 1, b1, a2, b2, d - 1)) + \
(1 - expectation1) * optimal_reward(a1, b1 + 1, a2, b2, d - 1)
expectation2 = expectation(a2, b2)
reward2 = \
expectation2 * (1 + optimal_reward(a1, b1, a2 + 1, b2, d - 1)) + \
(1 - expectation2) * optimal_reward(a1, b1, a2, b2 + 1, d - 1)
if reward1 >= reward2:
return 1
else:
return 2
# Returns the expectation for the total reward (sum of the rewards
# obtained over the next d pulls) if the greedy strategy is followed
def greedy_reward(a1, b1, a2, b2, d):
greedy_rewards = [[[[0 for h in range(d + 1)] for g in range(d + 1)]
for f in range(d + 1)] for e in range(d + 1)]
for i in range(d + 1):
for e in range(d - i + 1):
for f in range(d - (i + e) + 1):
for g in range(d - (i + e + f) + 1):
h = d - (i + e + f + g)
if i == 0:
greedy_rewards[e][f][g][h] = 0
else:
expectation1 = expectation(a1 + e, b1 + f)
expectation2 = expectation(a2 + g, b2 + h)
if expectation1 >= expectation2:
reward = \
expectation1 * (1 + greedy_rewards[e + 1][f][g][h]) + \
(1 - expectation1) * greedy_rewards[e][f + 1][g][h]
else:
reward = \
expectation2 * (1 + greedy_rewards[e][f][g + 1][h]) + \
(1 - expectation2) * greedy_rewards[e][f][g][h + 1]
greedy_rewards[e][f][g][h] = reward
return greedy_rewards[0][0][0][0]
# Returns the number truncated to exactly three digits after the
# decimal point
def truncate_number(n):
number_parts = str(n).split(".")
if len(number_parts) == 1:
return n
else:
integer_part = number_parts[0]
fractional_part = number_parts[1]
truncated_fractional_part = fractional_part[:3]
truncated_number = ".".join([integer_part, truncated_fractional_part])
return truncated_number
def main():
values = []
for line in sys.stdin:
line = line.strip()
s = line.split()[0]
a1 = int(line.split()[1])
b1 = int(line.split()[2])
a2 = int(line.split()[3])
b2 = int(line.split()[4])
d = int(line.split()[5])
if s == "GC":
value = greedy_choice(a1, b1, a2, b2, d)
elif s == "OR":
value = truncate_number(optimal_reward(a1, b1, a2, b2, d))
elif s == "OC":
value = optimal_choice(a1, b1, a2, b2, d)
else:
value = truncate_number(greedy_reward(a1, b1, a2, b2, d))
values.append(value)
for value in values:
print(value)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -106,24 +106,24 @@
Two
-
+-
armed
-b
+B
andit%0A#%0A
# @
@@ -118,16 +118,39 @@
andit%0A#%0A
+# @author: Elisha Lai%0A
# @desc
@@ -280,34 +280,39 @@
dit%0A# @
-author: Elisha Lai
+version: 1.0 30/10/2016
%0A# =====
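Decoded, the hunks only rework the header comment, roughly:

# Two-armed Bandit
#
# @author: Elisha Lai
# @description: Program for computing the greedy choice, optimal reward,
# optimal choice or greedy reward for a two-armed bandit
# @version: 1.0 30/10/2016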
|
c86d689e3593b6221b8b9120f6af16b32c2211d9
|
Add __all__ and __version__ to __init__.py.
|
src/pu/__init__.py
|
src/pu/__init__.py
|
Python
| 0.000107
|
@@ -0,0 +1,48 @@
+__all__ = 'utils'.split()%0A__version__ = '1.0.0'%0A
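Decoded, the 48-character new-file body is exactly:

__all__ = 'utils'.split()
__version__ = '1.0.0'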
|
|
92469456222a8d5c00595ca34ea21c66042a5040
|
modify main loop
|
parser/main.py
|
parser/main.py
|
import config
import parse
pipeout = open(config.PIPE_NAME, 'r')
while True:
input_record = pipeout.readline()
if input_record.split(',')[0] == '$GYRO':
gyro = input_record
accel = pipeout.readline()
magnet = pipeout.readline()
pressure = pipeout.readline()
parse.parse_IMU(gyro, accel, magnet, pressure)
|
Python
| 0.000003
|
@@ -188,125 +188,293 @@
ord%0A
- accel = pipeout.readline()%0A magnet = pipeout.readline()%0A pressure = pipeout.readline()%0A
+%09if input_record.split(',')%5B0%5D == '$ACCEL':%0A accel = pipeout.readline()%0A%09if input_record.split(',')%5B0%5D == '$MAGNET':%0A magnet = pipeout.readline()%0A%09if input_record.split(',')%5B0%5D == '$MBAR':%0A pressure = pipeout.readline()%0A%09if all(%5Bgyro, accel, magnet, pressure%5D):%0A%09%09
pars
@@ -516,8 +516,50 @@
essure)%0A
+%09%09gyro = accel = magnet = pressure = None%0A
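Decoded, the hunks appear to dispatch each line on its sentence tag and only parse once all four readings have arrived (the commit mixes %09 tabs and spaces; shown here with uniform indentation), roughly:

while True:
    input_record = pipeout.readline()
    if input_record.split(',')[0] == '$GYRO':
        gyro = input_record
    if input_record.split(',')[0] == '$ACCEL':
        accel = pipeout.readline()
    if input_record.split(',')[0] == '$MAGNET':
        magnet = pipeout.readline()
    if input_record.split(',')[0] == '$MBAR':
        pressure = pipeout.readline()
    if all([gyro, accel, magnet, pressure]):
        parse.parse_IMU(gyro, accel, magnet, pressure)
        gyro = accel = magnet = pressure = None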
|
39e5defbb12da62fc132e89437b1ce408b85ec6b
|
Fix parsertests script exit code.
|
parsertests.py
|
parsertests.py
|
#!/usr/bin/env python
import sys
import os
import os.path as path
import glob
import subprocess as sp
from collections import namedtuple
from multiprocessing import Pool
# TODO: remove this silly script and write the tests in scala/gradle.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
statuscolor = {
'PASS': bcolors.OKGREEN,
'WARN': bcolors.WARNING,
'FAIL': bcolors.FAIL,
}
Result = namedtuple('Result', ['testname', 'testout', 'status'])
def run_test(test):
cmd = ['./build/install/svparse/bin/svparse', os.path.join(test, "project.xml")]
testenv = os.environ.copy()
testenv['SVPARSE_EXTRA'] = 'svparse_extra_test.xml'
pid = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, env=testenv)
rawout, _ = pid.communicate()
testdir, testname = os.path.split(test)
testout = os.path.join(testdir, '{}.log'.format(testname))
with open(testout, 'w') as f:
f.write(rawout)
if pid.returncode != 0:
return Result(test, testout, 'FAIL')
if detected_antlr_warnings(rawout):
return Result(test, testout, 'WARN')
return Result(test, testout, 'PASS')
def detected_antlr_warnings(testout):
return "reportAmbiguity" in testout
def main():
n_not_passing = 0
p = Pool(4)
for result in p.imap_unordered(run_test, [f for f in glob.glob("parsertests/*") if os.path.isdir(f)]):
status = statuscolor[result.status] + result.status + bcolors.ENDC
if result.status != 'PASS':
n_not_passing += 1
print "{}: {} - {}".format(status, result.testname, result.testout)
else:
print "{}: {}".format(status, result.testname)
print "Summary: {} tests did not pass cleanly".format(n_not_passing)
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -1278,27 +1278,47 @@
():%0A n_
-not_passing
+total = 0%0A n_pass = 0%0A n_fail
= 0%0A p
@@ -1433,16 +1433,33 @@
r(f)%5D):%0A
+ n_total += 1%0A
stat
@@ -1559,29 +1559,58 @@
:%0A
-n_not_passing
+if result.status == 'FAIL':%0A n_fail
+= 1%0A
@@ -1691,16 +1691,34 @@
else:%0A
+ n_pass += 1%0A
pr
@@ -1762,16 +1762,17 @@
stname)%0A
+%0A
print
@@ -1784,38 +1784,101 @@
ary:
- %7B%7D tests did not pass cleanly
+%22%0A print %22- PASS: %7B%7D%22.format(n_pass)%0A print %22- FAIL: %7B%7D%22.format(n_fail)%0A print %22- WARN: %7B%7D
%22.fo
@@ -1888,20 +1888,76 @@
t(n_
-not_passing)
+total - n_fail - n_pass)%0A if n_fail == 0:%0A return 0%0A return 1
%0A%0Aif
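Decoded, the hunks split the single counter into pass/fail/total tallies and give main() a real exit code, roughly:

def main():
    n_total = 0
    n_pass = 0
    n_fail = 0
    p = Pool(4)
    for result in p.imap_unordered(run_test, [f for f in glob.glob("parsertests/*") if os.path.isdir(f)]):
        n_total += 1
        status = statuscolor[result.status] + result.status + bcolors.ENDC
        if result.status != 'PASS':
            if result.status == 'FAIL':
                n_fail += 1
            print "{}: {} - {}".format(status, result.testname, result.testout)
        else:
            n_pass += 1
            print "{}: {}".format(status, result.testname)

    print "Summary:"
    print "- PASS: {}".format(n_pass)
    print "- FAIL: {}".format(n_fail)
    print "- WARN: {}".format(n_total - n_fail - n_pass)
    if n_fail == 0:
        return 0
    return 1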
|
400f5531ff1edb41a0958b741de85a1040212274
|
match timezone to Amsterdam
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Paul Salden'
SITENAME = u'paulsalden.com'
SITEURL = 'http://paulsalden.com'
PATH = 'content'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
Python
| 0.999947
|
@@ -211,13 +211,17 @@
ope/
-Paris
+Amsterdam
'%0A%0AD
|
a9b7f92edb7b3a73a2b38a45c5ad6a0deee18e19
|
Add GA tracking
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'keyan'
SITENAME = u'keyan pishdadian'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Detroit'
DEFAULT_LANG = u'en'
THEME = "themes/flasky"
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Navigation sections and relative URL:
SECTIONS = [('blog', 'index.html'),
('archive', 'archives.html'),
('about', 'pages/about-me.html')]
DEFAULT_CATEGORY = 'Uncategorized'
DATE_FORMAT = {'en': '%m %d %Y'}
DEFAULT_DATE_FORMAT = '%m %d %Y'
DEFAULT_PAGINATION = False
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
OUTPUT_PATH = 'output'
# static paths will be copied under the same name
STATIC_PATHS = ["images"]
# Optional social media links
# =============================
TWITTER_USERNAME = 'keyan__P'
LINKEDIN_URL = 'https://www.linkedin.com/in/keyanp'
GITHUB_URL = 'http://github.com/keyan'
MAIL_USERNAME = 'kpishdadian'
MAIL_HOST = 'gmail.com'
|
Python
| 0
|
@@ -236,16 +236,77 @@
lasky%22%0A%0A
+# Site analytics%0AGOOGLE_ANALYTICS_ACCOUNT = %22UA-93664476-1%22%0A%0A
# Feed g
|
c8a1b3a7475d3e964814cb9be2a82d00bba745d0
|
Update settings
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Yann Baumgartner'
SITENAME = u'Histoires de briques'
SITESUBTITLE = 'Un site dédié aux LEGO®'
SITEURL = ''
GITHUB_URL = 'https://github.com/yannbaumgartner/histoires-de-briques.git'
PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
STATIC_PATHS = ['images', 'extra/robots.txt', 'extra/favicon.png']
PLUGIN_PATHS = ['plugins']
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
'extra/favicon.ico': {'path': 'favicon.png'}
}
PLUGINS = ['tag_cloud', 'tipue_search']
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'fr'
LOCALE = 'fr_FR.utf8'
DATE_FORMATS = {
'fr': '%-d %B %Y',
}
DEFAULT_PAGINATION = False
SUMMARY_MAX_LENGTH = None
THEME = 'themes/pelican-bootstrap3'
BOOTSTRAP_THEME = 'flatly'
DIRECT_TEMPLATES = (('index', 'tags', 'categories', 'authors', 'archives', 'search'))
DISPLAY_TAGS_ON_SIDEBAR = 'True'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),)
|
Python
| 0.000001
|
@@ -924,16 +924,53 @@
rch'))%0A%0A
+DISPLAY_ARTICLE_INFO_ON_INDEX = True%0A
DISPLAY_
|
0a6e486a27a48c59e48f458ce1217848ed73ff24
|
Fix time zone config
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Eric, Etan, Fred, Genny, and Steve'
SITENAME = u'The Nest'
SITEURL = ''
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
Python
| 0.00004
|
@@ -182,20 +182,11 @@
= '
-Europe/Paris
+PST
'%0A%0AD
|
c08e28a23778280e577793156ce5b455ba80f92f
|
Tag new release: 2.3.0
|
floo/version.py
|
floo/version.py
|
PLUGIN_VERSION = '2.2.13'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
|
Python
| 0
|
@@ -17,12 +17,11 @@
'2.
-2.13
+3.0
'%0A#
|
fcc77ac1557ab7f5c3d5605240ea505b8b61b321
|
Update utils.py
|
follow/utils.py
|
follow/utils.py
|
from django.core.urlresolvers import reverse
from django.db.models.fields.related import ManyToManyField, ForeignKey
from follow.models import Follow
from follow.registry import registry, model_map
from django import VERSION as DjangoVersion
if float('%s.%s' % DjangoVersion[:2]) > 1.7:
module_name = 'model_name'
else:
module_name = 'module_name'
def get_followers_for_object(instance):
return Follow.objects.get_follows(instance)
def register(model, field_name=None, related_name=None, lookup_method_name='get_follows'):
"""
This registers any model class to be follow-able.
"""
if model in registry:
return
registry.append(model)
if not field_name:
field_name = 'target_%s' % model._meta.__getattribute__(module_name)
if not related_name:
related_name = 'follow_%s' % model._meta.__getattribute__(module_name)
field = ForeignKey(model, related_name=related_name, null=True,
blank=True, db_index=True)
field.contribute_to_class(Follow, field_name)
setattr(model, lookup_method_name, get_followers_for_object)
model_map[model] = [related_name, field_name]
def follow(user, obj):
""" Make a user follow an object """
follow, created = Follow.objects.get_or_create(user, obj)
return follow
def unfollow(user, obj):
""" Make a user unfollow an object """
try:
follow = Follow.objects.get_follows(obj).get(user=user)
follow.delete()
return follow
except Follow.DoesNotExist:
pass
def toggle(user, obj):
""" Toggles a follow status. Useful function if you don't want to perform follow
checks but just toggle it on / off. """
if Follow.objects.is_following(user, obj):
return unfollow(user, obj)
return follow(user, obj)
def follow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def unfollow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def toggle_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def follow_url(user, obj):
""" Returns the right follow/unfollow url """
return toggle_link(obj)
|
Python
| 0.000001
|
@@ -274,16 +274,17 @@
n%5B:2%5D) %3E
+=
1.7:%0A
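Decoded (%3E is >), the hunk relaxes the version comparison so Django 1.7 itself also selects the newer attribute name:

if float('%s.%s' % DjangoVersion[:2]) >= 1.7:
    module_name = 'model_name'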
|
af40b5e9e172cd7cead22fabe364d60955378fff
|
modify test agent host from 0.0.0.0 to localhost
|
uitester/test_manager/tests_cli.py
|
uitester/test_manager/tests_cli.py
|
from uitester.test_manager import rpc_server, rpc_agent
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('Tester')
PORT = 11800
selected_device_id = None
test_server = None
commands = {}
agents = {}
def start_agent(device_id):
_agent = rpc_agent.get_test_agent(device_id)
agents[device_id] = _agent
_agent.start('0.0.0.0', PORT)
def close_agent(device_id):
_agent = agents.get(device_id)
if _agent:
_agent.stop()
agents.pop(device_id)
def show_agents():
for _agent_id in agents:
print(_agent_id)
def agent(*args):
"""
usage: agent [option]
options:
add [device_id] 'add agent with device_id'
rm [device_id] 'remove agent with device_id'
list 'list all agents'
"""
if len(args) < 1:
print('Agent arg error. \nneed at least 1 args\n')
return
if args[0] == 'add':
if len(args) < 2:
print('Agent add need 2 args')
return
start_agent(args[1])
elif args[0] == 'rm':
close_agent(args[1])
elif args[0] == 'list':
show_agents()
else:
print('Unknown agent command')
commands['quit'] = {'func': None, 'help': ''}
commands['agent'] = {'func': agent, 'help': agent.__doc__}
def server(*args):
"""
usage: server [option]
options:
agents 'list all registered agents'
connections 'list all connections'
call [remote_method] 'call remote method on agent'
select [device_id] 'select device_Id'
"""
global selected_device_id
if len(args) < 1:
print('Unknown command')
return
if args[0] == 'agents':
for _agent in test_server._agents:
print(_agent)
elif args[0] == 'connections':
pass
elif args[0] == 'use':
if len(args) < 2:
print('Use need 2 args. e.g. use device_id_1')
return
if args[1] not in test_server._agents:
print('Device id not found in register device list')
return
selected_device_id = args[1]
elif args[0] == 'call':
if len(args) < 2:
print('Call remote method need a method name')
return
if not selected_device_id:
print('Not select any device')
return
agent_proxy = test_server._agents.get(selected_device_id)
if agent_proxy:
remote_call_args = []
if len(args) > 2:
remote_call_args = args[2:]
response = agent_proxy.call(args[1], remote_call_args)
print(response)
print(response.to_json())
commands['server'] = {'func': server, 'help': server.__doc__}
def parse_line(line):
items = []
cache = None
in_quotation = False
for char in line:
if char == ' ' and not in_quotation and cache:
items.append(cache)
cache = None
elif char == '"':
in_quotation = not in_quotation
else:
if not cache:
cache = char
else:
cache += char
if cache:
items.append(cache)
if in_quotation:
raise ValueError('Missing quote. {}'.format(line))
return items
def test():
global test_server
test_server = rpc_server.start(11800)
while True:
user_input = input('>>').strip()
if len(user_input) == 0:
continue
if user_input == 'quit':
break
if user_input == 'help':
for cmd in commands:
print('Command: {}'.format(cmd))
print(commands[cmd]['help'])
continue
items = parse_line(user_input)
cmd = commands.get(items[0])
if cmd:
cmd['func'](*items[1:])
if __name__ == '__main__':
test()
|
Python
| 0.000001
|
@@ -361,15 +361,17 @@
rt('
-0.0.0.0
+localhost
', P
|
f875e5d1bbb3ca08d597164213dd4905ea631a05
|
Add a config option to control ParlayX starting and stopping the notification service.
|
vumi/transports/parlayx/parlayx.py
|
vumi/transports/parlayx/parlayx.py
|
# -*- test-case-name: vumi.transports.parlayx.tests.test_parlayx -*-
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi import log
from vumi.config import ConfigText, ConfigInt
from vumi.transports.base import Transport
from vumi.transports.failures import TemporaryFailure, PermanentFailure
from vumi.transports.parlayx.client import (
ParlayXClient, ServiceException, PolicyException)
from vumi.transports.parlayx.server import SmsNotificationService
from vumi.transports.parlayx.soaputil import SoapFault
class ParlayXTransportConfig(Transport.CONFIG_CLASS):
web_notification_path = ConfigText(
'Path to listen for delivery and receipt notifications on',
static=True)
web_notification_port = ConfigInt(
'Port to listen for delivery and receipt notifications on',
default=0, static=True)
notification_endpoint_uri = ConfigText(
'URI of the ParlayX SmsNotificationService in Vumi', static=True)
short_code = ConfigText(
'Service activation number or short code to receive deliveries for',
static=True)
remote_send_uri = ConfigText(
'URI of the remote ParlayX SendSmsService', static=True)
remote_notification_uri = ConfigText(
'URI of the remote ParlayX SmsNotificationService', static=True)
class ParlayXTransport(Transport):
CONFIG_CLASS = ParlayXTransportConfig
transport_type = 'sms'
def _create_client(self, config):
return ParlayXClient(
short_code=config.short_code,
endpoint=config.notification_endpoint_uri,
send_uri=config.remote_send_uri,
notification_uri=config.remote_notification_uri)
@inlineCallbacks
def setup_transport(self):
config = self.get_static_config()
log.info('Starting ParlayX transport: %s' % (self.transport_name,))
self.web_resource = yield self.start_web_resources(
[(SmsNotificationService(self.handle_raw_inbound_message,
self.publish_delivery_report),
config.web_notification_path)],
config.web_notification_port)
self._parlayx_client = self._create_client(config)
yield self._parlayx_client.start_sms_notification()
def teardown_transport(self):
log.info('Stopping ParlayX transport: %s' % (self.transport_name,))
d = self.web_resource.loseConnection()
d.addBoth(lambda ignored: self._parlayx_client.stop_sms_notification())
return d
def handle_outbound_message(self, message):
"""
Send a text message via the ParlayX client.
"""
log.info('Sending SMS via ParlayX: %r' % (message.to_json(),))
d = self._parlayx_client.send_sms(
message['to_addr'],
message['content'],
message['message_id'])
d.addErrback(self.handle_outbound_message_failure, message)
d.addCallback(
lambda requestIdentifier: self.publish_ack(
message['message_id'], requestIdentifier))
return d
@inlineCallbacks
def handle_outbound_message_failure(self, f, message):
"""
Handle outbound message failures.
`ServiceException`, `PolicyException` and client-class SOAP faults
        result in `PermanentFailure` being raised; server-class SOAP fault
        instances result in `TemporaryFailure` being raised; and other failures
are passed through.
"""
log.error(f, 'Sending SMS failure on ParlayX: %r' % (
self.transport_name,))
if not f.check(ServiceException, PolicyException):
if f.check(SoapFault):
# We'll give server-class unknown SOAP faults the benefit of
# the doubt as far as temporary failures go.
if f.value.code.endswith('Server'):
raise TemporaryFailure(f)
yield self.publish_nack(message['message_id'], f.getErrorMessage())
if f.check(SoapFault):
# We've ruled out unknown SOAP faults, so this must be a permanent
# failure.
raise PermanentFailure(f)
returnValue(f)
def handle_raw_inbound_message(self, message_id, inbound_message):
"""
Handle incoming text messages from `SmsNotificationService` callbacks.
"""
log.info('Receiving SMS via ParlayX: %r: %r' % (
message_id, inbound_message,))
return self.publish_message(
message_id=message_id,
content=inbound_message.message,
to_addr=inbound_message.service_activation_number,
from_addr=inbound_message.sender_address,
provider='parlayx',
transport_type=self.transport_type)
|
Python
| 0
|
@@ -193,16 +193,28 @@
onfigInt
+, ConfigBool
%0Afrom vu
@@ -1324,16 +1324,129 @@
c=True)%0A
+ start_notifications = ConfigBool(%0A 'Start (and stop) the ParlayX notification service?', static=True)%0A
%0A%0Aclass
@@ -2331,16 +2331,59 @@
config)%0A
+ if config.start_notifications:%0A
@@ -2435,16 +2435,37 @@
tion()%0A%0A
+ @inlineCallbacks%0A
def
@@ -2482,32 +2482,74 @@
ransport(self):%0A
+ config = self.get_static_config()%0A
log.info
@@ -2612,27 +2612,29 @@
,))%0A
-d =
+yield
self.web_re
@@ -2669,33 +2669,56 @@
-d.addBoth(lambda ignored:
+if config.start_notifications:%0A yield
sel
@@ -2758,34 +2758,16 @@
cation()
-)%0A return d
%0A%0A de
|
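Decoded from the URL-encoded diff above, teardown_transport ends up gated on the new start_notifications flag. A sketch of the resulting method, reassembled from the delta rather than copied from the repository:

    @inlineCallbacks
    def teardown_transport(self):
        config = self.get_static_config()
        log.info('Stopping ParlayX transport: %s' % (self.transport_name,))
        yield self.web_resource.loseConnection()
        # Only stop the notification service if this transport started it.
        if config.start_notifications:
            yield self._parlayx_client.stop_sms_notification()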
64ab6b412ab98a753166b8d9ab3545b2cf30f49c
|
Replace deprecated assertRaisesRegexp
|
cloudkittyclient/tests/functional/v2/test_dataframes.py
|
cloudkittyclient/tests/functional/v2/test_dataframes.py
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import uuid
from cloudkittyclient.tests.functional import base
class CkDataframesTest(base.BaseFunctionalTest):
dataframes_data = """
{
"dataframes": [
{
"period": {
"begin": "20190723T122810Z",
"end": "20190723T132810Z"
},
"usage": {
"metric_one": [
{
"vol": {
"unit": "GiB",
"qty": 1.2
},
"rating": {
"price": 0.04
},
"groupby": {
"group_one": "one",
"group_two": "two"
},
"metadata": {
"attr_one": "one",
"attr_two": "two"
}
}
],
"metric_two": [
{
"vol": {
"unit": "MB",
"qty": 200.4
},
"rating": {
"price": 0.06
},
"groupby": {
"group_one": "one",
"group_two": "two"
},
"metadata": {
"attr_one": "one",
"attr_two": "two"
}
}
]
}
},
{
"period": {
"begin": "20190823T122810Z",
"end": "20190823T132810Z"
},
"usage": {
"metric_one": [
{
"vol": {
"unit": "GiB",
"qty": 2.4
},
"rating": {
"price": 0.08
},
"groupby": {
"group_one": "one",
"group_two": "two"
},
"metadata": {
"attr_one": "one",
"attr_two": "two"
}
}
],
"metric_two": [
{
"vol": {
"unit": "MB",
"qty": 400.8
},
"rating": {
"price": 0.12
},
"groupby": {
"group_one": "one",
"group_two": "two"
},
"metadata": {
"attr_one": "one",
"attr_two": "two"
}
}
]
}
}
]
}
"""
def __init__(self, *args, **kwargs):
super(CkDataframesTest, self).__init__(*args, **kwargs)
self.runner = self.cloudkitty
def setUp(self):
super(CkDataframesTest, self).setUp()
self.fixture_file_name = '{}.json'.format(uuid.uuid4())
with open(self.fixture_file_name, 'w') as f:
f.write(self.dataframes_data)
def tearDown(self):
files = os.listdir('.')
if self.fixture_file_name in files:
os.remove(self.fixture_file_name)
super(CkDataframesTest, self).tearDown()
def test_dataframes_add_with_no_args(self):
self.assertRaisesRegexp(
RuntimeError,
'error: the following arguments are required: datafile',
self.runner,
'dataframes add',
fmt='',
has_output=False,
)
def test_dataframes_add(self):
self.runner(
'dataframes add {}'.format(self.fixture_file_name),
fmt='',
has_output=False,
)
def test_dataframes_add_with_hyphen_stdin(self):
with open(self.fixture_file_name, 'r') as f:
self.runner(
'dataframes add -',
fmt='',
stdin=f.read().encode(),
has_output=False,
)
def test_dataframes_get(self):
# TODO(jferrieu): functional tests will be added in another
# patch for `dataframes get`
pass
class OSCDataframesTest(CkDataframesTest):
def __init__(self, *args, **kwargs):
super(OSCDataframesTest, self).__init__(*args, **kwargs)
self.runner = self.openstack
|
Python
| 0.998335
|
@@ -4821,17 +4821,16 @@
sesRegex
-p
(%0A
|
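For context, unittest renamed assertRaisesRegexp to assertRaisesRegex in Python 3.2 and kept the old spelling only as a deprecated alias. A minimal self-contained example of the supported name:

import unittest

class RegexAssertExample(unittest.TestCase):
    def test_message(self):
        # Checks both the exception type and that its message matches
        # the given regular expression.
        with self.assertRaisesRegex(ValueError, 'required: datafile'):
            raise ValueError('the following arguments are required: datafile')

if __name__ == '__main__':
    unittest.main()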
56a1b400b791b2fa98ff03ea9e9709a5f04f7e80
|
correct gitlab authenticator when checking group membership of admins
|
oauthenticator/gitlab.py
|
oauthenticator/gitlab.py
|
"""
Custom Authenticator to use GitLab OAuth with JupyterHub
Modified for GitLab by Laszlo Dobos (@dobos)
based on the GitHub plugin by Kyle Kelley (@rgbkrk)
"""
import json
import os
import sys
from tornado.auth import OAuth2Mixin
from tornado import gen, web
import requests
from tornado.escape import url_escape
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set
from .oauth2 import OAuthLoginHandler, OAuthenticator
# Support gitlab.com and gitlab community edition installations
GITLAB_HOST = os.environ.get('GITLAB_HOST') or 'https://gitlab.com'
GITLAB_API = '%s/api/v3' % GITLAB_HOST
def _api_headers(access_token):
return {"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "token {}".format(access_token)
}
def _get_next_page(response):
# Gitlab uses Link headers for pagination.
# See https://docs.gitlab.com/ee/api/README.html#pagination-link-header
link_header = response.headers.get('Link')
if not link_header:
return
for link in requests.utils.parse_header_links(link_header):
if link.get('rel') == 'next':
return link['url']
# if no "next" page, this is the last one
return None
class GitLabMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "%s/oauth/authorize" % GITLAB_HOST
_OAUTH_ACCESS_TOKEN_URL = "%s/oauth/access_token" % GITLAB_HOST
class GitLabLoginHandler(OAuthLoginHandler, GitLabMixin):
pass
class GitLabOAuthenticator(OAuthenticator):
login_service = "GitLab"
client_id_env = 'GITLAB_CLIENT_ID'
client_secret_env = 'GITLAB_CLIENT_SECRET'
login_handler = GitLabLoginHandler
gitlab_group_whitelist = Set(
config=True,
help="Automatically whitelist members of selected groups",
)
@gen.coroutine
def authenticate(self, handler, data=None):
code = handler.get_argument("code", False)
if not code:
raise web.HTTPError(400, "oauth callback made without a token")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitLab Access Token
#
# See: https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/oauth2.md
# GitLab specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
code=code,
grant_type="authorization_code",
redirect_uri=self.get_callback_url(handler),
)
validate_server_cert = self.validate_server_cert
url = url_concat("%s/oauth/token" % GITLAB_HOST,
params)
req = HTTPRequest(url,
method="POST",
headers={"Accept": "application/json"},
validate_cert=validate_server_cert,
body='' # Body is required for a POST...
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
req = HTTPRequest("%s/user" % GITLAB_API,
method="GET",
validate_cert=validate_server_cert,
headers=_api_headers(access_token)
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
user_id = resp_json["id"]
is_admin = resp_json["is_admin"]
# Check if user is a member of any whitelisted organizations.
# This check is performed here, as it requires `access_token`.
if self.gitlab_group_whitelist:
user_in_group = yield self._check_group_whitelist(
username, user_id, is_admin, access_token)
return username if user_in_group else None
else: # no organization whitelisting
return username
@gen.coroutine
def _check_group_whitelist(self, username, user_id, is_admin, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
if is_admin:
# For admins, /groups returns *all* groups. As a workaround
# we check if we are a member of each group in the whitelist
for group in map(url_escape, self.gitlab_group_whitelist):
url = "%s/groups/%s/members/%d" % (GITLAB_API, group, user_id)
req = HTTPRequest(url, method="GET", headers=headers)
resp = yield http_client.fetch(req)
if resp.code == 200:
return True # user _is_ in group
else:
# For regular users we get all the groups to which they have access
# and check if any of these are in the whitelisted groups
next_page = url_concat("%s/groups" % GITLAB_API,
dict(all_available=True))
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = _get_next_page(resp)
user_groups = set(entry["path"] for entry in resp_json)
# check if any of the organizations seen thus far are in whitelist
if len(self.gitlab_group_whitelist & user_groups) > 0:
return True
return False
class LocalGitLabOAuthenticator(LocalAuthenticator, GitLabOAuthenticator):
"""A version that mixes in local system user creation"""
pass
|
Python
| 0.000002
|
@@ -4860,32 +4860,51 @@
client.fetch(req
+, raise_error=False
)%0A
|
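The one-line fix matters because Tornado's AsyncHTTPClient raises HTTPError for non-2xx responses by default, so the admin membership check could never observe the 404 that means "not a member". A hedged sketch of the pattern (the function name is illustrative):

from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPRequest

@gen.coroutine
def is_group_member(url, headers):
    client = AsyncHTTPClient()
    req = HTTPRequest(url, method="GET", headers=headers)
    # raise_error=False returns the response even for 404/403, letting
    # the caller branch on resp.code instead of catching HTTPError.
    resp = yield client.fetch(req, raise_error=False)
    raise gen.Return(resp.code == 200)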
55fb57316f51dc984e35516d0cceb62432da0fe9
|
Make 0-arity nodes act like ignored nodes when hashed or passed-through. This might need changing
|
ocradmin/plugins/node.py
|
ocradmin/plugins/node.py
|
"""
Base class for OCR nodes.
"""
import logging
FORMAT = '%(levelname)-5s %(name)s: %(message)s'
logging.basicConfig(format=FORMAT)
LOGGER = logging.getLogger("Node")
LOGGER.setLevel(logging.DEBUG)
import cache
class NodeError(Exception):
def __init__(self, node, msg):
super(NodeError, self).__init__(msg)
self.node = node
self.msg = msg
class UnsetParameterError(NodeError):
pass
class ValidationError(NodeError):
pass
class InvalidParameterError(NodeError):
pass
class InputOutOfRange(NodeError):
pass
class CircularDagError(NodeError):
pass
def noop_abort_func(*args):
return False
def noop_progress_func(*args):
pass
class Node(object):
"""
Node object. Evaluates some input and
    returns the output.
"""
name = "Base::None"
description = "Base node"
arity = 1 # number of inputs
passthrough = 0 # input to pass through if node ignored
stage = "general"
_parameters = [
]
def __init__(self, label=None, abort_func=None,
cacher=None,
progress_func=None, logger=None, ignored=False):
"""
Initialise a node.
"""
self.abort_func = abort_func if abort_func is not None \
else noop_abort_func
self.logger = logger if logger is not None \
else LOGGER
self.progress_func = progress_func if progress_func is not None \
else noop_progress_func
self._cacher = cacher if cacher is not None \
else cache.BasicCacher(logger=self.logger)
self._params = {}
self.label = label
self._parents = []
self._inputs = [None for n in range(self.arity)]
self._inputdata = [None for n in range(self.arity)]
self.logger.debug("Initialised %s with cacher: %s" % (self.label, self._cacher))
self.ignored = ignored
@classmethod
def parameters(cls):
return cls._parameters
def set_param(self, param, name):
"""
Set a parameter.
"""
self._params[param] = name
def _set_p(self, p, v):
"""
Set a parameter internally.
"""
pass
def _eval(self):
"""
Perform actual processing.
"""
pass
def add_parent(self, n):
"""
Add a parent node.
"""
if self == n:
raise CircularDagError(self, "added as parent to self")
if not n in self._parents:
self._parents.append(n)
def has_parents(self):
"""
Check if the node is a terminal node
or if there's a tree further down.
"""
return bool(len(self._parents))
def set_input(self, num, n):
"""
Set an input.
num: 0-based input number
node: input node
"""
if num > len(self._inputs) - 1:
raise InputOutOfRange(self, "Input '%d'" % num)
n.add_parent(self)
self._inputs[num] = n
def mark_dirty(self):
"""
Tell the node it needs to reevaluate.
"""
self.logger.debug("%s marked dirty", self)
for parent in self._parents:
parent.mark_dirty()
self._cacher.clear_cache()
def set_cache(self, cache):
"""
Set the cache on a node, preventing it
from eval'ing its inputs.
"""
self._cacher.set_cache(self, cache)
def eval_input(self, num):
"""
Eval an input node.
"""
return self._inputs[num].eval()
def eval_inputs(self):
"""
Eval all inputs and store the data in
self._inputdata.
"""
for i in range(len(self._inputs)):
self._inputdata[i] = self.eval_input(i)
def get_input_data(self, num):
"""
Fetch data for a given input, eval'ing
it if necessary.
"""
if self._inputdata[num] is None:
self._inputdata[num] = self.eval_input(num)
return self._inputdata[num]
return self._inputdata[num]
def validate(self):
"""
Check params are present and correct.
"""
if self.arity > 0:
for n in self._inputs:
if n is not None:
n.validate()
self._validate()
def _validate(self):
pass
def hash_value(self):
"""
Get a representation of this
        node's current state. This is a data
        structure containing the node type, its
        parameters, and its children's hash_values.
"""
# if ignore, return the hash of the
# passthrough input
if self.ignored:
return self._inputs[self.passthrough].hash_value()
def makesafe(val):
if isinstance(val, unicode):
return val.encode()
elif isinstance(val, float):
return str(val)
return val
return dict(
name=self.name.encode(),
params=[[makesafe(v) for v in p] for p \
in self._params.iteritems()],
children=[n.hash_value() for n in self._inputs \
if n is not None]
)
def null_data(self):
"""
What we return when ignored.
"""
if self.arity > 0:
return self.eval_input(self.passthrough)
else:
return None
def first_active(self):
"""
Get the first node in the tree that is
active. If not ignored this is 'self'.
"""
if not self.ignored:
return self
else:
return self._inputs[self.passthrough].first_active()
def eval(self):
"""
Eval the node.
"""
if self.ignored:
self.logger.debug("Ignoring node: %s", self)
return self.null_data()
self.validate()
for p, v in self._params.iteritems():
self.logger.debug("Set Param %s.%s -> %s",
self, p, v)
self._set_p(p, v)
if self._cacher.has_cache(self):
self.logger.debug("%s returning cached input", self)
return self._cacher.get_cache(self)
self.eval_inputs()
self.logger.debug("Evaluating '%s' Node", self)
data = self._eval()
self._cacher.set_cache(self, data)
return data
def __repr__(self):
return "<%s: %s: %s" % (self.__class__.__name__, self.name, self.label)
def __str__(self):
return self.label
|
Python
| 0.000002
|
@@ -4713,32 +4713,51 @@
nput%0A if
+self.arity %3E 0 and
self.ignored:%0A
@@ -5657,62 +5657,39 @@
if
-not
self.
-ignored:%0A return self%0A else
+arity %3E 0 and self.ignored
:%0A
@@ -5750,16 +5750,36 @@
active()
+%0A return self
%0A%0A de
|
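Decoded from the diff above, the guard that makes 0-arity nodes act as if never ignored: a node with no inputs has an empty _inputs list, so delegating to the passthrough input would raise IndexError. The resulting method, reassembled from the delta:

    def first_active(self):
        # A 0-arity node has no passthrough input to delegate to, so it
        # is always its own first active node, even when ignored.
        if self.arity > 0 and self.ignored:
            return self._inputs[self.passthrough].first_active()
        return self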
2191f877270fc984d5a8e7cc2ffe9ab8c1630101
|
fix style
|
apps/smeuhoverride/views.py
|
apps/smeuhoverride/views.py
|
# Create your views here.
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.contrib.auth.decorators import login_required
from tagging.models import Tag
from tagging.utils import calculate_cloud, LOGARITHMIC
from pinax.apps.blog.models import Post
from photos.models import Image
class TagInTheCloud:
"""
a fake Tag model to feed the cloud
"""
def __init__(self, name, count, _):
self.name = name
self.count = count
def tag_index(request, template_name="tagging_ext/index.html", min_size=0, limit=1000):
query = """
SELECT tag.name as name, COUNT(tag_item.tag_id) as counter, tag_item.tag_id as tag_id
FROM tagging_taggeditem as tag_item
INNER JOIN tagging_tag as tag ON (tag.id = tag_item.tag_id)
GROUP BY tag.name, tag_id
ORDER BY tag.name
"""
cursor = connection.cursor()
cursor.execute(query)
tags = calculate_cloud(
[ TagInTheCloud(*row) for row in cursor ],
steps=5,
distribution=LOGARITHMIC
)
return render_to_response(template_name, {'tags': tags},
context_instance=RequestContext(request))
def user_blog_index(request, username, template_name="blog/user_blog.html"):
blogs = Post.objects.filter(status=2).select_related(depth=1).order_by("-publish")
if username is not None:
user = get_object_or_404(User, username=username.lower())
blogs = blogs.filter(author=user)
return render_to_response(template_name, {
"blogs": blogs,
"username": username,
}, context_instance=RequestContext(request))
def blog_post_source(request, username, slug):
post = get_object_or_404(Post, slug=slug,
author__username=username)
if post.status == 1 and post.author != request.user:
raise Http404
return HttpResponse(post.body, mimetype="text/plain; charset=utf-8")
|
Python
| 0.000001
|
@@ -638,17 +638,21 @@
count,
-_
+*args
):%0A
@@ -766,30 +766,19 @@
l%22,
-min_size=0, limit=1000
+*args, **kw
):%0A
|
9e909ae8988168de5ad65e01b2231b4fb81088e7
|
Check if the fqdn is localhost.localdomain. If yes, raise Exception
|
dbaas/workflow/steps/mysql/deploy/config_vms_foreman.py
|
dbaas/workflow/steps/mysql/deploy/config_vms_foreman.py
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import get_credentials_for
from util import exec_remote_command
from dbaas_foreman import get_foreman_provider
from dbaas_credentials.models import CredentialType
from dbaas_cloudstack.models import HostAttr
from ...util.base import BaseStep
from ....exceptions.error_codes import DBAAS_0007
LOG = logging.getLogger(__name__)
class ConfigVMsForeman(BaseStep):
def __unicode__(self):
return "Configuring VMs on Foreman..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
environment = workflow_dict['environment']
credentials = get_credentials_for(environment=environment,
credential_type=CredentialType.FOREMAN)
forman_provider = get_foreman_provider(databaseinfra=databaseinfra,
credentials=credentials)
vip = workflow_dict['vip']
for host in workflow_dict['hosts']:
LOG.info('Get fqdn for host {}'.format(host))
host_attr = HostAttr.objects.get(host=host)
script = 'hostname'
output = {}
return_code = exec_remote_command(server=host.address,
username=host_attr.vm_user,
password=host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
raise Exception(str(output))
fqdn = output['stdout'][0].strip()
LOG.info("Call forman for fqdn={}, vip={}, dsrc={}".format(fqdn, vip.vip_ip, vip.dscp))
forman_provider.setup_database_dscp(fqdn=fqdn,
vip_ip=vip.vip_ip,
dsrc=vip.dscp,
port=3306)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0007)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0007)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
Python
| 0.998786
|
@@ -1787,24 +1787,235 @@
0%5D.strip()%0A%0A
+ if fqdn == %22localhost.localdomain%22:%0A errormsg = %22The fqdn %7B%7D is not valid.%22.format(fqdn)%0A LOG.error(errormsg)%0A raise Exception(errormsg)%0A%0A
@@ -2036,16 +2036,17 @@
Call for
+e
man for
|
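The added guard in isolation: localhost.localdomain is the placeholder fqdn of a misconfigured host, so the step fails fast instead of registering a useless name with Foreman. A standalone sketch:

def require_real_fqdn(fqdn):
    # Fail fast on the default placeholder; registering it with Foreman
    # would silently configure the wrong host.
    if fqdn == "localhost.localdomain":
        raise Exception("The fqdn {} is not valid.".format(fqdn))
    return fqdn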
b9c474d1877190ef73e295c46ac8b7ae58a803cf
|
put pytest skip marker after imports
|
borg/testsuite/convert.py
|
borg/testsuite/convert.py
|
import os
import shutil
import tempfile
import pytest
try:
import attic.repository
import attic.key
import attic.helpers
except ImportError:
attic = None
pytestmark = pytest.mark.skipif(attic is None,
reason='cannot find an attic install')
from ..converter import AtticRepositoryConverter, AtticKeyfileKey
from ..helpers import get_keys_dir
from ..key import KeyfileKey
from ..repository import Repository, MAGIC
from . import BaseTestCase
class ConversionTestCase(BaseTestCase):
def open(self, path, repo_type=Repository, create=False):
return repo_type(os.path.join(path, 'repository'), create=create)
def setUp(self):
self.tmppath = tempfile.mkdtemp()
self.attic_repo = self.open(self.tmppath,
repo_type=attic.repository.Repository,
create=True)
# throw some stuff in that repo, copied from `RepositoryTestCase.test1`
for x in range(100):
self.attic_repo.put(('%-32d' % x).encode('ascii'), b'SOMEDATA')
self.attic_repo.close()
def tearDown(self):
shutil.rmtree(self.tmppath)
    def repo_valid(self):
repository = self.open(self.tmppath)
# can't check raises() because check() handles the error
state = repository.check()
repository.close()
return state
def test_convert_segments(self):
# check should fail because of magic number
assert not self.repo_valid()
print("opening attic repository with borg and converting")
repo = self.open(self.tmppath, repo_type=AtticRepositoryConverter)
segments = [filename for i, filename in repo.io.segment_iterator()]
repo.close()
repo.convert_segments(segments, dryrun=False)
assert self.repo_valid()
class EncryptedConversionTestCase(ConversionTestCase):
class MockArgs:
def __init__(self, path):
self.repository = attic.helpers.Location(path)
def setUp(self):
super().setUp()
# we use the repo dir for the created keyfile, because we do
# not want to clutter existing keyfiles
os.environ['ATTIC_KEYS_DIR'] = self.tmppath
# we use the same directory for the converted files, which
# will clutter the previously created one, which we don't care
# about anyways. in real runs, the original key will be retained.
os.environ['BORG_KEYS_DIR'] = self.tmppath
os.environ['ATTIC_PASSPHRASE'] = 'test'
self.key = attic.key.KeyfileKey.create(self.attic_repo,
self.MockArgs(self.tmppath))
def test_keys(self):
repository = self.open(self.tmppath,
repo_type=AtticRepositoryConverter)
keyfile = AtticKeyfileKey.find_key_file(repository)
AtticRepositoryConverter.convert_keyfiles(keyfile, dryrun=False)
# check that the new keyfile is alright
keyfile = os.path.join(get_keys_dir(),
os.path.basename(self.key.path))
with open(keyfile, 'r') as f:
assert f.read().startswith(KeyfileKey.FILE_ID)
def test_convert_all(self):
# check should fail because of magic number
assert not self.repo_valid()
print("opening attic repository with borg and converting")
repo = self.open(self.tmppath, repo_type=AtticRepositoryConverter)
with pytest.raises(NotImplementedError):
repo.convert(dryrun=False)
# check that the new keyfile is alright
keyfile = os.path.join(get_keys_dir(),
os.path.basename(self.key.path))
with open(keyfile, 'r') as f:
assert f.read().startswith(KeyfileKey.FILE_ID)
assert self.repo_valid()
|
Python
| 0
|
@@ -168,126 +168,8 @@
None
-%0Apytestmark = pytest.mark.skipif(attic is None,%0A reason='cannot find an attic install')
%0A%0Afr
@@ -367,16 +367,135 @@
stCase%0A%0A
+pytestmark = pytest.mark.skipif(attic is None,%0A reason='cannot find an attic install')%0A%0A
%0Aclass C
|
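pytest collects the module-level name pytestmark wherever it appears at module scope, which is why the marker can move below the import block without changing behavior. A minimal self-contained example:

import pytest

try:
    import attic  # optional dependency, may be absent
except ImportError:
    attic = None

# Applied to every test in the module regardless of where the
# assignment sits, so it can follow the imports.
pytestmark = pytest.mark.skipif(attic is None,
                                reason='cannot find an attic install')

def test_needs_attic():
    assert attic is not None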
7750aaaea4115007a2cd39bad0277e81c2d4a031
|
Use the correct parameter for Google API key
|
omgeo/services/google.py
|
omgeo/services/google.py
|
import logging
from .base import GeocodeService
from omgeo.places import Candidate
from omgeo.preprocessors import ComposeSingleLine
logger = logging.getLogger(__name__)
class Google(GeocodeService):
"""
Class to geocode using Google's geocoding API.
"""
_endpoint = 'https://maps.googleapis.com/maps/api/geocode/json'
DEFAULT_PREPROCESSORS = [ComposeSingleLine()]
LOCATOR_MAPPING = {
'ROOFTOP': 'rooftop',
'RANGE_INTERPOLATED': 'interpolated',
}
def __init__(self, preprocessors=None, postprocessors=None, settings=None):
preprocessors = self.DEFAULT_PREPROCESSORS if preprocessors is None else preprocessors
GeocodeService.__init__(self, preprocessors, postprocessors, settings)
def _geocode(self, pq):
params = {
'address': pq.query,
'api_key': self._settings['api_key']
}
if pq.country:
params['components'] = 'country:' + pq.country
if pq.viewbox:
params['bounds'] = pq.viewbox.to_google_str()
response_obj = self._get_json_obj(self._endpoint, params)
return [self._make_candidate_from_result(r) for r in response_obj['results']]
def _make_candidate_from_result(self, result):
""" Make a Candidate from a Google geocoder results dictionary. """
candidate = Candidate()
candidate.match_addr = result['formatted_address']
candidate.x = result['geometry']['location']['lng']
candidate.y = result['geometry']['location']['lat']
candidate.locator = self.LOCATOR_MAPPING.get(result['geometry']['location_type'], '')
component_lookups = {
'city': {'type': 'locality', 'key': 'long_name'},
'subregion': {'type': 'administrative_area_level_2', 'key': 'long_name'},
'region': {'type': 'administrative_area_level_1', 'key': 'short_name'},
'postal': {'type': 'postal_code', 'key': 'long_name'},
'country': {'type': 'country', 'key': 'short_name'},
}
for (field, lookup) in component_lookups.iteritems():
setattr(candidate, 'match_' + field, self._get_component_from_result(result, lookup))
candidate.geoservice = self.__class__.__name__
return candidate
def _get_component_from_result(self, result, lookup):
"""
Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this should be used for unique component types (e.g.
'locality'), not for categories (e.g. 'political') that can describe multiple components.
:arg dict result: A results dict with an 'address_components' key, as returned by the
Google geocoder.
:arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
the desired address component value.
:returns: address component or empty string
"""
for component in result['address_components']:
if lookup['type'] in component['types']:
return component.get(lookup['key'], '')
return ''
|
Python
| 0.000006
|
@@ -839,20 +839,16 @@
'
-api_
key': se
|
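The Google Geocoding API reads its credential from a parameter named key, so the original api_key entry was ignored and the request went out unauthenticated. A sketch of the corrected params dict (the key value is a placeholder):

params = {
    'address': '1600 Amphitheatre Parkway, Mountain View, CA',
    # "key" is the parameter name the Geocoding API actually reads.
    'key': 'YOUR_API_KEY',  # placeholder, not a real credential
}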
1ca331975ab91cf619c3c785ead5b352af47963a
|
Use thread-local storage.
|
modoboa/lib/middleware.py
|
modoboa/lib/middleware.py
|
# coding: utf-8
"""Custom middlewares."""
from django.http import HttpResponseRedirect
from modoboa.lib.exceptions import ModoboaException
from modoboa.lib.signals import request_accessor
from modoboa.lib.web_utils import (
_render_error, ajax_response, render_to_json_response
)
from . import singleton
class AjaxLoginRedirect(object):
def process_response(self, request, response):
if request.is_ajax():
if type(response) == HttpResponseRedirect:
response.status_code = 278
return response
class CommonExceptionCatcher(object):
"""Modoboa exceptions catcher."""
def process_exception(self, request, exception):
if not isinstance(exception, ModoboaException):
return None
if request.is_ajax() or "/api/" in request.path:
if exception.http_code is None:
return ajax_response(
request, status="ko", respmsg=unicode(exception),
norefresh=True
)
return render_to_json_response(
unicode(exception), status=exception.http_code
)
return _render_error(
request, user_context=dict(error=str(exception))
)
class RequestCatcherMiddleware(singleton.Singleton):
"""Simple middleware to store the current request.
FIXME: the Singleton hack is used to make tests work. I don't know
why but middlewares are not dropped between test case runs so more
than one instance can be listening to the request_accessor signal
and we don't want that!
"""
def __init__(self):
self._request = None
request_accessor.connect(self)
def process_request(self, request):
self._request = request
def process_response(self, request, response):
"""Empty self._request."""
self._request = None
return response
def __call__(self, **kwargs):
return self._request
|
Python
| 0
|
@@ -37,16 +37,45 @@
es.%22%22%22%0A%0A
+from threading import local%0A%0A
from dja
@@ -314,31 +314,30 @@
%0A)%0A%0A
-from . import singleton
+_local_store = local()
%0A%0A%0Ac
@@ -1306,27 +1306,14 @@
are(
-singleton.Singleton
+object
):%0A
@@ -1369,254 +1369,8 @@
est.
-%0A%0A FIXME: the Singleton hack is used to make tests work. I don't know%0A why but middlewares are not dropped between test case runs so more%0A than one instance can be listening to the request_accessor signal%0A and we don't want that!%0A
%22%22%22%0A
@@ -1394,38 +1394,45 @@
(self):%0A
-self._
+_local_store.
request = None%0A
@@ -1510,38 +1510,45 @@
quest):%0A
-self._
+_local_store.
request = reques
@@ -1619,28 +1619,20 @@
%22Empty s
-elf._request
+tore
.%22%22%22%0A
@@ -1636,22 +1636,29 @@
-self._
+_local_store.
request
@@ -1742,14 +1742,21 @@
urn
-self._
+_local_store.
requ
|
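The replacement relies on threading.local, which gives each thread its own attribute namespace. A minimal self-contained sketch of the store pattern the diff introduces:

import threading

_local_store = threading.local()

def set_request(request):
    # Each thread writes to its own copy of the attribute, so two
    # requests handled concurrently cannot clobber each other, which
    # was the risk with a single attribute on a Singleton middleware.
    _local_store.request = request

def get_request():
    return getattr(_local_store, 'request', None)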
70ce7627b11bf804660bc66a60910f66f8f106bd
|
Reformat code
|
boundary/metric_delete.py
|
boundary/metric_delete.py
|
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements command to remove a metric definition from a Boundary account.
"""
from boundary import ApiCli
"""
Uses the following Boundary API:
http://premium-documentation.boundary.com/v1/delete/metrics/:metric
"""
class MetricDelete (ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.method = "DELETE"
self.metricName = None
def addArguments(self):
ApiCli.addArguments(self)
self.parser.add_argument('-n', '--metric-name', dest='metricName',action='store',required=True,metavar='metric_name',help='Metric identifier')
def getArguments(self):
'''
Extracts the specific arguments of this CLI
'''
ApiCli.getArguments(self)
if self.args.metricName != None:
self.metricName = self.args.metricName
self.path = "v1/metrics/{0}".format(self.metricName)
def validateArguments(self):
return ApiCli.validateArguments(self)
def getDescription(self):
return "Deletes a metric definition from a Boundary account"
|
Python
| 0
|
@@ -803,16 +803,17 @@
ic%0A%22%22%22%0A%0A
+%0A
class Me
@@ -822,17 +822,16 @@
icDelete
-
(ApiCli)
@@ -832,22 +832,16 @@
piCli):%0A
- %0A
def
@@ -948,25 +948,17 @@
= None%0A
-
%0A
+
def
@@ -1085,16 +1085,17 @@
icName',
+
action='
@@ -1101,16 +1101,17 @@
'store',
+
required
@@ -1116,16 +1116,50 @@
ed=True,
+%0A
metavar=
@@ -1172,16 +1172,17 @@
c_name',
+
help='Me
@@ -1199,24 +1199,16 @@
ifier')%0A
-
%0A def
@@ -1433,28 +1433,9 @@
Name
- %0A
+%0A
%0A
@@ -1492,21 +1492,17 @@
icName)%0A
-
%0A
+
def
@@ -1576,18 +1576,9 @@
lf)%0A
-
%0A
+
@@ -1672,12 +1672,8 @@
ccount%22%0A
-
|
b628e466f86bc27cbe45ec27a02d4774a0efd3bb
|
Clean out dist and build before building
|
semantic_release/pypi.py
|
semantic_release/pypi.py
|
"""PyPI
"""
from invoke import run
from semantic_release import ImproperConfigurationError
def upload_to_pypi(
dists: str = 'sdist bdist_wheel',
username: str = None,
password: str = None,
skip_existing: bool = False
):
"""Creates the wheel and uploads to pypi with twine.
    :param dists: The dists string passed to setup.py. Default: 'sdist bdist_wheel'
:param username: PyPI account username string
:param password: PyPI account password string
:param skip_existing: Continue uploading files if one already exists. (Only valid when
uploading to PyPI. Other implementations may not support this.)
"""
if username is None or password is None or username == "" or password == "":
raise ImproperConfigurationError('Missing credentials for uploading')
run('python setup.py {}'.format(dists))
run(
'twine upload -u {} -p {} {} {}'.format(
username,
password,
'--skip-existing' if skip_existing else '',
'dist/*'
)
)
run('rm -rf build dist')
|
Python
| 0
|
@@ -815,16 +815,45 @@
ading')%0A
+ run('rm -rf build dist')%0A
run(
|
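The reorder matters because anything left in dist/ from a previous release would match the 'dist/*' glob and be re-uploaded by twine. A sketch of the resulting order:

from invoke import run

def build_fresh_dists(dists='sdist bdist_wheel'):
    # Clean first, so only the artifacts built below reach twine.
    run('rm -rf build dist')
    run('python setup.py {}'.format(dists))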
cc981e1d8d58a9e8337d530370355a96cdf41e71
|
Remove the pesky underscore
|
robot/robot/src/common/autonomous_helper.py
|
robot/robot/src/common/autonomous_helper.py
|
#
# Add this to robotpy as the autonomous toolbox?
#
#
# Or distribute it separately?
#
#
# Interesting. Could implement commands and such in python using decorators
# and other neat madness?
#
try:
import wpilib
except ImportError:
from pyfrc import wpilib
import functools
import inspect
#
# Decorators:
#
# timed_state
#
def timed_state(f=None, time=None, next_state=None, first=False):
if f is None:
return functools.partial(timed_state, time=time, next_state=next_state, first=first)
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
# store state variables here
wrapper.first = first
wrapper.name = f.__name__
wrapper.next_state = next_state
wrapper.time = time
wrapper.expires = 0xffffffff
wrapper.ran = False
# inspect the args, provide a correct call implementation
args, varargs, keywords, defaults = inspect.getargspec(f)
if keywords is not None or varargs is not None:
raise ValueError("Invalid function parameters for function %s" % wrapper.name)
# TODO: there has to be a better way to do this. oh well.
if len(args) == 1:
wrapper.run = lambda self, tm, state_tm: f(self)
elif len(args) == 2:
if args[1] == 'tm':
wrapper.run = lambda self, tm, state_tm: f(self, tm)
elif args[1] == 'state_tm':
wrapper.run = lambda self, tm, state_tm: f(self, state_tm)
else:
raise ValueError("Invalid parameter name for function %s" % wrapper.name)
elif args == ['self', 'tm', 'state_tm']:
wrapper.run = lambda self, tm, state_tm: f(self, tm, state_tm)
elif args == ['self', 'state_tm', 'tm']:
wrapper.run = lambda self, tm, state_tm: f(self, state_tm, tm)
else:
print(args)
raise ValueError("Invalid parameter names for function %s" % wrapper.name)
# provide a default docstring?
return wrapper
class StatefulAutonomous(object):
'''
TODO: document this
'''
__built = False
__done = False
__sd_args = []
def __init__(self, components):
if not hasattr(self, 'MODE_NAME'):
raise ValueError("Must define MODE_NAME class variable")
for k,v in components.items():
setattr(self, k, v)
self.__build_states()
def register_sd_var(self, name, default, add_prefix=True):
sd_name = name
if add_prefix:
sd_name = '%s_%s' % (self.MODE_NAME, name)
if isinstance(default, bool):
wpilib.SmartDashboard.PutBoolean(sd_name, default)
args = (name, sd_name, wpilib.SmartDashboard.GetBoolean)
elif isinstance(default, int) or isinstance(default, float):
wpilib.SmartDashboard.PutNumber(sd_name, default)
args = (name, sd_name, wpilib.SmartDashboard.GetNumber)
elif isinstance(default, str):
wpilib.SmartDashboard.PutString(sd_name, default)
args = (name, sd_name, wpilib.SmartDashboard.GetString)
else:
raise ValueError("Invalid default value")
self.__sd_args.append(args)
def __build_states(self):
has_first = False
#for each state function:
for name in dir(self.__class__):
state = getattr(self.__class__, name)
if name.startswith('__') or not hasattr(state, 'next_state'):
continue
# find a pre-execute function if available
state.pre = getattr(self.__class__, 'pre_%s' % name, None)
# is this the first state to execute?
if state.first:
if has_first:
raise ValueError("Multiple states were specified as the first state!")
self.__first = state
has_first = True
# make the time tunable
if state.time is not None:
self.register_sd_var(state.name + '_time', state.time)
if not has_first:
raise ValueError("Starting state not defined! Use first=True on a state decorator")
self.__built = True
def _validate(self):
# TODO: make sure the state machine can be executed
# - run at robot time? Probably not. Run this as part of a unit test
pass
# how long does introspection take? do this in the constructor?
# can do things like add all of the timed states, and project how long
# it will take to execute it (don't forget about cycles!)
def on_enable(self):
if not self.__built:
raise ValueError('super().__init__(components) was never called!')
# print out the details of this autonomous mode, and any tunables
print("Tunable values:")
# read smart dashboard values, print them
for name, sd_name, fn in self.__sd_args:
val = fn(sd_name)
setattr(self, name, val)
print("-> %20s: %s" % (name, val))
# set the starting state
self.__state = self.__first
def on_disable(self):
'''Called when the autonomous mode is disabled'''
pass
def next_state(self, name):
'''Call this function to transition to the next state'''
if name is not None:
self.__state = getattr(self.__class__, name)
else:
self.__state = None
if self.__state is None:
return
self.__state.ran = False
def update(self, tm):
# state: first, name, pre, time
state = self.__state
# determine if the time has passed to execute the next state
if state is not None and state.expires < tm:
self.next_state(state.next_state)
state = self.__state
if state is None:
if not self.__done:
print("%.3fs: Done with autonomous mode" % tm)
self.__done = True
return
# is this the first time this was executed?
if not state.ran:
state.ran = True
state.expires = tm + getattr(self, state.name + '_time')
state.start_time = tm
print("%.3fs: Entering state:" % tm, state.name)
# execute the pre state if it exists
if state.pre is not None:
state.pre(self, tm)
# execute the state
state.run(self, tm, tm - state.start_time)
|
Python
| 0.999999
|
@@ -2575,17 +2575,17 @@
me = '%25s
-_
+
%25s' %25 (s
|
71b1cfbd375907f19e6f6eb587bd539fbaf3242c
|
Fix #167: Error when trying to append to sys.path from main sketch file
|
runtime/src/jycessing/detect_sketch_mode.py
|
runtime/src/jycessing/detect_sketch_mode.py
|
import ast
import re
from jycessing import MixedModeError
"""
Determines the sketch mode, namely:
"ACTIVE" if the sketch uses draw() and/or setup() functions;
"STATIC" otherise.
"MIXED" if the user seems to have erroneously both declared a draw()
function and called drawing functions outside draw().
"""
# If you define any of these functions, you're in ACTIVE mode.
activeModeFunc = re.compile(r"""
^(
draw
|
setup
|
key(Pressed|Released|Typed)
|
mouse(Clicked|Dragged|Moved|Pressed|Released|Wheel)
)$
""", re.X)
# If you're in ACTIVE mode, you can't call any of these functions
# outside a function body.
illegalActiveModeCall = re.compile(r"""
^(
size
|
bezier(Detail|Point|Tangent)?
|
curve(Detail|Point|Tangent|Tightness)?
|
arc|ellipse|line|point|quad|rect|triangle
|
    box|sphere(Detail)?
    |
    (begin|end)(Contour|Shape)
|
(quadratic|bezier|curve)?Vertex | vertex
|
(apply|pop|print|push|reset)Matrix
|
rotate[XYZ]?
|
(ambient|directional|point|spot)Light
|
    light(Falloff|Specular|s)
|
noLights
|
normal
|
ambient|emissive|shininess|specular
|
(load|update)Pixels
|
background|clear|(no)?(Fill|Stroke)
)$
""", re.X)
def detect_mode(code, filename):
module = ast.parse(code + "\n\n", filename=filename)
mode = 'STATIC'
for node in module.body:
if isinstance(node, ast.FunctionDef):
if activeModeFunc.match(node.name):
mode = 'ACTIVE'
break
if mode == 'STATIC':
return mode, None
for node in module.body:
if not isinstance(node, ast.Expr):
continue
e = node.value
if not isinstance(e, ast.Call):
continue
f = e.func
if illegalActiveModeCall.match(f.id):
return 'MIXED', MixedModeError(
"You can't call %s() outside a function in \"active mode\"." % f.id,
__file__, node.lineno - 1)
return mode, None
__mode__, __error__ = detect_mode(__processing_source__, __file__)
|
Python
| 0
|
@@ -1976,16 +1976,37 @@
if
+hasattr(f, 'id') and
illegalA
|
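The crash behind issue #167: ast.Call.func is an ast.Name (which has an .id) for calls like size(200, 200), but an ast.Attribute (no .id) for calls like sys.path.append(...). A self-contained demonstration:

import ast

tree = ast.parse("sys.path.append('libs')\nsize(200, 200)\n")
for node in ast.walk(tree):
    if isinstance(node, ast.Call):
        f = node.func
        # ast.Attribute nodes have no .id, which is what the
        # hasattr(f, 'id') guard in the diff protects against.
        print(type(f).__name__, getattr(f, 'id', None))
# Attribute None
# Name size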
b80aa5999de7167745243bf9ae4d83b63a4e56c5
|
Rename Runner attr `form_document` to `form_controls`, which is the proper name.
|
orbeon_xml_api/runner.py
|
orbeon_xml_api/runner.py
|
from builder import Builder
from utils import generate_xml_root
from controls import StringControl, DateControl, TimeControl, DateTimeControl, \
BooleanControl, AnyURIControl, EmailControl, DecimalControl, \
Select1Control, OpenSelect1Control, SelectControl
class Runner:
def __init__(self, xml, builder=None, builder_xml=None, lang='en'):
"""
@param builder Builder
@param builder_xml str
"""
self.xml = xml
self.builder = builder
self.builder_xml = builder_xml
self.lang = lang
self.xml_root = None
self.set_xml_root()
if self.builder and self.builder_xml:
raise Exception("Constructor accepts either builder or builder_xml.")
if self.builder:
assert isinstance(self.builder, Builder)
elif self.builder_xml:
assert isinstance(self.builder_xml, basestring)
else:
raise Exception("Provide either the argument: builder or builder_xml.")
if self.builder is None and self.builder_xml:
self.set_builder_by_builder_xml()
# init
self.raw_values = {}
self.values = {}
self.form_document = {}
self.init()
# form object
self.form = RunnerForm(self)
def set_xml_root(self):
self.xml_root = generate_xml_root(self.xml)
def set_builder_by_builder_xml(self):
self.builder = Builder(self.builder_xml, self.lang)
def init(self):
for name, control in self.builder.controls.items():
element = self.get_form_element(name)
if element is not False:
self.raw_values[name] = getattr(element, 'text', None)
self.values[name] = control.decode(element.text)
# Instantiate the control class (these are imported above)
form_document_control_class = globals()[control.__class__.__name__]
form_document_control = form_document_control_class(self.builder, control.bind, element)
if form_document_control is not None:
form_document_control.init_runner_attrs(element)
self.form_document[name] = form_document_control
def get_form_element(self, name):
"""
@param name str The control name (form element tag)
"""
if name not in self.builder.controls:
return False
control = self.builder.controls[name]
if control.parent is None:
return False
# query = "//form/%s" % name
else:
query = "//form/%s/%s" % (control.parent.bind.name, name)
res = self.xml_root.xpath(query)
# TODO Fix composite controls like 'us-address'
# if len(res) > 1 or not res or len(res[0].getchildren()) > 1:
# return False
return res[0]
def get_raw_value(self, name):
return self.raw_values[name]
def get_value(self, name):
return self.values[name]
def get_form_document_control(self, name):
return self.form_document[name]
def set_value(self, name, value):
"""
Set Runner Control XML value.
"""
pass
class RunnerForm:
def __init__(self, runner):
self._runner = runner
def __getattr__(self, s_name):
name = self._runner.builder.sanitized_control_names.get(s_name, False)
if name:
return self._runner.get_form_document_control(name)
else:
return False
|
Python
| 0
|
@@ -1205,16 +1205,25 @@
document
+_controls
= %7B%7D%0A%0A
@@ -1987,16 +1987,20 @@
_control
+_obj
= form_
@@ -2100,16 +2100,20 @@
_control
+_obj
is not
@@ -2159,16 +2159,20 @@
_control
+_obj
.init_ru
@@ -2225,24 +2225,33 @@
orm_document
+_controls
%5Bname%5D = for
@@ -2268,16 +2268,20 @@
_control
+_obj
%0A%0A de
@@ -3139,16 +3139,25 @@
document
+_controls
%5Bname%5D%0A%0A
|
f6cad1777023ceb53db8599bc2e74bf0ab2aa0a7
|
Prepare for next dev release
|
orchestrator/__init__.py
|
orchestrator/__init__.py
|
from __future__ import absolute_import
from celery.signals import setup_logging
import orchestrator.logger
__version__ = '0.5.1'
__author__ = 'sukrit'
orchestrator.logger.init_logging()
setup_logging.connect(orchestrator.logger.init_celery_logging)
|
Python
| 0
|
@@ -124,9 +124,9 @@
0.5.
-1
+2
'%0A__
|
dceb30b96b2eb53f24ea69b9591f48b204dc9773
|
Add some debugging around the fas cache lock.
|
fedmsg_meta_fedora_infrastructure/fasshim.py
|
fedmsg_meta_fedora_infrastructure/fasshim.py
|
import threading
import urllib
import socket
from hashlib import sha256, md5
_fas_cache = {}
_fas_cache_lock = threading.Lock()
import logging
log = logging.getLogger("moksha.hub")
def avatar_url(username, size=64, default='retro'):
openid = "http://%s.id.fedoraproject.org/" % username
return avatar_url_from_openid(openid, size, default)
def avatar_url_from_openid(openid, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
openid=openid,
size=size,
default=default,
)
else:
query = urllib.urlencode({'s': size, 'd': default})
hash = sha256(openid).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
def avatar_url_from_email(email, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
email=email,
size=size,
default=default,
)
else:
query = urllib.urlencode({'s': size, 'd': default})
hash = md5(email).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
gravatar_url = avatar_url # backwards compat
gravatar_url_from_openid = avatar_url_from_openid
gravatar_url_from_email = avatar_url_from_email
def make_fas_cache(**config):
global _fas_cache
if _fas_cache:
return _fas_cache
log.warn("No previous fas cache found. Looking to rebuild.")
try:
import fedora.client
import fedora.client.fas2
except ImportError:
log.warn("No python-fedora installed. Not caching fas.")
return {}
    if 'fas_credentials' not in config:
log.warn("No fas_credentials found. Not caching fas.")
return {}
creds = config['fas_credentials']
default_url = 'https://admin.fedoraproject.org/accounts/'
fasclient = fedora.client.fas2.AccountSystem(
base_url=creds.get('base_url', default_url),
username=creds['username'],
password=creds['password'],
)
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(600)
try:
log.info("Downloading FAS cache")
request = fasclient.send_request('/user/list',
req_params={'search': '*'},
auth=True)
except fedora.client.ServerError as e:
log.warning("Failed to download fas cache %r" % e)
return {}
finally:
socket.setdefaulttimeout(timeout)
log.info("Caching necessary user data")
for user in request['people']:
nick = user['ircnick']
if nick:
_fas_cache[nick] = user['username']
email = user['email']
if email:
_fas_cache[email] = user['username']
del request
del fasclient
del fedora.client.fas2
return _fas_cache
def nick2fas(nickname, **config):
with _fas_cache_lock:
fas_cache = make_fas_cache(**config)
return fas_cache.get(nickname, nickname)
def email2fas(email, **config):
with _fas_cache_lock:
fas_cache = make_fas_cache(**config)
return fas_cache.get(email, email)
|
Python
| 0
|
@@ -3429,37 +3429,151 @@
g):%0A
-with _fas_cache_lock:
+log.debug(%22Acquiring _fas_cache_lock for nicknames.%22)%0A with _fas_cache_lock:%0A log.debug(%22Got _fas_cache_lock for nicknames.%22)
%0A
@@ -3612,36 +3612,38 @@
nfig)%0A re
-turn
+sult =
fas_cache.get(n
@@ -3665,67 +3665,250 @@
me)%0A
-%0A%0Adef email2fas(email, **config):%0A with _fas_cache_lock:
+ log.debug(%22Released _fas_cache_lock for nicknames.%22)%0A return result%0A%0A%0Adef email2fas(email, **config):%0A log.debug(%22Acquiring _fas_cache_lock for emails.%22)%0A with _fas_cache_lock:%0A log.debug(%22Got _fas_cache_lock for emails.%22)
%0A
@@ -3959,20 +3959,22 @@
re
-turn
+sult =
fas_cac
@@ -3994,8 +3994,80 @@
email)%0A
+ log.debug(%22Released _fas_cache_lock for emails.%22)%0A return result%0A
|
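The added debugging is plain bracketing of lock acquisition with log lines, so a hang shows up as an "Acquiring" entry with no matching "Got". A self-contained sketch of the pattern:

import logging
import threading

log = logging.getLogger("moksha.hub")
_lock = threading.Lock()

def locked_get(cache, key):
    # If "Acquiring" is logged but "Got" never follows, another thread
    # is holding the lock, which is what this commit is probing for.
    log.debug("Acquiring lock")
    with _lock:
        log.debug("Got lock")
        result = cache.get(key, key)
    log.debug("Released lock")
    return result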
569c056e016131ec4325185ee9fe814018d5e1fe
|
Fix problem with no-longer-existing bands that are still available in the logged-in session
|
server/bands/__init__.py
|
server/bands/__init__.py
|
from flask import session, redirect, url_for, g, jsonify, Response
from flask.views import MethodView
from server.models import Band
class RestrictedBandPage(MethodView):
def dispatch_request(self, *args, **kwargs):
if not 'bandId' in session:
return redirect(url_for('bands.session.index'))
else:
self.band = Band.query.get(session['bandId'])
if not self.band:
return redirect(url_for('bands.session.index'))
else:
g.band = self.band
return super(RestrictedBandPage, self).dispatch_request(*args, **kwargs)
class AjaxException(Exception):
errors = []
def __init__(self, *args):
super(Exception, self).__init__()
self.errors = args
AJAX_SUCCESS = Response(200)
class AjaxForm(MethodView):
def post(self):
if self.form.validate_on_submit():
try:
result = self.on_submit()
if type(result) is Response:
return result
else:
return jsonify(result)
except AjaxException as e:
errors = self.form.errors
if len(e.errors) > 0:
errors['general'] = e.errors
return jsonify(errors=errors), 400
else:
return jsonify(errors=self.form.errors), 400
|
Python
| 0.000001
|
@@ -413,16 +413,54 @@
f.band:%0A
+ del session%5B'bandId'%5D%0A
|
08c25e4ff96765c057397582327a36a6a1d3b7cb
|
fix caching unicode error
|
ourcup/util/filecache.py
|
ourcup/util/filecache.py
|
import hashlib, os, codecs, logging
'''
Super basic file-based cache (utf-8 friendly). Helpful if you're developing a
webpage scraper and want to be a bit more polite to the server you're scraping
while developing. The idea is that it caches content in files, each named by the
key you pass in (use the md5_key helper to generate keys and make this super easy).
'''
DEFAULT_DIR = "cache"
cache_dir = DEFAULT_DIR
logger = logging.getLogger(__name__)
def md5_key(string):
'''
    Use this to generate filename keys
'''
m = hashlib.md5()
m.update(string)
return m.hexdigest()
def set_dir(new_dir = DEFAULT_DIR):
'''
Don't need to call this, unless you want to override the default location
'''
global cache_dir
cache_dir = new_dir
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
logger.info("Caching files to {}".format(cache_dir))
def contains(key):
'''
Returns true if a file named by key is in the cache dir
'''
global cache_dir
return os.path.isfile(os.path.join(cache_dir,key))
def get(key):
'''
Returns the contents of the file named by key from the cache dir.
Returns None if file doesn't exist
'''
global cache_dir
if os.path.isfile(os.path.join(cache_dir,key)):
with codecs.open(os.path.join(cache_dir,key), mode="r",encoding='utf-8') as myfile:
return myfile.read()
return None
def put(key,content):
'''
Creates a file in the cache dir named by key, with the content in it
'''
global cache_dir
logger.debug("caching "+str(key)+" in "+cache_dir)
text_file = codecs.open(os.path.join(cache_dir,key), encoding='utf-8', mode="w")
text_file.write(content.decode('utf-8'))
text_file.close()
|
Python
| 0.000002
|
@@ -568,16 +568,32 @@
e(string
+.encode('utf-8')
)%0A re
|
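hashlib digests operate on bytes, so passing a unicode string raises a TypeError on Python 3 and breaks on non-ASCII input on Python 2; encoding first, as the diff does, covers both. A standalone version:

import hashlib

def md5_key(string):
    # Encode before hashing: hashlib's update() only accepts bytes.
    m = hashlib.md5()
    m.update(string.encode('utf-8'))
    return m.hexdigest()

print(md5_key(u'caf\xe9'))  # works for non-ASCII input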
05d6a420f7644eef197aa33aa83f4f3dcd6dd246
|
Add assertion to debug CI failure.
|
prjxray/tile_segbits.py
|
prjxray/tile_segbits.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from collections import namedtuple
from prjxray import bitstream
from prjxray.grid_types import BlockType
import enum
class PsuedoPipType(enum.Enum):
ALWAYS = 'always'
DEFAULT = 'default'
HINT = 'hint'
def read_ppips(f):
ppips = {}
for l in f:
l = l.strip()
if not l:
continue
feature, ppip_type = l.split(' ')
ppips[feature] = PsuedoPipType(ppip_type)
return ppips
Bit = namedtuple('Bit', 'word_column word_bit isset')
def parsebit(val):
'''Return "!012_23" => (12, 23, False)'''
isset = True
# Default is 0. Skip explicit call outs
if val[0] == '!':
isset = False
val = val[1:]
# 28_05 => 28, 05
seg_word_column, word_bit_n = val.split('_')
return Bit(
word_column=int(seg_word_column),
word_bit=int(word_bit_n),
isset=isset,
)
def read_segbits(f):
segbits = {}
for l in f:
# CLBLM_L.SLICEL_X1.ALUT.INIT[10] 29_14
l = l.strip()
if not l:
continue
parts = l.split(' ')
assert len(parts) > 1
segbits[parts[0]] = [parsebit(val) for val in parts[1:]]
return segbits
class TileSegbits(object):
def __init__(self, tile_db):
self.segbits = {}
self.ppips = {}
self.feature_addresses = {}
if tile_db.ppips is not None:
with open(tile_db.ppips) as f:
self.ppips = read_ppips(f)
if tile_db.segbits is not None:
with open(tile_db.segbits) as f:
self.segbits[BlockType.CLB_IO_CLK] = read_segbits(f)
if tile_db.block_ram_segbits is not None:
with open(tile_db.block_ram_segbits) as f:
self.segbits[BlockType.BLOCK_RAM] = read_segbits(f)
for block_type in self.segbits:
for feature in self.segbits[block_type]:
sidx = feature.rfind('[')
eidx = feature.rfind(']')
if sidx != -1:
assert eidx != -1
base_feature = feature[:sidx]
if base_feature not in self.feature_addresses:
self.feature_addresses[base_feature] = {}
self.feature_addresses[base_feature][int(
feature[sidx + 1:eidx])] = (block_type, feature)
def match_bitdata(self, block_type, bits, bitdata, match_filter=None):
""" Return matching features for tile bits data (grid.Bits) and bitdata.
See bitstream.load_bitdata for details on bitdata structure.
"""
if block_type not in self.segbits:
return
for feature, segbit in self.segbits[block_type].items():
match = True
skip = False
for query_bit in segbit:
if match_filter is not None and not match_filter(block_type,
query_bit):
skip = True
break
frame = bits.base_address + query_bit.word_column
bitidx = bits.offset * bitstream.WORD_SIZE_BITS + query_bit.word_bit
if frame not in bitdata:
match = not query_bit.isset
if match:
continue
else:
break
found_bit = bitidx in bitdata[frame][1]
match = found_bit == query_bit.isset
if not match:
break
if not match or skip:
continue
def inner():
for query_bit in segbit:
if query_bit.isset:
frame = bits.base_address + query_bit.word_column
bitidx = bits.offset * bitstream.WORD_SIZE_BITS + query_bit.word_bit
yield (frame, bitidx)
yield (tuple(inner()), feature)
def map_bit_to_frame(self, block_type, bits, bit):
""" Convert bit from segbit to frame data. """
return Bit(
word_column=bits.base_address + bit.word_column,
word_bit=bits.offset * bitstream.WORD_SIZE_BITS + bit.word_bit,
isset=bit.isset,
)
def feature_to_bits(self, bits_map, feature, address=0):
if feature in self.ppips:
return
for block_type in self.segbits:
if address == 0 and feature in self.segbits[block_type]:
for bit in self.segbits[block_type][feature]:
yield block_type, self.map_bit_to_frame(
block_type, bits_map[block_type], bit)
return
block_type, feature = self.feature_addresses[feature][address]
for bit in self.segbits[block_type][feature]:
yield block_type, self.map_bit_to_frame(
block_type, bits_map[block_type], bit)
|
Python
| 0.000001
|
@@ -990,16 +990,75 @@
28, 05%0A
+ parts = val.split('_')%0A assert len(parts) == 2, val%0A
seg_
@@ -1083,30 +1083,21 @@
bit_n =
-val.split('_')
+parts
%0A%0A re
|
94788d01acb2b44bfdc4e621769e58f9c17b4d69
|
Add manifest filename to Application class
|
progression/__init__.py
|
progression/__init__.py
|
from .__version__ import __version__
import os.path
class Metadata(dict):
"""Metadata can be accessed using both dictionary and attribute syntax.
Provides basic valiation of input fields, with different requirements for
the testing and comparison steps.
"""
_compare_requires = []
_testing_requires = []
_testing_allows = {}
def __init__(self, *args, **kwargs):
super(Metadata, self).__init__(*args, **kwargs)
self.__dict__ = self
def skim(self):
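        # Keep only the comparison fields, defaulting missing ones to None.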
return {k: self.get(k, None) for k in self._compare_requires}
@classmethod
def for_test(cls, data):
requires, allows = cls._testing_requires, cls._testing_allows
cls._validate(data, requires, allows)
return cls(**data)
@classmethod
def for_compare(cls, data):
requires, allows = cls._compare_requires, {}
cls._validate(data, requires, allows)
return cls(**data)
@staticmethod
def _validate(data, required, allowed):
for k in required:
if k not in data:
raise ValueError('Missing field: "%s"' % k)
for k in data.keys():
if k not in allowed and k not in required:
raise ValueError('Unrecognised field: "%s"' % k)
for k, v in allowed.items():
if k not in data:
data[k] = v
class Application(Metadata):
"""When declaring the application in a JSON file...
Required fields:
name
version
exe: executable [path]
setup_script: environment setup script [path]
tests_path [path]
benchmark_path [path]
Optional fields:
description
timeout [float in secs]
"""
_testing_requires = [
'name',
'version',
'exe',
'setup_script',
'tests_path',
'benchmark_path',
]
_testing_allows = {
'description': None,
'timeout': None,
}
_compare_requires = [
'name',
'version',
'description',
]
class Test(Metadata):
"""When declaring the test in a JSON file...
Required fields (testing):
name
version
args: arguments to pass to executable [list of strings]
Optional fields:
description
log_file [path]
input_files [list of paths]
output_files [list of paths]
fail_strings: list of strings indicating failure in log file
"""
_testing_requires = [
'name',
'version',
'args',
]
_testing_allows = {
'description': None,
'log_file': None,
'input_files': [],
'output_files': [],
'fail_strings': [],
'timeout': None,
}
_compare_requires = [
'name',
'version',
'description',
'log_file',
'output_files',
'passed',
'error_msg',
'duration',
'performance',
]
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
# Default log filename is constructed from parameter filename
if not self.log_file:
param_fname = max(self.args, key=len) # Assumed to be longest arg
(basename, _) = os.path.splitext(param_fname)
self.log_file = basename + '.log'
|
Python
| 0.000001
|
@@ -2050,16 +2050,158 @@
%0A %5D%0A%0A
+ def __init__(self, *args, **kwargs):%0A super(Test, self).__init__(*args, **kwargs)%0A self%5B'manifest_file'%5D = 'manifest.json'%0A%0A
%0Aclass T
|
f236c451fb49087dbf3d4a75f94ad879ce0896fd
|
Fix quartet officer updater
|
project/bhs/managers.py
|
project/bhs/managers.py
|
from django.db.models import Manager
from django.apps import apps
import django_rq
class HumanManager(Manager):
def update_persons(self, cursor=None, active_only=True, *args, **kwargs):
# Get base
humans = self.all()
        # Filter out deceased humans unless requested otherwise
        if active_only:
            humans = humans.filter(
                is_deceased=False,
            )
        # Filter if cursored
        if cursor:
humans = humans.filter(
updated_ts__gt=cursor,
)
# Return as objects
humans = humans.values_list(
'id',
'first_name',
'middle_name',
'last_name',
'nick_name',
'email',
'birth_date',
'phone',
'cell_phone',
'work_phone',
'bhs_id',
'sex',
'primary_voice_part',
)
# Creating/Update Persons
Person = apps.get_model('api.person')
for human in humans:
django_rq.enqueue(
Person.objects.update_or_create_from_human_object,
human,
)
return humans.count()
def delete_orphans(self, *args, **kwargs):
# Get base
humans = self.all()
humans = list(humans.values_list('id', flat=True))
# Delete Orphans
Person = apps.get_model('api.person')
orphans = Person.objects.filter(
bhs_pk__isnull=False,
).exclude(
bhs_pk__in=humans,
)
for orphan in orphans:
print(orphan)
return
class StructureManager(Manager):
def update_groups(self, cursor=None, active_only=True, *args, **kwargs):
# Get base
structures = self.all()
        # Filter to active structures unless requested otherwise
        if active_only:
            structures = structures.filter(
                status__name='active',
            )
        # Filter if cursored
        if cursor:
structures = structures.filter(
updated_ts__gt=cursor,
)
# Return as objects
structures = structures.values_list(
'id',
'name',
'preferred_name',
'chorus_name',
'status__name',
'kind',
'established_date',
'email',
'phone',
'website',
'facebook',
'twitter',
'bhs_id',
'parent',
)
# Creating/Update Groups
Group = apps.get_model('api.group')
for structure in structures:
django_rq.enqueue(
Group.objects.update_or_create_from_structure_object,
structure,
)
return structures.count()
def delete_orphans(self, *args, **kwargs):
# Get base
structures = self.all()
structures = list(structures.values_list('id', flat=True))
# Delete Orphans
Group = apps.get_model('api.group')
orphans = Group.objects.filter(
bhs_pk__isnull=False,
).exclude(
bhs_pk__in=structures,
)
for orphan in orphans:
print(orphan)
return
class SubscriptionManager(Manager):
def update_persons(self, cursor=None, *args, **kwargs):
# Get base
subscriptions = self.filter(
items_editable=True,
)
# Filter if cursored
if cursor:
subscriptions = subscriptions.filter(
updated_ts__gt=cursor,
)
# Order and Return as objects
subscriptions = subscriptions.order_by(
'created_ts',
).values_list(
'human__id',
'items_editable',
'status',
'current_through',
)
# Creating/Update Persons
Person = apps.get_model('api.person')
for subscription in subscriptions:
django_rq.enqueue(
Person.objects.update_status_from_subscription_object,
subscription,
)
return subscriptions.count()
class RoleManager(Manager):
def update_chapter_officers(self, cursor=None, active_only=True, *args, **kwargs):
# Get base
roles = self.exclude(
name='Quartet Admin',
)
if active_only:
roles = roles.filter(
structure__status__name='active',
)
# Filter if cursored
if cursor:
raise RuntimeError("Not currently supported")
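            # NOTE: this filter is unreachable until cursor support is implemented.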
roles = roles.filter(
updated_ts__gt=cursor,
)
# Order and Return as objects
roles = roles.order_by(
'start_date'
).values_list(
'id',
'name',
'structure',
'human',
'start_date',
'end_date',
)
# Creating/Update Officers
Officer = apps.get_model('api.officer')
for role in roles:
django_rq.enqueue(
Officer.objects.update_or_create_from_role_object,
role,
)
return
class SMJoinManager(Manager):
def update_members(self, cursor=None, *args, **kwargs):
# Get base
joins = self.filter(
structure__kind__in=[
'quartet',
'chapter',
],
)
# Filter if cursored
if cursor:
joins = joins.filter(
updated_ts__gt=cursor,
)
# Order and Return as objects
joins = joins.order_by(
'established_date',
'-inactive_date',
).values_list(
'id',
'structure__id',
'subscription__human__id',
'status',
'inactive_date',
'inactive_reason',
'membership__status__name',
'membership__code',
'vocal_part',
)
# Creating/Update Persons
Member = apps.get_model('api.member')
for join in joins:
django_rq.enqueue(
Member.objects.update_or_create_from_join_object,
join,
)
return joins.count()
def update_quartet_officers(self, cursor=None, active_only=True, *args, **kwargs):
# Get base
joins = self.filter(
structure__kind='quartet',
)
        # Filter to active quartets unless requested otherwise
if active_only:
joins = joins.filter(
structure__status__name='active',
)
# Filter if cursored
if cursor:
joins = joins.filter(
updated_ts__gt=cursor,
)
# Order and Return as objects
joins = joins.order_by(
'established_date',
'-inactive_date',
).values_list(
'id',
'status',
'structure',
'human',
)
# Creating/Update Officers
Officer = apps.get_model('api.officer')
for join in joins:
django_rq.enqueue(
Officer.objects.update_or_create_from_join_object,
join,
)
return joins.count()
|
Python
| 0
|
@@ -6902,32 +6902,46 @@
',%0A '
+subscription__
human',%0A
|
a3353e1b4c3e181958a877c8e2485c2c7eed4201
|
Update pd_utils for python3
|
projections/pd_utils.py
|
projections/pd_utils.py
|
import numpy.lib
import numpy as np
import pandas as pd
import _pickle as pickle
def save_pandas(fname, data):
'''Save DataFrame or Series
Parameters
----------
fname : str
filename to use
data: Pandas DataFrame or Series
'''
np.save(open(fname, 'w'), data)
if len(data.shape) == 2:
meta = data.index,data.columns
elif len(data.shape) == 1:
meta = (data.index,)
else:
raise ValueError('save_pandas: Cannot save this type')
s = pickle.dumps(meta)
s = s.encode('string_escape')
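    # Append the escaped metadata after the npy payload so load_pandas can recover it.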
with open(fname, 'a') as f:
f.seek(0, 2)
f.write(s)
def load_pandas(fname, mmap_mode='r'):
'''Load DataFrame or Series
Parameters
----------
fname : str
filename
mmap_mode : str, optional
Same as numpy.load option
'''
values = np.load(fname, mmap_mode=mmap_mode)
with open(fname) as f:
numpy.lib.format.read_magic(f)
numpy.lib.format.read_array_header_1_0(f)
f.seek(values.dtype.alignment*values.size, 1)
meta = pickle.loads(f.readline().decode('string_escape'))
if len(meta) == 2:
return pd.DataFrame(values, index=meta[0], columns=meta[1])
elif len(meta) == 1:
return pd.Series(values, index=meta[0])
|
Python
| 0
|
@@ -279,16 +279,17 @@
name, 'w
+b
'), data
@@ -286,24 +286,24 @@
wb'), data)%0A
-
if len(d
@@ -345,16 +345,17 @@
a.index,
+
data.col
@@ -519,24 +519,57 @@
s(meta)%0A
+import pdb; pdb.set_trace()%0A #
s = s.encode
@@ -609,16 +609,17 @@
name, 'a
+b
') as f:
@@ -637,17 +637,27 @@
seek(0,
-2
+os.SEEK_END
)%0A
@@ -945,16 +945,22 @@
en(fname
+, 'rb'
) as f:%0A
@@ -1089,9 +1089,11 @@
ment
-*
+ *
valu
@@ -1108,24 +1108,53 @@
1)%0A
+data = f.readline()%0A #
meta = pickl
@@ -1161,28 +1161,20 @@
e.loads(
-f.readline()
+data
.decode(
@@ -1191,16 +1191,86 @@
cape'))%0A
+ import pdb; pdb.set_trace()%0A meta = pickle.loads(data)%0A
if l
|
fda311fe04e913875fd39d071e6468e1b32875a5
|
Mark other clients of bw-compat feature
|
src/zeit/content/author/author.py
|
src/zeit/content/author/author.py
|
from zeit.cms.content.property import ObjectPathProperty
from zeit.cms.i18n import MessageFactory as _
from zeit.content.author.interfaces import IAuthor
import UserDict
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.interfaces
import zeit.cms.content.property
import zeit.cms.content.reference
import zeit.cms.content.xmlsupport
import zeit.cms.interfaces
import zeit.cms.repository.interfaces
import zeit.cms.type
import zeit.content.author.interfaces
import zeit.find.search
import zeit.workflow.interfaces
import zope.interface
class Author(zeit.cms.content.xmlsupport.XMLContentBase):
zope.interface.implements(zeit.content.author.interfaces.IAuthor,
zeit.cms.interfaces.IAsset)
default_template = (
u'<author xmlns:py="http://codespeak.net/lxml/objectify/pytype">'
u'</author>')
for name in [
'biography',
'display_name',
'email',
'entered_display_name',
'external',
'facebook',
'firstname',
'instagram',
'lastname',
'status',
'summary',
'title',
'topiclink_label_1',
'topiclink_label_2',
'topiclink_label_3',
'topiclink_url_1',
'topiclink_url_2',
'topiclink_url_3',
'twitter',
'vgwortcode',
'vgwortid',
]:
locals()[name] = ObjectPathProperty('.%s' % name, IAuthor[name])
community_profile = zeit.cms.content.property.ObjectPathProperty(
'.communityprofile')
favourite_content = zeit.cms.content.reference.MultiResource(
'.favourites.reference', 'related')
@property
def exists(self):
query = zeit.find.search.query(
fulltext='%s %s' % (self.firstname, self.lastname),
types=('author',))
return bool(zeit.find.search.search(query).hits)
@property
def bio_questions(self):
return zeit.content.author.interfaces.IBiographyQuestions(self)
@property
def image_group(self):
# BBB Deprecated in favor of a separate images adapter
return zeit.content.image.interfaces.IImages(self).image
class AuthorType(zeit.cms.type.XMLContentTypeDeclaration):
factory = Author
interface = zeit.content.author.interfaces.IAuthor
type = 'author'
title = _('Author')
addform = 'zeit.content.author.add_contextfree'
@grok.implementer(zeit.content.image.interfaces.IImages)
@grok.adapter(zeit.content.author.interfaces.IAuthor)
class AuthorImages(object):
zope.interface.implements(zeit.cms.content.interfaces.IXMLRepresentation)
image = zeit.cms.content.reference.SingleResource('.image_group', 'image')
def __init__(self, context):
self.context = context
self.__parent__ = context
self.xml = context.xml
self.uniqueId = context.uniqueId
@grok.subscribe(
zeit.content.author.interfaces.IAuthor,
zeit.cms.repository.interfaces.IBeforeObjectAddEvent)
def update_display_name(obj, event):
if obj.entered_display_name:
obj.display_name = obj.entered_display_name
else:
obj.display_name = u'%s %s' % (obj.firstname, obj.lastname)
# Note: This is needed by zeit.vgwort, among others.
# zeit.vgwort.report uses the fact that the references to author objects are
# copied to the freetext 'author' webdav property to filter out which content
# objects to report.
@grok.subscribe(
zeit.cms.content.interfaces.ICommonMetadata,
zope.lifecycleevent.interfaces.IObjectModifiedEvent)
def update_author_freetext(obj, event):
if event.descriptions:
for description in event.descriptions:
if (issubclass(description.interface,
zeit.cms.content.interfaces.ICommonMetadata) and
'authorships' in description.attributes):
ref_names = [x.target.display_name for x in obj.authorships]
obj.authors = ref_names
class Dependencies(grok.Adapter):
grok.context(zeit.cms.content.interfaces.ICommonMetadata)
grok.name('zeit.content.author')
grok.implements(zeit.workflow.interfaces.IPublicationDependencies)
def __init__(self, context):
self.context = context
def get_dependencies(self):
return [x.target for x in self.context.authorships]
@grok.adapter(
zeit.cms.content.interfaces.ICommonMetadata,
name='zeit.content.author')
@grok.implementer(
zeit.cms.relation.interfaces.IReferenceProvider)
def references(context):
return [x.target for x in context.authorships]
@grok.adapter(
zeit.content.author.interfaces.IAuthor,
zeit.cms.content.interfaces.IContentAdder)
@grok.implementer(zeit.cms.content.interfaces.IAddLocation)
def author_location(type_, adder):
return zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
class BiographyQuestions(
grok.Adapter,
UserDict.DictMixin,
zeit.cms.content.xmlsupport.Persistent):
grok.context(zeit.content.author.interfaces.IAuthor)
grok.implements(zeit.content.author.interfaces.IBiographyQuestions)
def __init__(self, context):
object.__setattr__(self, 'context', context)
object.__setattr__(self, 'xml', zope.security.proxy.getObject(
context.xml))
object.__setattr__(self, '__parent__', context)
def __getitem__(self, key):
node = self.xml.xpath('//question[@id="%s"]' % key)
return Question(
key, self.title(key), unicode(node[0]) if node else None)
def __setitem__(self, key, value):
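        # Remove any existing answer node, then append the new value if one is given.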
node = self.xml.xpath('//question[@id="%s"]' % key)
if node:
self.xml.remove(node[0])
if value:
node = lxml.objectify.E.question(value, id=key)
lxml.objectify.deannotate(node[0], cleanup_namespaces=True)
self.xml.append(node)
super(BiographyQuestions, self).__setattr__('_p_changed', True)
def keys(self):
return list(zeit.content.author.interfaces.BIOGRAPHY_QUESTIONS(self))
def title(self, key):
return zeit.content.author.interfaces.BIOGRAPHY_QUESTIONS(
self).title(key)
# Attribute-style access to answers is meant only for zope.formlib.
# XXX Why does this work without an explicit security declaration?
def __getattr__(self, key):
return self.get(key).answer
def __setattr__(self, key, value):
self[key] = value
class Question(object):
zope.interface.implements(zeit.content.author.interfaces.IQuestion)
def __init__(self, id, title, answer):
self.id = id
self.title = title
self.answer = answer
|
Python
| 0
|
@@ -3227,16 +3227,34 @@
eded by
+the publisher and
zeit.vgw
@@ -3272,16 +3272,75 @@
others.%0A
+# Publisher only indexes the freetext field at the moment.%0A
# zeit.v
|
006eff6a9376c65e0632efe79ec6d39cbc50f80b
|
remove UI element namespaces
|
docs/tutorial/tuthello.py
|
docs/tutorial/tuthello.py
|
# Licensed CC0 Public Domain: http://creativecommons.org/publicdomain/zero/1.0
# [HelloRapicorn-EXAMPLE]
# Load and import a versioned Rapicorn module into the 'Rapicorn' namespace
import Rapicorn1307 as Rapicorn
# Set up the application object, using a unique application name.
app = Rapicorn.app_init ("Hello Rapicorn")
# Define the elements of the dialog window to be displayed.
hello_window = """
<tmpl:define id="hello-window" inherit="Window">
<Alignment padding="15">
<VBox spacing="30">
<Label markup-text="Hello World!"/>
<Button on-click="CLICK">
<Label markup-text="Close" />
</Button>
</VBox>
</Alignment>
</tmpl:define>
"""
# Register the 'hello-window' definition for later use; for this we need
# a unique domain string, and it's easiest to reuse the application name.
app.load_string ("HelloRapicorn", hello_window)
# The above is all that is needed to allow us to create the window object.
window = app.create_window ("HelloRapicorn:hello-window")
# This function is called to handle the command we use for button clicks.
def command_handler (command_name, args):
# When we see the 'CLICK' command, close down the Application
if command_name == "CLICK":
app.close_all();
# Call the handler when the Window::commands signal is emitted.
window.sig_commands_connect (command_handler)
# Preparations done, now it's time to show the window on the screen.
window.show()
# Pass control to the event loop, to wait and handle user commands.
app.loop()
# [HelloRapicorn-EXAMPLE]
|
Python
| 0.000001
|
@@ -855,25 +855,8 @@
ng (
-%22HelloRapicorn%22,
hell
@@ -974,22 +974,8 @@
w (%22
-HelloRapicorn:
hell
|
8936ba626c4bc3114ecb9d2efc75136f4122e37a
|
Initialized LSS texture recognizer example
|
examples/linear_separator/texture_recognizer/Texture.py
|
examples/linear_separator/texture_recognizer/Texture.py
|
from classes.solver import LinearSeparator
class Texture:
def __init__(self, neighborhoods, colors, textures=2):
self.dimensions = neighborhoods * (colors) * (colors) + 1
self.colors = colors
self.neighborhoods = neighborhoods
self.__separator = LinearSeparator(self.dimensions, classes=textures)
def pick_texture_sample(self, params, texture):
print self.__get_vector(params)
return self.__separator.setup({texture: [self.__get_vector(params)]})
def __get_vector(self, params):
x = [0] * self.dimensions
x[-1] = 1
for key, value in params.items():
x[self.__get_element_number(key, value)] = 1
return x
def __get_element_number(self, neighbourhood, colors):
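        # Flatten the (neighbourhood, colour pair) triple into a single feature index.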
return neighbourhood * self.colors * self.colors + \
(colors[0] * self.colors + colors[1])
def recognize_texture(self, params):
return self.__separator.classify_vertex(self.__get_vector(params))
|
Python
| 0.999995
|
@@ -166,13 +166,11 @@
lors
-)
*
-(
colo
|
240e33bcdd3734f43ea9d58814f8571c2023cf24
|
Fix typo
|
webhookdb/tasks/repository_hook.py
|
webhookdb/tasks/repository_hook.py
|
# coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import datetime
from iso8601 import parse_date
from celery import group
from webhookdb import db
from webhookdb.process import process_repository_hook
from webhookdb.models import RepositoryHook, Repository, Mutex
from webhookdb.exceptions import NotFound
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from webhookdb.tasks import celery, logger
from webhookdb.tasks.fetch import fetch_url_from_github
from urlobject import URLObject
LOCK_TEMPLATE = "Repository|{owner}/{repo}|hooks"
@celery.task(bind=True)
def sync_repository_hook(self, owner, repo, hook_id,
children=False, requestor_id=None):
hook_url = "/repos/{owner}/{repo}/hooks/{hook_id}".format(
owner=owner, repo=repo, hook_id=hook_id,
)
try:
resp = fetch_url_from_github(hook_url, requestor_id=requestor_id)
except NotFound:
# add more context
msg = "Hook #{hook_id} for {owner}/{repo} not found".format(
hook_id=hook_id, owner=owner, repo=repo,
)
raise NotFound(msg, {
"type": "repo_hook",
"owner": owner,
"repo": repo,
"hook_id": hook_id,
})
hook_data = resp.json()
try:
hook = process_repository_hook(
hook_data, via="api", fetched_at=datetime.now(), commit=True,
requestor_id=requestor_id,
)
except IntegrityError as exc:
self.retry(exc=exc)
return hook.id
@celery.task(bind=True)
def sync_page_of_repository_hooks(self, owner, repo, children=False,
requestor_id=None, per_page=100, page=1):
hook_page_url = (
"/repos/{owner}/{repo}/hooks?per_page={per_page}&page={page}"
).format(
owner=owner, repo=repo, per_page=per_page, page=page,
)
resp = fetch_url_from_github(hook_page_url, requestor_id=requestor_id)
fetched_at = datetime.now()
hook_data_list = resp.json()
results = []
for hook_data in hook_data_list:
try:
hook = process_repository_hook(
hook_data, via="api", fetched_at=fetched_at, commit=True,
requestor_id=requestor_id,
)
results.append(hook.id)
except IntegrityError as exc:
self.retry(exc=exc)
return results
@celery.task()
def hooks_scanned(owner, repo, requestor_id=None):
"""
Update the timestamp on the repository object,
and delete old hooks that weren't updated.
"""
repo = Repository.get(owner, repo)
prev_scan_at = repo.hooks_last_scanned_at
pr.hooks_last_scanned_at = datetime.now()
db.session.add(repo)
if prev_scan_at:
# delete any hooks that were not updated since the previous scan --
# they have been removed from Github
query = (
RepositoryHook.query.filter_by(repo_id=repo.id)
.filter(RepositoryHook.last_replicated_at < prev_scan_at)
)
query.delete()
# delete the mutex
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
Mutex.query.filter_by(name=lock_name).delete()
db.session.commit()
@celery.task()
def spawn_page_tasks_for_repository_hooks(
owner, repo, children=False, requestor_id=None, per_page=100,
):
# acquire lock or fail (we're already in a transaction)
lock_name = LOCK_TEMPLATE.format(owner=owner, repo=repo)
existing = Mutex.query.get(lock_name)
if existing:
return False
lock = Mutex(name=lock_name, user_id=requestor_id)
db.session.add(lock)
db.session.commit()
hook_page_url = (
"/repos/{owner}/{repo}/hooks?per_page={per_page}"
).format(
owner=owner, repo=repo, type=type, per_page=per_page,
)
resp = fetch_url_from_github(
hook_page_url, method="HEAD", requestor_id=requestor_id,
)
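    # Read the total page count from GitHub's Link header.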
last_page_url = URLObject(resp.links.get('last', {}).get('url', ""))
last_page_num = int(last_page_url.query.dict.get('page', 1))
g = group(
sync_page_of_repository_hooks.s(
owner=owner, repo=repo,
children=children, requestor_id=requestor_id,
per_page=per_page, page=page,
) for page in xrange(1, last_page_num+1)
)
finisher = hooks_scanned.si(
owner=owner, repo=repo, requestor_id=requestor_id,
)
return (g | finisher).delay()
|
Python
| 0.999999
|
@@ -2661,18 +2661,20 @@
_at%0A
-p
r
+epo
.hooks_l
|
11efe2d85dc50b60ddcfa00b31e223ff3ac62ea1
|
add **kwargs to manage extra args in xpath and cssselect
|
weboob/tools/parsers/lxmlparser.py
|
weboob/tools/parsers/lxmlparser.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import lxml.html as html
import lxml.etree as etree
from .iparser import IParser
from ..browser import BrokenPageError
__all__ = ['LxmlHtmlParser', 'LxmlXmlParser']
class LxmlParser(IParser):
"""
Parser using lxml.
    Note that it is not available on every system.
"""
    def get_parser(self, encoding=None):
pass
def parse(self, data, encoding=None):
if encoding is None:
parser = None
else:
parser = self.get_parser(encoding=encoding)
return self.module.parse(data, parser)
def tostring(self, element):
return self.module.tostring(element, encoding=unicode)
def tocleanstring(self, element):
txt = [txt.strip() for txt in element.itertext()]
txt = u' '.join(txt) # 'foo bar'
txt = re.sub('\s+', ' ', txt) # 'foo bar'
return txt.strip()
def strip(self, s):
doc = self.module.fromstring(s) # parse html/xml string
return self.tocleanstring(doc)
@classmethod
def select(cls, element, selector, nb=None, method='cssselect'):
"""
Select one or many elements from an element, using lxml cssselect by default.
Raises :class:`weboob.tools.browser.browser.BrokenPageError` if not found.
:param element: element on which to apply selector
:type element: object
:param selector: CSS or XPath expression
:type selector: str
:param method: (cssselect|xpath)
:type method: str
:param nb: number of elements expected to be found. Use None for
undefined number, and 'many' for 1 to infinite
:type nb: :class:`int` or :class:`str`
:rtype: Element
"""
if method == 'cssselect':
results = element.cssselect(selector)
elif method == 'xpath':
results = element.xpath(selector)
else:
raise NotImplementedError('Only the cssselect and xpath methods are supported')
if nb is None:
return results
elif isinstance(nb, basestring) and nb == 'many':
if results is None or len(results) == 0:
raise BrokenPageError('Element not found with selector "%s"' % selector)
elif len(results) == 1:
raise BrokenPageError('Only one element found with selector "%s"' % selector)
else:
return results
elif isinstance(nb, int) and nb > 0:
if results is None:
raise BrokenPageError('Element not found with selector "%s"' % selector)
elif len(results) < nb:
raise BrokenPageError('Not enough elements found (%d expected) with selector "%s"' % (nb, selector))
else:
return results[0] if nb == 1 else results
else:
raise Exception('Unhandled value for kwarg "nb": %s' % nb)
class LxmlHtmlParser(LxmlParser):
"""
Parser using lxml.
    Note that it is not available on every system.
"""
def __init__(self, *args, **kwargs):
self.module = html
def get_parser(self, encoding=None):
return html.HTMLParser(encoding=encoding)
class LxmlXmlParser(LxmlParser):
"""
Parser using lxml.
    Note that it is not available on every system.
"""
def __init__(self, *args, **kwargs):
self.module = etree
def get_parser(self, encoding=None):
return etree.XMLParser(encoding=encoding, strip_cdata=False)
|
Python
| 0
|
@@ -1835,16 +1835,26 @@
sselect'
+, **kwargs
):%0A
@@ -2560,32 +2560,42 @@
sselect(selector
+, **kwargs
)%0A elif m
@@ -2648,32 +2648,42 @@
t.xpath(selector
+, **kwargs
)%0A else:%0A
|
f3cb0175ec32be75df70172fbf5629125a8c8381
|
Update docstring for BIDSReport.
|
bids/reports/report.py
|
bids/reports/report.py
|
"""Generate publication-quality data acquisition methods section from BIDS
dataset.
"""
from __future__ import print_function
import json
from os.path import dirname
from os.path import abspath
from os.path import join as pathjoin
from collections import Counter
from bids.reports import utils
from bids.reports import parsing
class BIDSReport(object):
"""
    Generates a publication-quality data acquisition methods section from a
    BIDS dataset.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
config : :obj:`str` or :obj:`dict`, optional
Configuration info for methods generation. Can be a path to a file
(str), a dictionary, or None. If None, loads and uses default
configuration information.
Keys in the dictionary include:
'dir': a dictionary for converting encoding direction strings
(e.g., j-) to descriptions (e.g., anterior to
posterior)
'seq': a dictionary of sequence abbreviations (e.g., EP) and
corresponding names (e.g., echo planar)
'seqvar': a dictionary of sequence variant abbreviations
(e.g., SP) and corresponding names (e.g., spoiled)
"""
def __init__(self, layout, config=None):
self.layout = layout
if config is None:
config = pathjoin(dirname(abspath(__file__)), 'config',
'converters.json')
if isinstance(config, str):
with open(config) as fobj:
config = json.load(fobj)
if not isinstance(config, dict):
raise ValueError('Input config must be None, dict, or path to '
'json file containing dict.')
self.config = config
def generate(self, **kwargs):
"""Generate the methods section.
Parameters
----------
task_converter : :obj:`dict`, optional
A dictionary with information for converting task names from BIDS
filename format to human-readable strings.
Returns
-------
counter : :obj:`collections.Counter`
A dictionary of unique descriptions across subjects in the dataset,
along with the number of times each pattern occurred.
Examples
--------
>>> from os.path import join
>>> from bids.layout import BIDSLayout
>>> from bids.reports import BIDSReport
>>> from bids.tests import get_test_data_path
>>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
>>> report = BIDSReport(layout)
>>> counter = report.generate(session='01')
>>> counter.most_common()[0][0]
"""
descriptions = []
subjs = self.layout.get_subjects(**kwargs)
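        # Drop the subject filter; each per-subject report receives it explicitly.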
kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
for sid in subjs:
descriptions.append(self._report_subject(subject=sid, **kwargs))
counter = Counter(descriptions)
print('Number of patterns detected: {0}'.format(len(counter.keys())))
print(utils.reminder())
return counter
def _report_subject(self, subject, **kwargs):
"""Write a report for a single subject.
Parameters
----------
subject : :obj:`str`
Subject ID.
Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
config : :obj:`dict`
Configuration info for methods generation.
Returns
-------
description : :obj:`str`
A publication-ready report of the dataset's data acquisition
information. Each scan type is given its own paragraph.
"""
description_list = []
# Remove sess from kwargs if provided, else set sess as all available
sessions = kwargs.pop('session',
self.layout.get_sessions(subject=subject,
**kwargs))
if not sessions:
sessions = [None]
elif not isinstance(sessions, list):
sessions = [sessions]
for ses in sessions:
niftis = self.layout.get(subject=subject, extensions='nii.gz',
**kwargs)
if niftis:
description_list.append('For session {0}:'.format(ses))
description_list += parsing.parse_niftis(self.layout, niftis,
subject, self.config,
session=ses)
metadata = self.layout.get_metadata(niftis[0].path)
else:
raise Exception('No niftis for subject {0}'.format(subject))
# Assume all data were converted the same way and use the last nifti
# file's json for conversion information.
if 'metadata' not in vars():
raise Exception('No valid jsons found. Cannot generate final '
'paragraph.')
description = '\n\t'.join(description_list)
description = description.replace('\tFor session', '\nFor session')
description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
return description
|
Python
| 0
|
@@ -2372,16 +2372,284 @@
ccurred.
+ In cases%0A where all subjects underwent the same protocol, the most common%0A pattern is most likely the most complete. In cases where the%0A dataset contains multiple protocols, each pattern will need to be%0A inspected manually.
%0A%0A
|
ec2456eac36a96c9819920bf8b4176e6a37ad9a5
|
Rename productclass made during migration
|
saleor/product/migrations/0020_attribute_data_to_class.py
|
saleor/product/migrations/0020_attribute_data_to_class.py
|
from __future__ import unicode_literals
from django.db import migrations, models
def move_data(apps, schema_editor):
Product = apps.get_model('product', 'Product')
ProductClass = apps.get_model('product', 'ProductClass')
for product in Product.objects.all():
attributes = product.attributes.all()
product_class = ProductClass.objects.all()
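        # Narrow to product classes whose variant attributes include every attribute.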
for attribute in attributes:
product_class = product_class.filter(
variant_attributes__in=[attribute])
product_class = product_class.first()
if product_class is None:
product_class = ProductClass.objects.create(
name='Migrated Product Class',
has_variants=True)
product_class.variant_attributes = attributes
product_class.save()
product.product_class = product_class
product.save()
class Migration(migrations.Migration):
dependencies = [
('product', '0019_auto_20161212_0230'),
]
operations = [
migrations.RunPython(move_data),
]
|
Python
| 0
|
@@ -667,30 +667,28 @@
me='
-Migrated P
+Unnamed p
roduct
-Class
+type
',%0A
|
f31e8215838e40960abff6c86be8c66cbf113c95
|
Make the endpoint return geojson as opposed to wkt geometry
|
server/rest/twofishes.py
|
server/rest/twofishes.py
|
import requests
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
class TwoFishes(Resource):
def __init__(self):
self.resourceName = 'minerva_geocoder'
self.route('GET', (), self.geocode)
self.route('GET', ('autocomplete',), self.autocomplete)
@access.public
def geocode(self, params):
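        # Ask Twofishes to include the WKT geometry in its response.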
r = requests.get(params['twofishes'],
params={'query': params['location'],
'responseIncludes': 'WKT_GEOMETRY'})
return r.json()
geocode.description = (
Description('Get geojson for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to get a geojson')
)
@access.public
def autocomplete(self, params):
r = requests.get(params['twofishes'],
params={'autocomplete': True,
'query': params['location'],
'maxInterpretations': 10,
'autocompleteBias': None})
return [i['feature']['matchedName'] for i in r.json()['interpretations']]
autocomplete.description = (
Description('Autocomplete result for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to autocomplete')
)
|
Python
| 0.99978
|
@@ -8,16 +8,83 @@
requests
+%0Afrom shapely.wkt import loads%0Afrom shapely.geometry import mapping
%0A%0Afrom g
@@ -633,22 +633,21 @@
-return
+wkt =
r.json(
@@ -647,16 +647,111 @@
r.json()
+%5B'interpretations'%5D%5B0%5D%5B'feature'%5D%5B'geometry'%5D%5B'wktGeometry'%5D%0A return mapping(loads(wkt))
%0A%0A ge
|
76fc2cb8ff45f0a004f876966d8e18f5f0fc13e5
|
return valid User object and move existence checking to make_user function
|
server/sessionmanager.py
|
server/sessionmanager.py
|
import crispin
import uuid
import logging as log
import datetime
import traceback
import google_oauth
from models import db_session, User, UserSession
# Memory cache for currently open crispin instances
email_address_to_crispins = {}
def log_ignored(exc):
log.error('Ignoring error: %s\nOuter stack:\n%s%s'
% (exc, ''.join(traceback.format_stack()[:-2]), traceback.format_exc(exc)))
def create_session(email_address):
new_session = UserSession()
new_session.g_email = email_address
new_session.session_token = str(uuid.uuid1())
db_session.add(new_session)
db_session.commit()
log.error("Created new session with token: %s" % str(new_session.session_token))
return new_session
def get_session(session_token):
session_obj = db_session.query(UserSession).filter_by(session_token=session_token).first()
if not session_obj:
log.error("No record for session with token: %s" % session_token)
return session_obj
def make_user(access_token_dict, existing=None):
if existing is None:
new_user = User()
else:
new_user = existing
# new_user.name = None
new_user.g_token_issued_to = access_token_dict['issued_to']
new_user.g_user_id = access_token_dict['user_id']
new_user.g_access_token = access_token_dict['access_token']
new_user.g_id_token = access_token_dict['id_token']
new_user.g_expires_in = access_token_dict['expires_in']
new_user.g_access_type = access_token_dict['access_type']
new_user.g_token_type = access_token_dict['token_type']
new_user.g_audience = access_token_dict['audience']
new_user.g_scope = access_token_dict['scope']
new_user.g_email = access_token_dict['email']
new_user.g_refresh_token = access_token_dict['refresh_token']
new_user.g_verified_email = access_token_dict['verified_email']
new_user.date = datetime.datetime.utcnow() # Used to verify key lifespan
new_user = db_session.add(new_user)
db_session.commit()
log.info("Stored new user object %s" % new_user)
return new_user
def get_user(email_address, callback=None):
user_obj = db_session.query(User).filter_by(g_email=email_address).first()
if not user_obj:
log.error("Should already have a user object...")
return None
return verify_user(user_obj)
def verify_user(user_obj):
issued_date = user_obj.date
expires_seconds = user_obj.g_expires_in
# TODO check with expire date first
expire_date = issued_date + datetime.timedelta(seconds=expires_seconds)
is_valid = google_oauth.validate_token(user_obj.g_access_token)
# TODO refresh tokens based on date instead of checking?
# if not is_valid or expire_date > datetime.datetime.utcnow():
if not is_valid:
log.error("Need to update access token!")
refresh_token = user_obj.g_refresh_token
log.error("Getting new access token...")
response = google_oauth.get_new_token(refresh_token) # TOFIX blocks
        response['refresh_token'] = refresh_token # Propagate it through
# TODO handling errors here for when oauth has been revoked
if 'error' in response:
log.error(response['error'])
if response['error'] == 'invalid_grant':
# Means we need to reset the entire oauth process.
log.error("Refresh token is invalid.")
return None
# TODO Verify it and make sure it's valid.
assert 'access_token' in response
user_obj = make_user(response, existing=user_obj)
log.info("Updated token for user %s" % user_obj.g_email)
return user_obj
def get_crispin_from_session(session_token):
""" Get the running crispin instance, or make a new one """
s = get_session(session_token)
return get_crispin_from_email(s.g_email)
def get_crispin_from_email(email_address, initial=False):
if email_address in email_address_to_crispins:
return email_address_to_crispins[email_address]
else:
user_obj = get_user(email_address)
assert user_obj is not None
crispin_client = crispin.CrispinClient(user_obj)
assert 'X-GM-EXT-1' in crispin_client.imap_server.capabilities(), "This must not be Gmail..."
email_address_to_crispins[email_address] = crispin_client
return crispin_client
def stop_all_crispins():
if not email_address_to_crispins: return
for e,c in email_address_to_crispins.iteritems():
c.stop()
|
Python
| 0.000001
|
@@ -1004,41 +1004,118 @@
dict
-, existing=None):%0A if existing
+):%0A user_obj = db_session.query(User).filter_by(g_email=access_token_dict%5B'email'%5D).first()%0A if user_obj
is
@@ -1175,24 +1175,24 @@
_user =
-existing
+user_obj
%0A # n
@@ -2002,27 +2002,16 @@
span%0A
- new_user =
db_sess
@@ -2052,17 +2052,16 @@
ommit()%0A
-%0A
log.
@@ -3599,27 +3599,8 @@
onse
-, existing=user_obj
)%0A
|
8465b0af6504844e8a7cb7cb35b7a8b20c356696
|
fix the channel names
|
plugins/tay.py
|
plugins/tay.py
|
from rtmbot.core import Plugin, Job
import os
import random
bot_id = 'U3MSN806S'
header = '<@{}> '.format(bot_id)
responses = {
'hi': 'Nice to meet you, where you been?',
'who should i vote for?': 'Me, of course',
'who are you': '@swiftonsecurity',
}
directory = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
all_lyrics = []
with open(os.path.join(directory, 'lyrics.txt')) as fp:
cur = ''
for line in fp:
if line.strip():
cur += line
else:
all_lyrics.append(cur.strip())
cur = ''
videos = []
with open(os.path.join(directory, 'videos.txt')) as fp:
for line in fp:
videos.append(line.strip())
class LyricJob(Job):
def run(self, slack_client):
content = random.choice(all_lyrics)
channels = slack_client.api_call('channels.list')['channels']
channel_id = None
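        # Look up the target channel's ID by name.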
for channel in channels:
if channel['name'] == 'aoeu':
channel_id = channel['id']
break
return [
[channel_id, "You are listening to :tay: radio. Don't forget to #votefortay"],
[channel_id, content],
]
class VideoJob(Job):
def run(self, slack_client):
channels = slack_client.api_call('channels.list')['channels']
channel_id = None
for channel in channels:
if channel['name'] == 'aoeu':
channel_id = channel['id']
break
messages = [
'This is so much better than a rickroll',
'There are 10 reasons why you should click this video. #4 will shock you',
'You must watch this video. @corey said so in #rubrics',
'Imma let you finish, but I am the greatest of all time, actually',
'I promise to stop posting videos if you vote for me',
]
content = '{} {}'.format(random.choice(messages), random.choice(videos))
return [
[channel_id, content],
]
class Tay(Plugin):
def register_jobs(self):
self.jobs.append(LyricJob(60 * 30))
self.jobs.append(VideoJob(60 * 30))
def process_message(self, data):
if data['text'].startswith(header):
message = data['text'][len(header):].strip().lower()
            if message in responses:
response = responses[message]
self.outputs.append([data['channel'], response])
self.outputs.append([data['channel'], ":tay::tay::tay:Don't forget to #votefortay :tay::tay::tay:"])
elif 'devops' in message:
self.outputs.append([data['channel'], "@tay loves devops. Even more than :max:"])
elif 'bobby' in message:
self.outputs.append([data['channel'], ":bobby: :heart: :tay:"])
elif 'win' in message:
self.outputs.append([data['channel'], "I believe in me"])
elif 'apple' in message or 'ios' in message:
self.outputs.append([data['channel'], "If you think Swift is the best programming language then you should #votefortay"])
else:
self.outputs.append([data['channel'], "I'm sorry, I'm afraid I can't answer that until you vote for me"])
|
Python
| 0.99939
|
@@ -961,36 +961,37 @@
nel%5B'name'%5D == '
-aoeu
+emoji
':%0A
@@ -1439,12 +1439,14 @@
== '
-aoeu
+random
':%0A
|
8fb5b379d99d29fdee6fca86d944687268ba8101
|
Format currency conversion using embeds. Also include a thousands separator in the value and add an emoji flag.
|
plugins/web.py
|
plugins/web.py
|
""" Plugin for web commands
Commands:
define
"""
import discord
from datetime import datetime
from pcbot import Annotate, utils
import plugins
client = plugins.client # type: discord.Client
# Create exchange rate cache and keep track of when we last reset it
exchange_rate_cache = dict(reset=client.time_started)
@plugins.command()
async def define(message: discord.Message, term: Annotate.LowerCleanContent):
""" Defines a term using Urban Dictionary. """
json = await utils.download_json("http://api.urbandictionary.com/v0/define", term=term)
assert json["list"], "Could not define `{}`.".format(term)
definitions = json["list"]
msg = ""
# Send any valid definition (length of message < 2000 characters)
for definition in definitions:
# Format example in code if there is one
if definition.get("example"):
definition["example"] = "```{}```".format(definition["example"])
# Format definition
msg = "**{word}**:\n{definition}{example}".format(**definition)
# If this definition fits in a message, break the loop so that we can send it
if len(msg) <= 2000:
break
# Cancel if the message is too long
assert len(msg) <= 2000, "Defining this word would be a bad idea."
await client.say(message, msg)
async def get_exchange_rate(base: str, symbol: str):
""" Returns the exchange rate between two currencies. """
# Return the cached result unless the last reset was 3 days ago or more
if (base, symbol) in exchange_rate_cache:
if (datetime.now() - exchange_rate_cache["reset"]).days >= 3:
exchange_rate_cache.clear()
exchange_rate_cache["reset"] = datetime.now()
else:
return exchange_rate_cache[(base, symbol)]
data = await utils.download_json("https://api.fixer.io/latest", base=base, symbols=symbol)
# Raise an error when the base is invalid
if "error" in data and data["error"].lower() == "invalid base":
raise ValueError("{} is not a valid currency".format(base))
# The API will not return errors on invalid symbols, so we check this manually
if not data["rates"]:
raise ValueError("{} is not a valid currency".format(symbol))
rate = data["rates"][symbol]
# Add both the exchange rate of the given order and the inverse to the cache
exchange_rate_cache[(base, symbol)] = rate
exchange_rate_cache[(symbol, base)] = 1 / rate
return rate
@plugins.command(aliases="ge currency cur")
async def convert(message: discord.Message, value: float, currency_from: str.upper, currency_to: str.upper):
""" Converts currency. """
try:
rate = await get_exchange_rate(currency_from, currency_to)
except ValueError as e:
await client.say(message, "**{}**".format(e))
else:
await client.say(message, "**{:.2f} {}**".format(value * rate, currency_to))
|
Python
| 0
|
@@ -2813,26 +2813,9 @@
ge,
-%22**%7B%7D**%22.format(e)
+e
)%0A
@@ -2834,48 +2834,104 @@
-await client.say(message, %22**
+flag = utils.text_to_emoji(currency_to%5B:2%5D)%0A e = discord.Embed(description=%22%7B%7D
%7B:
+,
.2f%7D %7B%7D
-**
%22.fo
@@ -2935,16 +2935,22 @@
.format(
+flag,
value *
@@ -2967,10 +2967,98 @@
ency_to)
+, color=message.author.color)%0A await client.send_message(message.channel, embed=e
)%0A
|
7baecddc4884b20f12107242f73b8722aa413006
|
Fix repeat number and seed selection.
|
plume/plume.py
|
plume/plume.py
|
#!/usr/bin/env python
import argparse
import logging
import os.path
import sys
import pexpect
from qrsim.tcpclient import UAVControls
import tables
import behaviors
from config import load_config
from client import TaskPlumeClient
import prediction
from recorder import ControlsRecorder, TargetsRecorder, TaskPlumeRecorder
logger = logging.getLogger(__name__)
class FilterLevelAboveOrEqual(object):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
class Controller(object):
def __init__(self, client, movement_behavior):
self.client = client
self.movement_behavior = movement_behavior
self.recorders = []
def add_recorder(self, recorder):
self.recorders.append(recorder)
def init_new_sim(self, seed):
self.client.reset_seed(seed)
self.client.reset()
# Ensure that all simulator variables have been set
self.step_keeping_position()
def run(self, num_steps):
for step in xrange(num_steps):
logger.info('Step %i', step + 1)
controls = self.movement_behavior.get_controls(
self.client.noisy_state,
self.client.get_plume_sensor_outputs())
self.client.step(self.client.timestep, controls)
for recorder in self.recorders:
recorder.record()
def step_keeping_position(self):
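        # Send zero-velocity controls for one timestep so the UAVs hold position.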
c = UAVControls(self.client.numUAVs, 'vel')
c.U.fill(0.0)
self.client.step(self.client.timestep, c)
def do_simulation_run(i, output_filename, conf, client):
with tables.openFile(output_filename, 'w') as fileh:
tbl = fileh.createVLArray(
'/', 'conf', tables.ObjectAtom(),
title='Configuration used to generate the stored data.')
tbl.append(conf)
fileh.createArray('/', 'repeat', [i], title='Number of repeat run.')
num_steps = conf['global_conf']['duration_in_steps']
kernel = conf['kernel'](prediction)
predictor = conf['predictor'](prediction, kernel)
if 'bounds' in conf:
predictor.bounds = conf['bounds']
if 'priors' in conf:
for i in range(len(conf['priors'])):
predictor.priors[i] = conf['priors'][i](prediction)
behavior = conf['behavior'](behaviors, predictor=predictor)
client = ControlsRecorder(fileh, client, num_steps)
controller = Controller(client, behavior)
controller.init_new_sim(conf['seedlist'][i])
recorder = TaskPlumeRecorder(fileh, client, predictor, num_steps)
recorder.init(conf['global_conf']['area'])
controller.add_recorder(recorder)
if hasattr(behavior, 'targets'):
targets_recorder = TargetsRecorder(
fileh, behavior, client.numUAVs, num_steps)
targets_recorder.init()
controller.add_recorder(targets_recorder)
controller.run(num_steps)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-q', '--quiet', action='store_true', help='Reduce output verbosity.')
parser.add_argument(
'-c', '--config', nargs=1, type=str, help='Configuration to load.')
parser.add_argument(
'-o', '--output', nargs=1, type=str, default=['plume'],
help='Output file name without extension (will be add automatically).')
parser.add_argument(
'-H', '--host', nargs=1, type=str,
help='Host running QRSim. If not given it will be tried to launch an '
'instance locally and connect to that.')
parser.add_argument(
'-P', '--port', nargs=1, type=int, default=[10000],
help='Port on which QRSim instance is listening.')
parser.add_argument(
'output_dir', nargs=1, type=str, help='Output directory.')
args = parser.parse_args()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.addFilter(FilterLevelAboveOrEqual(logging.WARNING))
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
root_logger = logging.getLogger()
if args.quiet:
root_logger.setLevel(logging.WARNING)
else:
root_logger.setLevel(logging.INFO)
root_logger.addHandler(stdout_handler)
root_logger.addHandler(stderr_handler)
conf = load_config(args.config[0])
with TaskPlumeClient() as client:
if args.host is not None:
client.connect_to(args.host[0], args.port[0])
else:
qrsim = pexpect.spawn(
'matlab -nodesktop -nosplash -r "'
"cd(fileparts(which('QRSimTCPServer')));"
"QRSimTCPServer(0);"
'quit;"',
timeout=120)
qrsim.logfile = sys.stdout
qrsim.expect(r'Listening on port: (\d+)')
port = int(qrsim.match.group(1))
client.connect_to('127.0.0.1', port)
num_steps = conf['global_conf']['duration_in_steps']
client.init(conf['task'], num_steps)
clean = True
for i in xrange(conf['repeats']):
try:
output_filename = os.path.join(
args.output_dir[0], args.output[0] + '.%i.h5' % i)
do_simulation_run(i, output_filename, conf, client)
except:
logger.exception('Repeat failed.')
clean = False
return clean
if __name__ == '__main__':
if main():
sys.exit(os.EX_OK)
else:
sys.exit(os.EX_SOFTWARE)
|
Python
| 0
|
@@ -1582,33 +1582,37 @@
_simulation_run(
-i
+trial
, output_filenam
@@ -1887,16 +1887,29 @@
teArray(
+%0A
'/', 're
@@ -1916,17 +1916,21 @@
peat', %5B
-i
+trial
%5D, title
@@ -2568,17 +2568,21 @@
dlist'%5D%5B
-i
+trial
%5D)%0A%0A
|
e8ffa23f83d13de6ef3e4fe61b5e481c5e6c1d5b
|
Add reference to bug report
|
setuptools/__init__.py
|
setuptools/__init__.py
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import distutils.core
import distutils.filelist
from distutils.core import Command as _Command
from distutils.util import convert_path
from fnmatch import fnmatchcase
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
from setuptools.depends import Require
from setuptools.compat import filterfalse
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out)
@staticmethod
def require_parents(packages):
"""
Exclude any apparent package that apparently doesn't include its
parent.
For example, exclude 'foo.bar' if 'foo' is not present.
"""
found = []
for pkg in packages:
base, sep, child = pkg.rpartition('.')
if base and base not in found:
continue
found.append(pkg)
yield pkg
@staticmethod
def _candidate_dirs(base_path):
"""
Return all dirs in base_path that might be packages.
"""
has_dot = lambda name: '.' in name
for root, dirs, files in os.walk(base_path, followlinks=True):
# Exclude directories that contain a period, as they cannot be
# packages. Mutate the list to avoid traversal.
dirs[:] = filterfalse(has_dot, dirs)
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path)
@classmethod
def _find_packages_iter(cls, base_path):
candidates = cls._candidate_dirs(base_path)
return (
path.replace(os.path.sep, '.')
for path in candidates
if cls._looks_like_package(os.path.join(base_path, path))
)
@staticmethod
def _looks_like_package(path):
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
# we can't patch distutils.cmd, alas
distutils.core.Command = Command
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir, followlinks=True):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
# fix findall bug in distutils.
distutils.filelist.findall = findall
|
Python
| 0
|
@@ -5155,17 +5155,52 @@
istutils
-.
+ (http://bugs.python.org/issue12885)
%0Adistuti
|
93240501a79f93b2efc20c8de24193697316fdb3
|
add noise & drag
|
simulator/num_model.py
|
simulator/num_model.py
|
import numpy as np
class Drone:
def __init__(self):
self.dt = 1E-5
self.time = 0.0
self.g = 9.80
self.gvec = np.array([0, 0, -self.g])
self.M = 1.250
self.R = 0.23
self.Iz = 0.25 * self.M * self.R**2
self.Ixy = self.Iz * 0.5
self.I = np.diag([self.Ixy, self.Ixy, self.Iz])
self.LIFT_K = 0.01
self.TDRAG_K = 0
self.pos = np.eye(4)
self.vel = np.zeros(3)
self.omega = np.zeros(3)
self.acc_sensor = np.zeros(3)
self.motor = np.zeros(4)
rz = self.R * (2 ** -0.5)
self.ppos = [
np.array([rz, rz, 0]),
np.array([rz, -rz, 0]),
np.array([-rz, -rz, 0]),
np.array([-rz, rz, 0]),
]
self.pdir = [-1, 1, -1, 1]
def rot(self):
return self.pos[:3, :3]
def diff_matrix(self, dt):
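        # First-order (small-angle) update of the rigid-body transform over dt.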
vx, vy, vz = self.vel * dt
wx, wy, wz = self.omega * dt
ret = np.array([
[1, -wz, wy, vx],
[wz, 1, -wx, vy],
[-wy, wx, 1, vz],
[0, 0, 0, 1.],
])
return ret
def lift(self, pomega):
return self.LIFT_K * pomega
def force(self, lifts):
f = np.array([0, 0, sum(lifts)])
return f
def torque(self, lifts, pomega):
tau = np.zeros(3)
for i in range(4):
lf = np.array([0, 0, lifts[i]])
tau += np.cross(self.ppos[i], lf)
return tau
def set_motors(self, motor):
self.motor = motor
def step(self):
pomega = self.motor
rot = self.rot()
lifts = [self.lift(x) for x in pomega]
force_int = self.force(lifts)
torque_int = self.torque(lifts, pomega)
force_ref = np.dot(rot, force_int) + self.M * self.gvec
torque_ref = np.dot(rot, torque_int)
I_ref = np.dot(rot, self.I)
omega_ref = self.omega
self.acc_sensor = force_int / self.M
acc_ref = force_ref / self.M
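        # Euler's rigid-body equation: I * dw/dt = tau - omega x (I omega).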
rotacc_ref = np.dot(
np.linalg.inv(I_ref),
torque_ref - np.cross(omega_ref, np.dot(I_ref, omega_ref))
)
dmx = self.diff_matrix(self.dt)
self.pos = np.dot(dmx, self.pos)
self.vel += acc_ref * self.dt
self.omega += rotacc_ref * self.dt
self.time += self.dt
def get_time(self):
return self.time
def get_sensors(self):
return self.acc_sensor, np.dot(np.linalg.inv(self.rot()), self.omega)
def get_position(self):
return self.pos[:3, 3]
def get_orientation(self):
return np.dot(self.rot(), np.array([0, 0, 1.0]))
def set_init(self, vel, omega):
self.vel = np.array(vel, dtype=np.float64)
self.omega = np.array(omega, dtype=np.float64)
if __name__ == '__main__':
drone = Drone()
drone.step()
|
Python
| 0
|
@@ -394,16 +394,105 @@
AG_K = 0
+%0A self.DRAG_B = 0.5%0A%0A self.noise_acc = 0.07%0A self.noise_omega = 0.02
%0A%0A
@@ -1908,16 +1908,41 @@
elf.gvec
+ - self.DRAG_B * self.vel
%0A
@@ -2555,30 +2555,29 @@
f):%0A
-return
+acc =
self.acc_se
@@ -2584,55 +2584,180 @@
nsor
-, np.dot(np.linalg.inv(self.rot()), self.omega)
+ + np.random.normal(scale=self.noise_acc)%0A omega = np.dot(np.linalg.inv(self.rot()), self.omega) + np.random.normal(scale=self.noise_omega)%0A return acc, omega
%0A%0A
|
9ece50e71d2c5eab7b97edc5b8bbdfb410ce64bf
|
edit admin.py
|
polls/admin.py
|
polls/admin.py
|
from django.contrib import admin
from polls.models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin)
|
Python
| 0
|
@@ -377,16 +377,65 @@
eInline%5D
+%0A list_display = ('question_text', 'pub_date')
%0A%0Aadmin.
|
5b63950cb3fa018b63ccb0be3faeceef684f9299
|
Add persistent storage
|
ipinfo.py
|
ipinfo.py
|
from ipaddress import ip_address, ip_network
from ipwhois import IPWhois
known_networks = {}
def updateIpInfo(ip):
info = IPWhois(ip).lookup()
# these two lines might break on some input
net = info['nets'][0]
networks = net['cidr'].split(', ')
for network in networks:
network = ip_network(network)
known_networks.update({network: net})
return net
def getIpInfo(ip):
ip = ip_address(ip)
for network in known_networks:
if ip in network:
info = known_networks[network]
return info
info = updateIpInfo(ip)
return info
def getISP(ip):
net = getIpInfo(ip)
return net['description']
def getCountry(ip):
net = getIpInfo(ip)
return net['country']
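# Usage sketch (the address is illustrative): the first call triggers a
# whois lookup and caches the covering CIDR block(s) in known_networks,
# so the second call for the same network is answered from the cache.
#   getISP('8.8.8.8')      # network lookup, caches the returned CIDR(s)
#   getCountry('8.8.8.8')  # no new lookup; hits the cached network entry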
|
Python
| 0.000003
|
@@ -37,16 +37,30 @@
_network
+%0Aimport shelve
%0A%0Afrom i
@@ -86,27 +86,100 @@
is%0A%0A
-known_networks = %7B%7D
+ip_whois_shelve_filename = 'ip_whois'%0Aknown_networks = shelve.open(ip_whois_shelve_filename)
%0A%0Ade
@@ -394,27 +394,15 @@
k =
-ip_
network
-(network)
%0A
@@ -555,23 +555,35 @@
f ip in
+ip_
network
+(network)
:%0A
|
8f6c73238b5b52bdd7feb63fc1f1f28ed12e7e3e
|
Update render method implementation in MultiCurve and MultiSurface classes
|
geomdl/Multi.py
|
geomdl/Multi.py
|
"""
.. module:: Multi
:platform: Unix, Windows
:synopsis: Container module for storage and visualization of curves and surfaces
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
from . import warnings
from . import Abstract
from . import utilities
class MultiCurve(Abstract.Multi):
""" Container class for storing multiple curves.
Rendering depends on the visualization instance, e.g. if you are using ``VisMPL`` module,
you can visualize a 3D curve using a ``VisCurve2D`` instance
but you cannot visualize a 2D curve with a ``VisCurve3D`` instance.
"""
def __init__(self):
super(MultiCurve, self).__init__()
self._instance = Abstract.Curve
def render(self):
""" Renders the curve the using the visualization component.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
"""
if not self._vis_component:
warnings.warn("No visualization component has set")
return
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
elem.sample_size = self._sample_size
elem.evaluate()
color = utilities.color_generator()
self._vis_component.add(ptsarr=elem.ctrlpts,
name="Control Points " + str(idx + 1),
color=color[0],
plot_type='ctrlpts')
self._vis_component.add(ptsarr=elem.curvepts,
name="Curve " + str(idx + 1),
color=color[1],
plot_type='evalpts')
self._vis_component.render()
class MultiSurface(Abstract.Multi):
""" Container class for storing multiple surfaces. """
def __init__(self):
super(MultiSurface, self).__init__()
self._instance = Abstract.Surface
def render(self):
""" Renders the surface the using the visualization component.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
"""
if not self._vis_component:
warnings.warn("No visualization component has set")
return
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
elem.sample_size = self._sample_size
elem.evaluate()
color = utilities.color_generator()
self._vis_component.add(ptsarr=elem.ctrlpts,
size=[elem.ctrlpts_size_u, elem.ctrlpts_size_v],
name="Control Points " + str(idx + 1),
color=color[0],
plot_type='ctrlpts')
self._vis_component.add(ptsarr=elem.surfpts,
size=[elem.sample_size, elem.sample_size],
name="Surface " + str(idx + 1),
color=color[1],
plot_type='evalpts')
self._vis_component.render()
|
Python
| 0
|
@@ -711,32 +711,42 @@
def render(self
+, **kwargs
):%0A %22%22%22 R
@@ -913,138 +913,427 @@
od.%0A
- %22%22%22%0A if not self._vis_component:%0A warnings.warn(%22No visualization component has set%22)%0A return
+%0A Keyword Arguments:%0A%0A * %60%60cpcolor%60%60: sets the color of the control points grid%0A * %60%60evalcolor%60%60: sets the color of the surface%0A%0A %22%22%22%0A if not self._vis_component:%0A warnings.warn(%22No visualization component has set%22)%0A return%0A%0A # Get the color values from keyword arguments%0A cpcolor = kwargs.get('cpcolor')%0A evalcolor = kwargs.get('evalcolor')
%0A%0A
@@ -1751,32 +1751,68 @@
color=
+cpcolor if cpcolor is not None else
color%5B0%5D,%0A
@@ -2020,32 +2020,72 @@
color=
+evalcolor if evalcolor is not None else
color%5B1%5D,%0A
@@ -2397,24 +2397,34 @@
render(self
+, **kwargs
):%0A %22
@@ -2597,138 +2597,427 @@
od.%0A
- %22%22%22%0A if not self._vis_component:%0A warnings.warn(%22No visualization component has set%22)%0A return
+%0A Keyword Arguments:%0A%0A * %60%60cpcolor%60%60: sets the color of the control points grid%0A * %60%60evalcolor%60%60: sets the color of the surface%0A%0A %22%22%22%0A if not self._vis_component:%0A warnings.warn(%22No visualization component has set%22)%0A return%0A%0A # Get the color values from keyword arguments%0A cpcolor = kwargs.get('cpcolor')%0A evalcolor = kwargs.get('evalcolor')
%0A%0A
@@ -3524,24 +3524,60 @@
color=
+cpcolor if cpcolor is not None else
color%5B0%5D,%0A
@@ -3877,16 +3877,56 @@
color=
+evalcolor if evalcolor is not None else
color%5B1%5D
|
e0e23391d0ec035926644cfb7d1e7b1f43f35cf7
|
Avoid the key error
|
get_timeline.py
|
get_timeline.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from configparser import ConfigParser
import datetime
import json
import os
import sys
from requests_oauthlib import OAuth1Session
from logging import FileHandler, Formatter, getLogger, DEBUG
config = ConfigParser()
config.read(os.path.abspath(os.path.dirname(__file__)) + '/config.ini')
tw_consumer_key = config.get('DEFAULT', 'tw_consumer_key')
tw_consumer_secret = config.get('DEFAULT', 'tw_consumer_secret')
tw_access_token = config.get('DEFAULT', 'tw_access_token')
tw_access_token_secret = config.get('DEFAULT', 'tw_access_token_secret')
max_parsed_id_file = config.get('DEFAULT', 'max_parsed_id_file')
timeline_json_file = config.get('DEFAULT', 'timeline_json_file')
log_file = config.get('DEFAULT', 'log_file')
handler = FileHandler(filename=log_file)
form = Formatter(
fmt='%(asctime)s %(levelname)s %(module)s "%(message)s"')
handler.setFormatter(form)
logger = getLogger(__name__)
logger.setLevel(DEBUG)
logger.addHandler(handler)
def get_max_parsed_id():
try:
with open(max_parsed_id_file, 'r') as file:
id_str = file.read()
except FileNotFoundError:
id_str = '0'
if len(id_str) == 0:
id_str = '0'
return int(id_str)
def put_max_parsed_id(id):
with open(max_parsed_id_file, 'w') as file:
file.write('{0}\n'.format(id))
def get_timeline():
url = 'https://api.twitter.com/1.1/statuses/home_timeline.json'
params = {'count': 200}
max_parsed_id = get_max_parsed_id()
if max_parsed_id > 0:
params.update({'since_id': max_parsed_id})
twitter = OAuth1Session(tw_consumer_key,
tw_consumer_secret,
tw_access_token,
tw_access_token_secret)
req = twitter.get(url, params=params)
if req.status_code == 200:
current_tl_json = req.text
timeline = json.loads(current_tl_json)
tweets_count = len(timeline)
if tweets_count > 0:
top = timeline[0]
max_parsed_id = top['id']
try:
with open(timeline_json_file, 'r') as file:
old_tl_json = file.read()
new_tl_json = current_tl_json[:-1] + ',' + old_tl_json[1:]
except FileNotFoundError:
new_tl_json = current_tl_json
with open(timeline_json_file, 'w') as file:
file.write(new_tl_json.strip())
put_max_parsed_id(max_parsed_id)
limit = req.headers['x-rate-limit-remaining']
reset = req.headers['x-rate-limit-reset']
utc = datetime.datetime.utcfromtimestamp(int(reset))
logger.info('Number of tweets: {0}'.format(tweets_count))
logger.info('API remain: {0}'.format(limit))
logger.info(
'API reset: {0:%a, %d %b %Y %H:%M:%S +0000}'.format(utc))
return True
else:
logger.info('Number of tweets: {0}'.format(tweets_count))
return True
else:
logger.info('HTTP Status Code: {0}'.format(req.status_code))
return False
def main():
get_timeline()
if __name__ == '__main__':
main()
sys.exit(0)
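# Incremental-fetch note: on the first run max_parsed_id is 0, so the
# request omits since_id and pulls up to 200 timeline tweets; the id of
# the newest tweet is then persisted via put_max_parsed_id(), and every
# later run passes it as since_id so only newer tweets are fetched.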
|
Python
| 0.999999
|
@@ -2522,27 +2522,80 @@
l
-imit = req.headers%5B
+ogger.info('Number of tweets: %7B0%7D'.format(tweets_count))%0A if
'x-r
@@ -2614,18 +2614,35 @@
maining'
-%5D%0A
+ in req.headers:%0A
@@ -2645,20 +2645,22 @@
-rese
+ limi
t = req.
@@ -2683,19 +2683,23 @@
limit-re
-set
+maining
'%5D%0A
@@ -2709,188 +2709,240 @@
-utc = datetime.datetime.utcfromtimestamp(int(reset))%0A logger.info('Number of tweets: %7B0%7D'.format(tweets_count))%0A logger.info('API remain: %7B0%7D'.format(limit))%0A
+ logger.info('API remain: %7B0%7D'.format(limit))%0A if 'x-rate-limit-reset' in req.headers:%0A reset = req.headers%5B'x-rate-limit-reset'%5D%0A utc = datetime.datetime.utcfromtimestamp(int(reset))%0A
@@ -2962,16 +2962,20 @@
r.info(%0A
+
@@ -3040,16 +3040,17 @@
t(utc))%0A
+%0A
|
f63ddd4c4e98322fcff651aefb298f7724dc9bff
|
Add help text, rename 'history' to graph
|
basil.py
|
basil.py
|
import subprocess
import time
import tempfile
last_watered = 0
COOLDOWN = 60
def basilcmd(cmds):
output = subprocess.check_output(['ssh', 'rrpi', './basilbot/cli.py', *cmds], stderr=subprocess.STDOUT)
return output
def moisture():
output = basilcmd(['moisture'])
return 'Soil moisture content: ' + output.decode().strip() + '%'
def history(num=12):
    output = basilcmd(['history', str(num)])
return '```' + output.decode().strip() + '```'
def water(runtime):
global last_watered, COOLDOWN
dt = time.time() - last_watered
if runtime <= 0:
return "Nice try, you won't fool me with that one again."
if runtime > 60:
return 'Please only water me between 0 and 60 seconds.'
if dt < COOLDOWN:
return 'I was watered %d second(s) ago, but you may tend to me again in a mere %d second(s)' % (int(dt), int(COOLDOWN - dt))
else:
output = basilcmd(['water', str(runtime)])
if output.decode().strip() == 'OK':
last_watered = time.time()
return str(runtime) + " seconds of S i p p"
else:
return "Hydration subsystem reported error: " + output.decode().strip()
def ghistory(samples):
data = basilcmd(['raw_history', str(samples)])
image = tempfile.NamedTemporaryFile(delete=False)
subprocess.run(['gnuplot', 'basil_history.gnuplot'], stdout=image, input=data)
image.close()
return image.name
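# Cooldown sketch: a second water() call within COOLDOWN seconds never
# reaches basilcmd; it returns the "watered %d second(s) ago" message.
#   water(5)   # "5 seconds of S i p p" (when the pi replies OK)
#   water(5)   # "I was watered 0 second(s) ago, but you may tend to ..."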
|
Python
| 0.999822
|
@@ -72,17 +72,50 @@
WN = 60%0A
-%0A
+WATER_MAX_SECS = 60%0A%0AHELPTEXT = %7B%7D
%0A%0Adef ba
@@ -255,16 +255,85 @@
output%0A%0A
+HELPTEXT%5B'moisture'%5D = 'Check the instantaneous moisture of the pot'%0A
def mois
@@ -446,16 +446,101 @@
+ '%25'%0A%0A
+HELPTEXT%5B'history %5BN%5D'%5D = 'Print %5BN%5D of the automatic hourly moisture measurements.'%0A
def hist
@@ -644,16 +644,79 @@
'%60%60%60'%0A%0A
+HELPTEXT%5B'water %5Btime%5D'%5D = 'Dispense water for %5Btime%5D seconds'%0A
def wate
@@ -760,16 +760,32 @@
COOLDOWN
+, WATER_MAX_SECS
%0A dt
@@ -917,18 +917,25 @@
ntime %3E
-60
+WATER_MAX
:%0A
@@ -943,17 +943,17 @@
return
-'
+%22
Please o
@@ -979,18 +979,18 @@
n 0 and
-60
+%25d
seconds
@@ -990,17 +990,34 @@
seconds.
-'
+%22 %25 WATER_MAX_SECS
%0A if
@@ -1046,17 +1046,17 @@
return
-'
+%22
I was wa
@@ -1130,17 +1130,17 @@
econd(s)
-'
+%22
%25 (int(
@@ -1467,20 +1467,100 @@
()%0A%0A
-def ghistory
+HELPTEXT%5B'graph %5BN%5D'%5D = 'Graph %5BN%5D of the automatic hourly moisture measurements.'%0Adef graph
(sam
@@ -1794,8 +1794,307 @@
ge.name%0A
+%0A%0AHELPTEXT%5B'help %5Bcommand%5D'%5D = 'Get detailed help for %5Bcommand%5D'%0Adef help(cmd):%0A str = ''%0A try:%0A str += '!basil %25s: %25s%5Cn' %25 (cmd, HELPTEXT%5Bcmd%5D)%0A except KeyError:%0A str += 'Basil commands:%5Cn'%0A for text in HELPTEXT:%0A str += '!basil %25s%5Cn' %25 text%0A return str%0A
|
6fc945ba01ec4a99cef3b2e1491646095e506f72
|
debug output
|
autobahn/autobahn/wamp/router.py
|
autobahn/autobahn/wamp/router.py
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import message
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.broker import Broker
from autobahn.wamp.dealer import Dealer
from autobahn.wamp.interfaces import IRouter, IRouterFactory
class Router:
"""
Basic WAMP router.
This class implements :class:`autobahn.wamp.interfaces.IRouter`.
"""
def __init__(self, factory, realm, options = None):
"""
Ctor.
:param factory: The router factory this router was created by.
:type factory: Object that implements :class:`autobahn.wamp.interfaces.IRouterFactory`..
:param realm: The realm this router is working for.
:type realm: str
:param options: Router options.
:type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
"""
self.factory = factory
self.realm = realm
self._options = options or types.RouterOptions()
self._broker = Broker(realm, self._options)
self._dealer = Dealer(realm, self._options)
self._attached = 0
def attach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.attach`
"""
self._broker.attach(session)
self._dealer.attach(session)
self._attached += 1
return [self._broker._role_features, self._dealer._role_features]
def detach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.detach`
"""
self._broker.detach(session)
self._dealer.detach(session)
self._attached -= 1
if not self._attached:
self.factory.onLastDetach(self)
def process(self, session, msg):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.process`
"""
## Broker
##
if isinstance(msg, message.Publish):
self._broker.processPublish(session, msg)
elif isinstance(msg, message.Subscribe):
self._broker.processSubscribe(session, msg)
elif isinstance(msg, message.Unsubscribe):
self._broker.processUnsubscribe(session, msg)
## Dealer
##
elif isinstance(msg, message.Register):
self._dealer.processRegister(session, msg)
elif isinstance(msg, message.Unregister):
self._dealer.processUnregister(session, msg)
elif isinstance(msg, message.Call):
self._dealer.processCall(session, msg)
elif isinstance(msg, message.Cancel):
self._dealer.processCancel(session, msg)
elif isinstance(msg, message.Yield):
self._dealer.processYield(session, msg)
elif isinstance(msg, message.Error) and msg.request_type == message.Invocation.MESSAGE_TYPE:
self._dealer.processInvocationError(session, msg)
else:
raise ProtocolError("Unexpected message {}".format(msg.__class__))
IRouter.register(Router)
class RouterFactory:
"""
Basic WAMP Router factory.
This class implements :class:`autobahn.wamp.interfaces.IRouterFactory`.
"""
def __init__(self, options = None, debug = False):
"""
Ctor.
:param options: Default router options.
:type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
"""
self._routers = {}
self.debug = debug
self._options = options or types.RouterOptions()
def get(self, realm):
"""
Implements :func:`autobahn.wamp.interfaces.IRouterFactory.get`
"""
if not realm in self._routers:
self._routers[realm] = Router(self, realm, self._options)
if self.debug:
print("Router created for realm '{}'".format(realm))
return self._routers[realm]
def onLastDetach(self, router):
assert(router.realm in self._routers)
del self._routers[router.realm]
if self.debug:
print("Router destroyed for realm '{}'".format(router.realm))
IRouterFactory.register(RouterFactory)
|
Python
| 0.000011
|
@@ -1658,32 +1658,56 @@
ons%60.%0A %22%22%22%0A
+ self.debug = True%0A
self.facto
@@ -2613,24 +2613,84 @@
%60%0A %22%22%22%0A
+ if self.debug:%0A print(%22Router.process%22, msg)%0A%0A
## Bro
|
4118f51ade43d3748bc71f8502de0c43a8c3e39e
|
disable router debug
|
autobahn/autobahn/wamp/router.py
|
autobahn/autobahn/wamp/router.py
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import message
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.broker import Broker
from autobahn.wamp.dealer import Dealer
from autobahn.wamp.interfaces import IRouter, IRouterFactory
class Router:
"""
Basic WAMP router.
This class implements :class:`autobahn.wamp.interfaces.IRouter`.
"""
def __init__(self, factory, realm, options = None):
"""
Ctor.
:param factory: The router factory this router was created by.
:type factory: Object that implements :class:`autobahn.wamp.interfaces.IRouterFactory`..
:param realm: The realm this router is working for.
:type realm: str
:param options: Router options.
:type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
"""
self.debug = True
self.factory = factory
self.realm = realm
self._options = options or types.RouterOptions()
self._broker = Broker(realm, self._options)
self._dealer = Dealer(realm, self._options)
self._attached = 0
def attach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.attach`
"""
self._broker.attach(session)
self._dealer.attach(session)
self._attached += 1
return [self._broker._role_features, self._dealer._role_features]
def detach(self, session):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.detach`
"""
self._broker.detach(session)
self._dealer.detach(session)
self._attached -= 1
if not self._attached:
self.factory.onLastDetach(self)
def process(self, session, msg):
"""
Implements :func:`autobahn.wamp.interfaces.IRouter.process`
"""
if self.debug:
print("Router.process: {}".format(msg))
## Broker
##
if isinstance(msg, message.Publish):
self._broker.processPublish(session, msg)
elif isinstance(msg, message.Subscribe):
self._broker.processSubscribe(session, msg)
elif isinstance(msg, message.Unsubscribe):
self._broker.processUnsubscribe(session, msg)
## Dealer
##
elif isinstance(msg, message.Register):
self._dealer.processRegister(session, msg)
elif isinstance(msg, message.Unregister):
self._dealer.processUnregister(session, msg)
elif isinstance(msg, message.Call):
self._dealer.processCall(session, msg)
elif isinstance(msg, message.Cancel):
self._dealer.processCancel(session, msg)
elif isinstance(msg, message.Yield):
self._dealer.processYield(session, msg)
elif isinstance(msg, message.Error) and msg.request_type == message.Invocation.MESSAGE_TYPE:
self._dealer.processInvocationError(session, msg)
else:
raise ProtocolError("Unexpected message {}".format(msg.__class__))
IRouter.register(Router)
class RouterFactory:
"""
Basic WAMP Router factory.
This class implements :class:`autobahn.wamp.interfaces.IRouterFactory`.
"""
def __init__(self, options = None, debug = False):
"""
Ctor.
:param options: Default router options.
:type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
"""
self._routers = {}
self.debug = debug
self._options = options or types.RouterOptions()
def get(self, realm):
"""
Implements :func:`autobahn.wamp.interfaces.IRouterFactory.get`
"""
if not realm in self._routers:
self._routers[realm] = Router(self, realm, self._options)
if self.debug:
print("Router created for realm '{}'".format(realm))
return self._routers[realm]
def onLastDetach(self, router):
assert(router.realm in self._routers)
del self._routers[router.realm]
if self.debug:
print("Router destroyed for realm '{}'".format(router.realm))
IRouterFactory.register(RouterFactory)
|
Python
| 0
|
@@ -1689,11 +1689,12 @@
g =
-Tru
+Fals
e%0A
|
6c3ba0617575d2d178c2f1a2632cea5f5ba09d5f
|
Revert "Updated License"
|
grabLocation.py
|
grabLocation.py
|
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderTimedOut
import time # For rate limiting purposes
# Using Geopy copyright 2006-2016 geopy authors
# Geopy available at https://github.com/geopy/geopy
# This program is copyright 2017 Joseph Johaneman
# And is released under the MIT License
# What this does: this program reads in a list of school names and
# prints out a tab-separated list of school name, latitude, and
# longitude. Obviously, this output can be written to a text file
# by redirecting standard output
# Create the Google Maps API Object. Note you need an API Key
googleLocator=GoogleV3(api_key='<Your Google Map Geoencode API Key>')
# First we need the list of schools
filename="SchoolList.txt"
# Create a list to score the School Names loaded them from a file
with open(filename) as f:
schools=f.read().splitlines()
# print header
print "School Name\tLatitude\tLongitude"
# Loop through the school names and get locations
for i in schools:
try: #Exception handling is important!
location=googleLocator.geocode(i, exactly_one=True)
except GeocoderTimedOut: # in case we time out:
print i, "\t0\t-1" # print 0, -1. We'll check for it later
else: # Okay we didn't time out
if location != None: # if we find something
print i, "\t", location.latitude, "\t", location.longitude #print it
else: # Didn't find it. Print zeroes
print i, "\t0\t0" # otherwise print 0s. We'll check for it later
time.sleep(.3) # This waits 300 milliseconds between requests to be nice
# Note: I chose to print 0, -1 for timeouts and 0, 0 for not found so I'd know
# how many exceptions were thrown.
|
Python
| 0
|
@@ -297,11 +297,20 @@
the
-MIT
+3 Clause BSD
Lic
|
1cb55e4feec981efdf629b01ed7508f825b6c2c0
|
add comment TODO
|
StockIndicators/StockIndicators.py
|
StockIndicators/StockIndicators.py
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
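# Wiring sketch (the enclosing Flask app is hypothetical):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(api_si)
#   # GET /stock_indicators -> {"stock_indicators": [{"username": ...}, ...]}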
|
Python
| 0
|
@@ -90,16 +90,75 @@
ame__)%0A%0A
+#TODO: try adding a security token to see if it works%0A
@api_si.
|
2a15c18b5d29672fc309a17fd5440cb02619f0c6
|
Change the method's docstring to actually fit the code.
|
uniformdh.py
|
uniformdh.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module implements a class to deal with Uniform Diffie-Hellman handshakes.
The class `UniformDH' is used by the server as well as by the client to handle
the Uniform Diffie-Hellman handshake used by ScrambleSuit.
"""
import const
import random
import util
import mycrypto
import obfsproxy.transports.obfs3_dh as obfs3_dh
import obfsproxy.transports.base as base
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
class UniformDH( object ):
"""
Provide methods to deal with Uniform Diffie-Hellman handshakes.
The class provides methods to extract public keys and to generate public
keys wrapped in a valid UniformDH handshake.
"""
def __init__( self, sharedSecret, weAreServer ):
"""
Initialise a UniformDH object.
"""
# `True' if we are the server; `False' otherwise.
self.weAreServer = weAreServer
# The shared UniformDH secret.
self.sharedSecret = sharedSecret
# Cache a UniformDH public key until it's added to the replay table.
self.remotePublicKey = None
# Uniform Diffie-Hellman object (implemented in obfs3_dh.py).
self.udh = None
def getRemotePublicKey( self ):
"""
Return the cached remote UniformDH public key.
"""
return self.remotePublicKey
def receivePublicKey( self, data, callback ):
"""
Extract the public key and invoke a callback with the master secret.
First, the UniformDH public key is extracted out of `data'. Then, the
shared master secret is computed and `callback' is invoked with the
master secret as argument. If any of this fails, `False' is returned.
"""
# Extract the public key sent by the remote host.
remotePublicKey = self._extractPublicKey(data)
if not remotePublicKey:
return False
if self.weAreServer:
self.remotePublicKey = remotePublicKey
# As server, we need a DH object; as client, we already have one.
self.udh = obfs3_dh.UniformDH()
assert self.udh is not None
try:
masterKey = self.udh.get_secret(remotePublicKey)
except ValueError:
raise base.PluggableTransportError("Corrupted public key.")
# Truncate remainder of 1536-bit UniformDH group.
masterKey = masterKey[:const.MASTER_KEY_LENGTH]
# Derive the session keys from the newly obtained master key.
callback(masterKey)
return True
def _extractPublicKey( self, data ):
"""
Extract and return a UniformDH public key out of `data'.
Before the public key is touched, the HMAC is verified. If the HMAC is
invalid or some other error occurs, `False' is returned. Otherwise,
the public key is returned. The extracted data is finally drained from
the given `data' object.
"""
assert self.sharedSecret is not None
# Do we already have the minimum amount of data?
if len(data) < (const.PUBLIC_KEY_LENGTH + const.MARKER_LENGTH +
const.HMAC_LENGTH):
return False
log.debug("Attempting to extract UniformDH public key out of %d bytes "
"of data." % len(data))
handshake = data.peek()
# First, find the marker to efficiently locate the HMAC.
publicKey = handshake[:const.PUBLIC_KEY_LENGTH]
marker = mycrypto.HMAC_SHA256_128(self.sharedSecret,
self.sharedSecret + publicKey)
index = util.locateMarker(marker, handshake)
if not index:
return False
# Now that we know where the authenticating HMAC is: verify it.
hmacStart = index + const.MARKER_LENGTH
existingHMAC = handshake[hmacStart : (hmacStart + const.HMAC_LENGTH)]
myHMAC = mycrypto.HMAC_SHA256_128(self.sharedSecret,
handshake[0 : hmacStart] +
util.getEpoch())
if not util.isValidHMAC(myHMAC, existingHMAC):
return False
data.drain(index + const.MARKER_LENGTH + const.HMAC_LENGTH)
return handshake[:const.PUBLIC_KEY_LENGTH]
def createHandshake( self ):
"""
Create and return a ready-to-be-sent UniformDH handshake.
The returned handshake data includes the public key, pseudo-random
padding, the marker and the HMAC. If `publicKey' is not given (which
is the case for the client), a new public key is created using the
Diffie-Hellman object.
"""
assert self.sharedSecret is not None
log.debug("Creating UniformDH handshake message.")
if self.udh is None:
self.udh = obfs3_dh.UniformDH()
publicKey = self.udh.get_public()
# Subtract the length of the public key to make the handshake on
# average as long as a redeemed ticket.
padding = mycrypto.weakRandom(random.randint(0,
const.MAX_PADDING_LENGTH -
const.PUBLIC_KEY_LENGTH))
# Add a marker which enables efficient location of the HMAC.
marker = mycrypto.HMAC_SHA256_128(self.sharedSecret,
self.sharedSecret + publicKey)
# Authenticate the handshake including the current approximate epoch.
mac = mycrypto.HMAC_SHA256_128(self.sharedSecret,
publicKey + padding + marker +
util.getEpoch())
return publicKey + padding + marker + mac
# Alias class name in order to provide a more intuitive API.
new = UniformDH
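# Round-trip sketch (the shared secret and the drainable buffer wrapper
# are assumptions; both peers must hold the same secret):
#   client = new(sharedSecret, weAreServer=False)
#   server = new(sharedSecret, weAreServer=True)
#   hs = client.createHandshake()      # publicKey | padding | marker | HMAC
#   server.receivePublicKey(buf(hs), keyCallback)  # locates the marker,
#                                                  # verifies the HMAC and
#                                                  # derives the master key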
|
Python
| 0
|
@@ -4578,144 +4578,90 @@
If
-%60publicKey' is not given (which%0A is the case for the client), a new public key is created using the%0A Diffie-Hellman object
+a UniformDH object has not been%0A initialised yet, a new instance is created
.%0A
|
13a5e797bf3c268ae42dda79c75959ca0602096f
|
Update unit tests script.
|
unit_test.py
|
unit_test.py
|
#!/usr/bin/python3
import glob
import re
import subprocess
import os
import os.path
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
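# e.g. sorted(['TestCase10.blend', 'TestCase2.blend'], key=natural_keys)
#      -> ['TestCase2.blend', 'TestCase10.blend'] (numeric, not lexical)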
test_cases = sorted(glob.glob("models/TestCase*.blend"), key=natural_keys)
if not os.path.exists("test_results"):
os.makedirs("test_results")
num_samples = 10000
def run_test(test_case, technique, beta):
output = os.path.join("test_results", os.path.basename(test_case[:-6]) + "." + technique + str(beta) + ".exr")
command = ["master", test_case, "--" + technique, "--parallel", "--beta=" + str(beta), "--output=" + output, "--num-samples=" + str(num_samples), "--batch"]
print(" ".join(command))
subprocess.run(command)
for test_case in test_cases:
for technique in ["PT", "BPT", "UPG"]:
for beta in [0, 1, 2]:
run_test(test_case, technique, beta)
|
Python
| 0
|
@@ -617,17 +617,20 @@
beta):%0A
-%09
+
output =
@@ -732,17 +732,59 @@
%22.exr%22)%0A
-%09
+%0A if not os.path.exists(output):%0A
command
@@ -932,17 +932,23 @@
batch%22%5D%0A
-%09
+
print(%22
@@ -964,17 +964,23 @@
mmand))%0A
-%09
+
subproce
@@ -1025,17 +1025,20 @@
_cases:%0A
-%09
+
for tech
@@ -1068,18 +1068,24 @@
%22UPG%22%5D:%0A
-%09%09
+
for beta
@@ -1103,11 +1103,20 @@
2%5D:%0A
-%09%09%09
+
run_
|
7ccb8d443fe3dda236d05ff4ed13e067a6893872
|
create a changeset against local before changing local; we'll use this later
|
updatecmd.py
|
updatecmd.py
|
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import changeset
import os
import sys
import util
import versions
def doUpdate(repos, db, cfg, pkg, versionStr = None):
cs = None
if not os.path.exists(cfg.root):
util.mkdirChain(cfg.root)
if os.path.exists(pkg):
# there is a file, try to read it as a changeset file
if versionStr:
sys.stderr.write("Verison should not be specified when a SRS "
"change set is being installed.\n")
return 1
try:
cs = changeset.ChangeSetFromFile(pkg)
except KeyError:
# invalid changeset file
pass
else:
if cs.isAbstract():
newcs = db.rootChangeSet(cs, cfg.defaultbranch)
if newcs:
cs = newcs
if not cs:
# so far no changeset (either the path didn't exist or we could not
    # read it)
if pkg and pkg[0] != ":":
pkg = cfg.packagenamespace + ":" + pkg
if versionStr and versionStr[0] != "/":
versionStr = cfg.defaultbranch.asString() + "/" + versionStr
if versionStr:
newVersion = versions.VersionFromString(versionStr)
else:
newVersion = None
list = []
bail = 0
for pkgName in repos.getPackageList(pkg):
if not newVersion:
newVersion = repos.pkgLatestVersion(pkgName, cfg.defaultbranch)
if not repos.hasPackageVersion(pkgName, newVersion):
sys.stderr.write("package %s does not contain version %s\n" %
(pkgName, newVersion.asString()))
bail = 1
else:
if db.hasPackage(pkgName):
currentVersion = db.pkgLatestVersion(pkgName,
newVersion.branch())
else:
currentVersion = None
list.append((pkgName, currentVersion, newVersion))
if bail:
return
if not list:
sys.stderr.write("repository does not contain a package called %s\n" % pkg)
return
cs = repos.createChangeSet(list)
if cs.isAbstract():
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 0)
else:
inverse = cs.invert(db)
db.addRollback(inverse)
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 1)
|
Python
| 0
|
@@ -809,16 +809,95 @@
newcs%0A%0A
+%09 list = %5B%5D%0A%09 list = map(lambda x: list.append(x), cs.getPackageList())%0A%0A
if n
@@ -2011,16 +2011,257 @@
(list)%0A%0A
+%09# permute the list into a list of just package names%0A%09list = map(lambda x: x%5B0%5D, list)%0A%0A # create a change set between what is in the database and what is%0A # on the disk%0A localChanges = changeset.CreateAgainstLocal(cfg, db, list)%0A%0A
if c
|
e28b900e1a9ac407f7a32af9983daace7b225758
|
Fix typing in get_processors helper (FieldABC instance, not BaseField)
|
aiohttp_json_api/helpers.py
|
aiohttp_json_api/helpers.py
|
"""Helpers."""
import inspect
from collections import Iterable, Mapping
from typing import Optional
from aiohttp import web
from .fields.base import BaseField
from .fields.decorators import Tag
from .typings import Callee
from .common import JSONAPI
def is_generator(obj):
"""Return True if ``obj`` is a generator."""
return inspect.isgeneratorfunction(obj) or inspect.isgenerator(obj)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (
(isinstance(obj, Iterable) and not hasattr(obj, "strip")) or
is_generator(obj)
)
def is_indexable_but_not_string(obj):
"""Return True if ``obj`` is indexable but isn't a string."""
return not hasattr(obj, "strip") and hasattr(obj, "__getitem__")
def is_collection(obj, exclude=()):
"""Return True if ``obj`` is a collection type."""
return (not isinstance(obj, (Mapping,) + exclude) and
is_iterable_but_not_string(obj))
def ensure_collection(value, exclude=()):
"""Ensure value is collection."""
return value if is_collection(value, exclude=exclude) else (value,)
def first(iterable, default=None, key=None):
"""
Return first element of *iterable*.
Return first element of *iterable* that evaluates to ``True``, else
return ``None`` or optional *default*.
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional *key* argument specifies a one-argument predicate function
like that used for *filter()*. The *key* argument, if supplied, should be
in keyword form. For example, finding the first even number in an iterable:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
Contributed by Hynek Schlawack, author of `the original standalone module`_
.. _the original standalone module: https://github.com/hynek/first
"""
return next(filter(key, iterable), default)
def make_sentinel(name='_MISSING', var_name=None):
"""
Create sentinel instance.
Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
>>> make_sentinel(var_name='_MISSING')
_MISSING
    The most common use cases here in this project are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
:arg str name:
Name of the Sentinel
:arg str var_name:
Set this name to the name of the variable in its respective
module enable pickleability.
"""
class Sentinel(object):
def __init__(self):
self.name = name
self.var_name = var_name
def __repr__(self):
if self.var_name:
return self.var_name
return '%s(%r)' % (self.__class__.__name__, self.name)
if var_name:
def __reduce__(self):
return self.var_name
def __nonzero__(self):
return False
__bool__ = __nonzero__
return Sentinel()
def get_router_resource(app: web.Application, resource: str):
"""Return route of JSON API application for resource."""
return app.router[
'{}.{}'.format(app[JSONAPI]['routes_namespace'], resource)
]
def get_processors(obj, tag: Tag, field: BaseField,
default: Optional[Callee] = None):
has_processors = getattr(obj, '_has_processors', False)
if has_processors:
processor_tag = tag, field.key
processors = obj.__processors__.get(processor_tag)
if processors:
for processor_name in processors:
processor = getattr(obj, processor_name)
processor_kwargs = \
processor.__processing_kwargs__.get(processor_tag)
yield processor, processor_kwargs
return
if not callable(default):
return
yield default, {}
MISSING = make_sentinel()
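# Iteration sketch for get_processors (schema, tag and field stand for a
# schema object, a fields.decorators.Tag value and one of its fields):
#   for processor, processor_kwargs in get_processors(schema, tag, field):
#       ...  # invoke each method registered for (tag, field.key)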
|
Python
| 0
|
@@ -129,28 +129,26 @@
rom
+.abc
.field
-s.base
import
Base
@@ -143,25 +143,24 @@
import
-Base
Field
+ABC
%0Afrom .f
@@ -4013,17 +4013,16 @@
ld:
-Base
Field
+ABC
,%0A
|
7d92b89d7a9e0c950bde96632447d7cd0486f933
|
Update api to use backend datastore
|
utils/api.py
|
utils/api.py
|
#!/usr/bin/env python
'''
THIS APP IS NOT PRODUCTION READY!! DO NOT USE!
Flask app that provides a RESTful API to
the multiscanner.
Proposed supported operations:
GET / ---> Test functionality. {'Message': 'True'}
GET /api/v1/tasks/list ---> Receive list of tasks in multiscanner
GET /api/v1/tasks/list/<task_id> ---> receive task in JSON format
GET /api/v1/reports/list/<report_id> ---> receive report in JSON
GET /api/v1/reports/delete/<report_id> ----> delete report_id
POST /api/v1/tasks/create ---> POST file and receive report id
Sample POST usage:
curl -i -X POST http://localhost:8080/api/v1/tasks/create/ -F file=@/bin/ls
TODO:
* Add a backend DB to store reports
* Make this app agnostic to choice of backend DB
* Add doc strings to functions
'''
import os
import sys
import uuid
from flask import Flask, jsonify, make_response, request, abort
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.join(MS_WD, 'storage') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'storage'))
import sqlite_driver as database
TASK_NOT_FOUND = {'Message': 'No task with that ID found!'}
INVALID_REQUEST = {'Message': 'Invalid request parameters'}
UPLOAD_FOLDER = 'tmp/'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
app = Flask(__name__)
db = database.Database()
@app.errorhandler(HTTP_BAD_REQUEST)
def invalid_request(error):
'''Return a 400 with the INVALID_REQUEST message.'''
return make_response(jsonify(INVALID_REQUEST), HTTP_BAD_REQUEST)
@app.errorhandler(HTTP_NOT_FOUND)
def not_found(error):
'''Return a 404 with a TASK_NOT_FOUND message.'''
return make_response(jsonify(TASK_NOT_FOUND), HTTP_NOT_FOUND)
@app.route('/')
def index():
'''
Return a default standard message
for testing connectivity.
'''
return jsonify({'Message': 'True'})
@app.route('/api/v1/tasks/list/', methods=['GET'])
def task_list():
'''
Return a JSON dictionary containing all the tasks
in the DB.
'''
db = database.Database()
return jsonify({'Tasks': db.get_all_tasks()})
@app.route('/api/v1/tasks/list/<int:task_id>', methods=['GET'])
def get_task(task_id):
'''
Return a JSON dictionary corresponding
to the given task ID.
'''
task = db.get_task(task_id)
if task:
return jsonify({'Task': task})
else:
abort(HTTP_NOT_FOUND)
@app.route('/api/v1/tasks/delete/<int:task_id>', methods=['GET'])
def delete_task(task_id):
'''
Delete the specified task. Return deleted message.
'''
result = db.delete_task(task_id)
if not result:
abort(HTTP_NOT_FOUND)
return jsonify({'Message': 'Deleted'})
@app.route('/api/v1/tasks/create/', methods=['POST'])
def create_task():
'''
Create a new task. Save the submitted file
to UPLOAD_FOLDER. Return task id and 201 status.
'''
file_ = request.files['file']
extension = os.path.splitext(file_.filename)[1]
f_name = str(uuid.uuid4()) + extension
file_path = os.path.join(UPLOAD_FOLDER, f_name)
file_.save(file_path)
# TODO: run multiscan on the file, have it update the
# DB when done
# output = multiscanner.multiscan([file_path])
# report = multiscanner.parseReports
task_id = db.add_task()
return make_response(
jsonify({'Message': {'task_id': task_id}}),
HTTP_CREATED
)
@app.route('/api/v1/reports/list/<int:report_id>', methods=['GET'])
def get_report(report_id):
'''
Return a JSON dictionary corresponding
to the given report ID.
'''
report = [report for report in REPORTS if report['report_id'] == report_id]
if len(report) == 0:
abort(HTTP_NOT_FOUND)
return jsonify({'Report': report[0]})
@app.route('/api/v1/reports/delete/<int:report_id>', methods=['GET'])
def delete_report(report_id):
'''
Delete the specified report. Return deleted message.
'''
report = [report for report in REPORTS if report['report_id'] == report_id]
if len(report) == 0:
abort(HTTP_NOT_FOUND)
REPORTS.remove(report[0])
return jsonify({'Message': 'Deleted'})
if __name__ == '__main__':
db.init_sqlite_db()
if not os.path.isdir(UPLOAD_FOLDER):
print 'Creating upload dir'
os.makedirs(UPLOAD_FOLDER)
app.run(host='0.0.0.0', port=8080, debug=True)
|
Python
| 0
|
@@ -1065,16 +1065,44 @@
atabase%0A
+from storage import Storage%0A
%0A%0ATASK_N
@@ -1368,16 +1368,48 @@
abase()%0A
+db_store = Storage.get_storage()
%0A%0A@app.e
@@ -3655,64 +3655,28 @@
t =
-%5Breport for report in REPORTS if report%5B'report_id'%5D ==
+db_store.get_report(
repo
@@ -3676,33 +3676,33 @@
rt(report_id
-%5D
+)
%0A if
len(report)
@@ -3693,56 +3693,20 @@
if
-len(
report
-) == 0
:%0A
- abort(HTTP_NOT_FOUND)%0A
@@ -3741,12 +3741,49 @@
port
-%5B0%5D%7D
+%7D)%0A else:%0A abort(HTTP_NOT_FOUND
)%0A%0A%0A
@@ -3963,169 +3963,43 @@
-report = %5Breport for report in REPORTS if report%5B'report_id'%5D == report_id%5D%0A if len(report) == 0:%0A abort(HTTP_NOT_FOUND)%0A REPORTS.remove(report%5B0%5D)%0A
+if db_store.delete(report_id):%0A
@@ -4037,16 +4037,56 @@
eted'%7D)%0A
+ else:%0A abort(HTTP_NOT_FOUND)%0A
%0A%0Aif __n
|
6aea68d6c1de498583c42839a3a31ef25f51e17e
|
Complete alg_breadth_first_search.py
|
alg_breadth_first_search.py
|
alg_breadth_first_search.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def bfs():
pass
def main():
# Small word ladder graph.
graph_adj_d = {
'fool': {'cool', 'pool', 'foil', 'foul'},
'foul': {'fool', 'foil'},
'foil': {'fool', 'foul', 'fail'},
'cool': {'fool', 'pool'},
'fail': {'foil', 'fall'},
'fall': {'fail', 'pall'},
'pool': {'fool', 'cool', 'poll'},
'poll': {'pool', 'pall', 'pole'},
'pall': {'fall', 'pale', 'poll'},
'pole': {'poll', 'pope', 'pale'},
'pope': {'pole'},
'pale': {'pall', 'pole', 'sale', 'page'},
'sale': {'pale', 'sage'},
'page': {'pale', 'sage'},
'sage': {'sale', 'page'}
}
if __name__ == '__main__':
main()
|
Python
| 0.99995
|
@@ -107,28 +107,507 @@
on%0A%0A
-%0Adef bfs():%0A pass
+import numpy as np%0A%0Adef bfs(graph_adj_d, start_vertex):%0A visit_queue = %5B%5D%0A visit_queue.insert(0, start_vertex)%0A distance_d = %7Bv: np.inf for v in graph_adj_d.keys()%7D%0A distance_d%5Bstart_vertex%5D = 0%0A while visit_queue:%0A v_visit = visit_queue.pop()%0A for v_neighbor in graph_adj_d%5Bv_visit%5D:%0A if np.isinf(distance_d%5Bv_neighbor%5D):%0A visit_queue.insert(0, v_neighbor)%0A distance_d%5Bv_neighbor%5D = distance_d%5Bv_visit%5D + 1%0A return distance_d
%0A%0A%0Ad
@@ -684,17 +684,17 @@
'fool':
-%7B
+%5B
'cool',
@@ -715,17 +715,17 @@
, 'foul'
-%7D
+%5D
,%0A
@@ -730,25 +730,25 @@
'foul':
-%7B
+%5B
'fool', 'foi
@@ -749,17 +749,17 @@
, 'foil'
-%7D
+%5D
,%0A
@@ -764,25 +764,25 @@
'foil':
-%7B
+%5B
'fool', 'fou
@@ -791,17 +791,17 @@
, 'fail'
-%7D
+%5D
,%0A
@@ -806,25 +806,25 @@
'cool':
-%7B
+%5B
'fool', 'poo
@@ -825,17 +825,17 @@
, 'pool'
-%7D
+%5D
,%0A
@@ -844,17 +844,17 @@
'fail':
-%7B
+%5B
'foil',
@@ -855,25 +855,25 @@
oil', 'fall'
-%7D
+%5D
,%0A 'f
@@ -878,17 +878,17 @@
'fall':
-%7B
+%5B
'fail',
@@ -893,17 +893,17 @@
, 'pall'
-%7D
+%5D
,%0A
@@ -912,17 +912,17 @@
'pool':
-%7B
+%5B
'fool',
@@ -927,33 +927,33 @@
, 'cool', 'poll'
-%7D
+%5D
,%0A 'poll'
@@ -954,17 +954,17 @@
'poll':
-%7B
+%5B
'pool',
@@ -973,25 +973,25 @@
all', 'pole'
-%7D
+%5D
,%0A 'p
@@ -996,17 +996,17 @@
'pall':
-%7B
+%5B
'fall',
@@ -1019,17 +1019,17 @@
, 'poll'
-%7D
+%5D
,%0A
@@ -1034,25 +1034,25 @@
'pole':
-%7B
+%5B
'poll', 'pop
@@ -1061,17 +1061,17 @@
, 'pale'
-%7D
+%5D
,%0A
@@ -1084,16 +1084,16 @@
e':
-%7B
+%5B
'pole'
-%7D
+%5D
,%0A
@@ -1102,25 +1102,25 @@
'pale':
-%7B
+%5B
'pall', 'pol
@@ -1133,25 +1133,25 @@
ale', 'page'
-%7D
+%5D
,%0A 's
@@ -1152,25 +1152,25 @@
'sale':
-%7B
+%5B
'pale', 'sag
@@ -1163,33 +1163,33 @@
%5B'pale', 'sage'
-%7D
+%5D
,%0A 'page'
@@ -1190,17 +1190,17 @@
'page':
-%7B
+%5B
'pale',
@@ -1205,17 +1205,17 @@
, 'sage'
-%7D
+%5D
,%0A
@@ -1224,17 +1224,17 @@
'sage':
-%7B
+%5B
'sale',
@@ -1243,16 +1243,249 @@
age'
-%7D %0A %7D
+%5D %0A %7D%0A print('Graph: %7B%7D'.format(graph_adj_d))%0A%0A start_vertex = 'fool'%0A print('Start vertex: %7B%7D'.format(start_vertex))%0A distance_d = bfs(graph_adj_d, start_vertex)%0A print('By BFS, the distance dict is %7B%7D'.format(distance_d))
%0A%0A%0Ai
|
649029d2ad04eb5afd618b40ccd62993d69e389f
|
Complete alg_percentile_selection.py
|
alg_percentile_selection.py
|
alg_percentile_selection.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def select_percentile(a_list, k):
"""Select list's kth percentile.
Just select the kth element, without caring about
the relative ordering of the rest of them.
"""
pass
def main():
pass
if __name__ == '__main__':
main()
|
Python
| 0.998978
|
@@ -103,16 +103,31 @@
nction%0A%0A
+import random%0A%0A
%0Adef sel
@@ -145,14 +145,10 @@
ile(
-a_list
+ls
, k)
@@ -157,36 +157,42 @@
%09%22%22%22
-Select list's kth percentile
+Kth percentile selection algorithm
.%0A%0A
@@ -295,38 +295,931 @@
em.%0A
-%09%22%22%22%0A%09pass%0A%0A%0Adef main():%0A%09pass
+%0A The algorithm performs in place without allocating%0A new memory for the three sublists using three pointers.%0A%09%22%22%22%0A%09v = random.sample(ls, 1)%5B0%5D%0A%09idx_eq_v = %5Bi for i, a in enumerate(ls) if a == v%5D%0A%09idx_le_v = %5Bi for i, a in enumerate(ls) if a %3C v%5D%0A%09idx_ge_v = %5Bi for i, a in enumerate(ls) if a %3E v%5D%0A%0A%09if k %3C= len(idx_le_v):%0A%09%09le_v_ls = %5Bls%5Bidx%5D for idx in idx_le_v%5D%0A%09%09return select_percentile(le_v_ls, k)%0A%09elif len(idx_le_v) %3C k %3C= len(idx_le_v) + len(idx_eq_v):%0A%09%09return v%0A%09elif k %3E len(idx_le_v) + len(idx_eq_v):%0A%09%09ge_v_ls = %5Bls%5Bidx%5D for idx in idx_ge_v%5D%0A%09%09return select_percentile(ge_v_ls, k - len(idx_le_v) - len(idx_eq_v))%0A%0A%0Adef main():%0A%09n = 100%0A%09ls = range(n)%0A%09random.shuffle(ls)%0A%09print('List: %7B%7D'.format(ls))%0A%09print('Get median by selection:')%0A%09print(select_percentile(ls, n // 2))%0A%09print('Get min by selection:')%0A%09print(select_percentile(ls, 1))%0A%09print('Get max by selection:')%0A%09print(select_percentile(ls, n))
%0A%0Aif
|
7de37a5ba8164b757a6a8ed64f80ee379ff7a3ad
|
fix some bugs
|
scripts/extract_computer_and_accessories.py
|
scripts/extract_computer_and_accessories.py
|
from scripts.rake import *
import json
# get review texts aggregated by asin id
def get_rdd(base, input, num_part):
base_dir = os.path.join(base)
input_path = os.path.join(input)
file_name = os.path.join(base_dir, input_path)
# load data
rdd = sc.textFile(file_name, num_part)
rdd_j = rdd.map(json.loads)
rdd_j.cache()
return rdd_j
num_part = 16
revs = get_rdd('data', 'reviews_electronics.json', num_part)
rev_texts = revs.map(lambda x: (x['asin'], x['reviewText']))
rev_agg_texts = rev_texts.map(lambda (asin, text): (asin, [text])).reduceByKey(lambda x, y: x + y)
rev_agg_texts.cache()
prods = get_rdd('data', 'meta_electronics.json', num_part)
categ = prods.map( lambda x: (x.get('asin'), x.get('categories')) )
categ = categ.flapMapValues(lambda x: x)
computers = categ_.filter( lambda (asin, cats): 'Computers & Accessories' in cats )
prods_ = prods_.join(computers)
prods.cache()
# (asin, ([review], (d_prod, [category])) )
items = rev_agg_texts.join(prods_)
items = items.map( lambda (asin, (reviews, (d_prod, categories))): (asin, reviews, d_prod, categories) )
# 1. RAKE: keyword. use rake algorithm to extract keywords and take top 10 keywords from each asin
rake = Rake('data/MergedStopList.txt') # TODO: add more into this list
items_wk = items.map( lambda (asin, reviews, d_prod, categories): (asin, rake.run(' '.join(reviews)), reviews, d_prod, categories) )
# 2. NP: noun phrasee among these keywords
import nltk
from scripts.np_extractor import *
items_wk.cache()
items_np = items_wk.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(NPExtractor(string).extract(), score) for (string, score) in pairs], reviews, d_prod, categories)
)
items_np = items_np.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(toks, scr) for (toks, scr) in pairs if len(toks) > 0], reviews, d_prod, categories)
)
# 3. output
import pandas as pd
df = pd.DataFrame(items_np.collect())
df.to_csv('data/processed/computers_kw.csv')
|
Python
| 0.000008
|
@@ -859,17 +859,17 @@
ateg.fla
-p
+t
MapValue
@@ -900,17 +900,16 @@
= categ
-_
.filter(
@@ -980,17 +980,16 @@
= prods
-_
.join(co
@@ -1002,16 +1002,17 @@
s)%0Aprods
+_
.cache()
@@ -1607,18 +1607,18 @@
wk.cache
-()
+%0A%0A
%0Aitems_n
@@ -1849,16 +1849,18 @@
)%0A
+%0A%0A
items_np
|
f4f9c524838b5bc30bc993cc6ffe3392495dff2d
|
Make sure we patch webbrowser
|
glue/plugins/exporters/plotly/qt/tests/test_exporter.py
|
glue/plugins/exporters/plotly/qt/tests/test_exporter.py
|
from __future__ import absolute_import, division, print_function
import json
import mock
import pytest
import plotly
from plotly.exceptions import PlotlyError
from mock import patch
from glue.tests.helpers import requires_plotly
from glue.core import Data, DataCollection
from glue.app.qt import GlueApplication
from glue.viewers.histogram.qt import HistogramWidget
from glue.plugins.exporters.plotly.export_plotly import build_plotly_call
from .. import QtPlotlyExporter
plotly_sign_in = mock.MagicMock()
plotly_plot = mock.MagicMock()
SIGN_IN_ERROR = """
Aw, snap! You tried to use our API as the user 'BATMAN', but
the supplied API key doesn't match our records. You can view
your API key at plot.ly/settings.
"""
MAX_PRIVATE_ERROR = """
This file cannot be saved as private, your current Plotly account has
filled its quota of private files. You can still save public files, or you can
upgrade your account to save more files privately by visiting your account at
https://plot.ly/settings/subscription. To make a file public in the API, set
the optional argument 'world_readable' to true.
"""
def make_credentials_file(path, username='', api_key=''):
credentials = {}
credentials['username'] = username
credentials['api_key'] = api_key
credentials['proxy_username'] = ''
credentials['proxy_password'] = ''
credentials['stream_ids'] = []
with open(path, 'w') as f:
json.dump(credentials, f)
plotly.files.FILE_CONTENT[path] = credentials
@requires_plotly
class TestQtPlotlyExporter():
def setup_class(self):
data = Data(x=[1, 2, 3], y=[2, 3, 4], label='data')
dc = DataCollection([data])
app = GlueApplication(dc)
data.style.color = '#000000'
v = app.new_data_viewer(HistogramWidget, data=data)
v.component = data.id['y']
v.xmin = 0
v.xmax = 10
v.bins = 20
self.args, self.kwargs = build_plotly_call(app)
def get_exporter(self):
return QtPlotlyExporter(plotly_args=self.args, plotly_kwargs=self.kwargs)
def test_default(self, tmpdir):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file)
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
exporter = self.get_exporter()
assert exporter.radio_account_glue.isChecked()
assert exporter.radio_sharing_public.isChecked()
assert not exporter.radio_sharing_secret.isEnabled()
assert not exporter.radio_sharing_private.isEnabled()
def test_default_with_credentials(self, tmpdir):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file, username='batman', api_key='batmobile')
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
exporter = self.get_exporter()
assert exporter.radio_account_config.isChecked()
assert 'username: batman' in exporter.radio_account_config.text()
assert exporter.radio_sharing_secret.isChecked()
assert exporter.radio_sharing_secret.isEnabled()
assert exporter.radio_sharing_private.isEnabled()
def test_toggle_account_sharing(self, tmpdir):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file)
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
exporter = self.get_exporter()
assert not exporter.radio_sharing_secret.isEnabled()
assert not exporter.radio_sharing_private.isEnabled()
exporter.radio_account_manual.setChecked(True)
assert exporter.radio_sharing_secret.isEnabled()
assert exporter.radio_sharing_private.isEnabled()
exporter.radio_account_glue.setChecked(True)
assert not exporter.radio_sharing_secret.isEnabled()
assert not exporter.radio_sharing_private.isEnabled()
def test_edit_username_toggle_custom(self, tmpdir):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file)
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
exporter = self.get_exporter()
assert exporter.radio_account_glue.isChecked()
assert not exporter.radio_account_manual.isChecked()
exporter.username = 'a'
assert not exporter.radio_account_glue.isChecked()
assert exporter.radio_account_manual.isChecked()
exporter.radio_account_glue.setChecked(True)
assert exporter.radio_account_glue.isChecked()
assert not exporter.radio_account_manual.isChecked()
exporter.api_key = 'a'
assert not exporter.radio_account_glue.isChecked()
assert exporter.radio_account_manual.isChecked()
def test_accept_default(self, tmpdir):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file)
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
with patch('plotly.plotly.plot', mock.MagicMock()):
exporter = self.get_exporter()
exporter.accept()
assert exporter.text_status.text() == 'Exporting succeeded'
ERRORS = [
(PlotlyError(SIGN_IN_ERROR), 'Authentication with username batman failed'),
(PlotlyError(MAX_PRIVATE_ERROR), 'Maximum number of private plots reached'),
(PlotlyError('Oh noes!'), 'An unexpected error occurred'),
(TypeError('A banana is not an apple'), 'An unexpected error occurred')
]
@pytest.mark.parametrize(('error', 'status'), ERRORS)
def test_accept_errors(self, tmpdir, error, status):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file, username='batman', api_key='batmobile')
plot = mock.MagicMock(side_effect=error)
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
with patch('plotly.plotly.plot', plot):
exporter = self.get_exporter()
exporter.accept()
assert exporter.text_status.text() == status
@pytest.mark.parametrize(('error', 'status'), ERRORS)
def test_fix_url(self, tmpdir, error, status):
credentials_file = tmpdir.join('.credentials').strpath
make_credentials_file(credentials_file, username='batman', api_key='batmobile')
plot = mock.MagicMock(return_value='https://plot.ly/~batman/6?share_key=rbkWvJQn6cyj3HMMGROiqI')
with patch('plotly.tools.CREDENTIALS_FILE', credentials_file):
with patch('plotly.plotly.plot', plot):
with patch('webbrowser.open_new_tab') as open_new_tab:
exporter = self.get_exporter()
exporter.accept()
assert open_new_tab.called_once_with('https://plot.ly/~batman/6/?share_key=rbkWvJQn6cyj3HMMGROiqI')
|
Python
| 0
|
@@ -5210,16 +5210,91 @@
ock()):%0A
+ with patch('webbrowser.open_new_tab') as open_new_tab:%0A
@@ -5340,32 +5340,36 @@
+
exporter.accept(
@@ -5362,32 +5362,36 @@
porter.accept()%0A
+
@@ -6223,32 +6223,107 @@
y.plot', plot):%0A
+ with patch('webbrowser.open_new_tab') as open_new_tab:%0A
@@ -6361,32 +6361,36 @@
+
exporter.accept(
@@ -6383,32 +6383,36 @@
porter.accept()%0A
+
|
ab533cf55e87571e757f545d5eaad6f8f62cc31f
|
Make worker resource plot responsive
|
distributed/diagnostics/worker_monitor.py
|
distributed/diagnostics/worker_monitor.py
|
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from itertools import chain
from toolz import pluck
from ..utils import ignoring
with ignoring(ImportError):
from bokeh.models import ColumnDataSource, DataRange1d, Range1d
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
def resource_profile_plot(width=600, height=300):
names = ['time', 'cpu', 'memory-percent']
source = ColumnDataSource({k: [] for k in names})
x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
y_range = Range1d(0, 100)
p = figure(width=width, height=height, x_axis_type='datetime',
tools='xpan,xwheel_zoom,box_zoom,resize,reset',
x_range=x_range, y_range=y_range)
p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,
color=Spectral9[7], legend='Memory Usage', source=source)
p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,
color=Spectral9[0], legend='CPU Usage', source=source)
p.legend[0].location = 'top_left'
p.yaxis[0].axis_label = 'Percent'
p.xaxis[0].axis_label = 'Time'
p.min_border_right = 10
return source, p
def resource_profile_update(source, worker_buffer, times_buffer):
data = defaultdict(list)
workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
for name in ['cpu', 'memory-percent']:
data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
for msg in worker_buffer]
for w in workers]
data['workers'] = workers
data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'
for i, t in enumerate(times_buffer)]
for w in workers]
source.data.update(data)
def resource_append(lists, msg):
L = list(msg.values())
if not L:
return
for k in ['cpu', 'memory-percent']:
lists[k].append(mean(pluck(k, L)))
lists['time'].append(mean(pluck('time', L)) * 1000)
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
|
Python
| 0.000002
|
@@ -698,16 +698,33 @@
+ responsive=True,
tools='
@@ -923,16 +923,20 @@
legend='
+Avg
Memory U
@@ -1056,16 +1056,20 @@
legend='
+Avg
CPU Usag
|
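The legend rename to 'Avg ...' reflects that resource_append averages each metric across workers; a small sketch of that reduction (requires toolz, as in the record). The other half of the diff passes responsive=True to figure(), a bokeh kwarg of that era (later replaced by sizing_mode).

from toolz import pluck

msgs = {'w1': {'cpu': 50.0, 'memory-percent': 30.0},
        'w2': {'cpu': 70.0, 'memory-percent': 50.0}}
L = list(msgs.values())
avg_cpu = sum(pluck('cpu', L)) / len(L)  # 60.0 -- what the 'Avg CPU Usage' line plots
print(avg_cpu)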
b90927f3aa8b3178eabdf75ab68cf11df1c08835
|
Resolve #8 and modify output
|
portscanner.py
|
portscanner.py
|
#!/usr/bin/python
import socket
import sys
import os
import argparse
def main():
parser = argparse.ArgumentParser(description = "Test a specified ip/host for open ports.")
mutex = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('ip', metavar='IP', help='The ip/host to be scanned for open ports')
parser.add_argument('-v', '--verbose', help='Add extra verbosity to the output of the scanner', action='store_true')
parser.add_argument('-t', '--type', help='The type of the ports to be scanned', choices=['tcp', 'TCP', 'udp', 'UDP'], metavar='[ udp | tcp ]', default='tcp')
mutex.add_argument('-a', '--all', help='Scan all the possible ports', action='store_true')
mutex.add_argument('-p', '--ports', help='Scan the specified ports only', type=int, metavar='PORT', choices=range(0,65536), nargs='*', default=[])
args = parser.parse_args()
ports = []
if args.all:
ports = range(0, 65536)
elif args.ports:
ports = args.ports
try:
ip = socket.gethostbyname(args.ip)
except socket.gaierror:
sys.exit('[!] Invalid ip-address/hostname!\n[!] Exiting...')
try:
host = socket.gethostbyaddr(ip)[0]
except socket.herror:
host = 'Unknown Host'
protocol = args.type
print '[+] Pinging host ' + ip + ":" + host
os.system('ping -q -c1 -w2 ' + ip)
print ""
for port in ports: # For every given port attempt to connect...
if (args.type == 'tcp' or args.type == 'TCP'):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif (args.type == 'udp' or args.type == 'UDP'):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
banner = False
s.settimeout(3)
try:
serv = socket.getservbyport(port)
except socket.error:
serv = 'Unknown Service'
try:
if args.verbose:
print '[+] Attempting to connect to ' + ip + '(' + host + ')' + ':' + str(port) + '/' + protocol.upper() + ' [' + serv.upper() + ']'
s.connect((ip ,int(port)))
s.send("Port Checking")
try:
banner = s.recv(1024)
except socket.timeout:
banner = ''
except socket.error: # If a socket.error exception is caught, it means the attempt to connect has failed,
continue # hence the port is closed... In that case advance to the next port.
if banner=='':
banner = 'No Response...'
print '[+] Port ' + str(port) + '/' + protocol.upper() + ' [' + serv.upper() + ']' + ' is open!' + ' ==> Reply: ' + str(banner)
s.close()
if __name__ =='__main__':
main()
|
Python
| 0
|
@@ -64,16 +64,154 @@
gparse%0A%0A
+class colors:%0A%09HOST='%5C033%5B32m'%0A%09PORT='%5C033%5B36m'%0A%09TYPE='%5C033%5B33m'%0A%09SERVICE='%5C033%5B35m'%0A%09WARNING='%5C033%5B31m'%0A%09INFO='%5C033%5B34m'%0A%09END='%5C033%5B0m'%0A%0A
def main
@@ -1182,47 +1182,122 @@
xit(
-'%5B!%5D Invalid ip-address/hostname!%5Cn%5B!%5D
+colors.WARNING + '%5B!%5D ' + colors.END + 'Invalid ip-address/hostname!%5Cn' + colors.WARNING + '%5B!%5D ' + colors.END + '
Exit
@@ -1424,21 +1424,53 @@
%0A%09print
-'%5B+%5D
+colors.INFO + '%5B+%5D ' + colors.END + '
Pinging
@@ -1994,128 +1994,397 @@
int
-'%5B+%5D Attempting to connect to ' + ip + '(' + host + ')' + ':' + str(port) + '/' + protocol.upper() + ' %5B' + serv.upper()
+colors.INFO + '%5B+%5D ' + colors.END + 'Attempting to connect to ' + colors.INFO + '::' + colors.END + colors.HOST + ip + colors.END + colors.INFO + ':' + colors.END + colors.HOST + host + colors.END + colors.INFO + '::' + colors.END + ' via port ' + colors.PORT + str(port) + colors.END + '/' + colors.TYPE + protocol.upper() + colors.END + ' %5B' + colors.SERVICE + serv.upper() + colors.END
+ '
@@ -2776,66 +2776,168 @@
int
-'%5B+%5D Port ' + str(port) + '/' + protocol.upper() + ' %5B' +
+colors.INFO + '%5B+%5D ' + colors.END + 'Port ' + colors.PORT + str(port) + colors.END + '/' + colors.TYPE + protocol.upper() + colors.END + ' %5B' + colors.SERVICE +
serv
@@ -2946,16 +2946,29 @@
pper() +
+ colors.END +
'%5D' + '
@@ -2987,11 +2987,48 @@
'
-==%3E
+' + colors.INFO + '==%3E' + colors.END + '
Rep
|
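The colors class the diff adds is plain ANSI escape sequences; a self-contained sketch of the technique (works in any ANSI-capable terminal):

class Colors:
    HOST = '\033[32m'     # green
    WARNING = '\033[31m'  # red
    END = '\033[0m'       # reset back to the default style

print(Colors.WARNING + '[!] ' + Colors.END + 'Invalid ip-address/hostname!')
print(Colors.HOST + '127.0.0.1' + Colors.END)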
6e65bf0ce5334e2242fee0c36886cfda29a1f4a4
|
Make build.py baseimage pulling optional
|
build.py
|
build.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: python; -*-
import logging
import subprocess
import sys
import time
from common import get_sys_args, yield_dockerfiles
from common import run_cmd, init_logger
def build_image(repo_path, repo_name):
for dockerfile, image in yield_dockerfiles(repo_path, repo_name):
try:
run_cmd(['docker', 'build', '--pull', '-t', image, '-f', dockerfile, repo_path])
except subprocess.CalledProcessError:
logging.error('Failed to build %s!' % dockerfile)
sys.exit(1)
time.sleep(1)
if __name__ == '__main__':
"""
Build solvable and controller docker images from an avatao challenge repository.
Simply add the challenge repository path as the first argument and the script does the rest.
If a controller or solvable is missing, we skip it.
After a successful build you can use the start.py to run your containers.
"""
init_logger()
build_image(*get_sys_args())
logging.info('Finished. Everything is built.')
|
Python
| 0
|
@@ -119,16 +119,26 @@
ort time
+%0Aimport os
%0A%0Afrom c
@@ -362,16 +362,20 @@
-run
+build
_cmd
-(
+ =
%5B'do
@@ -393,18 +393,8 @@
ld',
- '--pull',
'-t
@@ -431,16 +431,151 @@
po_path%5D
+%0A if os.environ.get('PULL_BASEIMAGES', '0') == '1':%0A build_cmd.append('--pull')%0A run_cmd(build_cmd
)%0A
|
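The pattern the diff adopts, sketched standalone: build the argv list first, then append --pull only when the PULL_BASEIMAGES environment variable opts in.

import os

build_cmd = ['docker', 'build', '-t', 'image', '-f', 'Dockerfile', '.']
if os.environ.get('PULL_BASEIMAGES', '0') == '1':
    build_cmd.append('--pull')
print(build_cmd)  # includes '--pull' only when PULL_BASEIMAGES=1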
2454b864670ba2138361edbdb9e66afccdf31010
|
fix a bug creating valid zip files
|
build.py
|
build.py
|
"""\
Build the current project as a Factorio mod.
Not required, but handy for work on the mod. The basic process is this:
1. If the git status isn't clean, throw the current stuff into the stash
2. Get the current version from info.json for the zip name
3. Zip the current working directory into name_version.zip, in the containing directory
4. Unstash to restore the directory status.
"""
import subprocess
import json
from zipfile import ZipFile, ZIP_DEFLATED
import os
import codecs
import sys
def git_is_clean():
"""
Return True if `git status` reports clean, otherwise False
"""
proc = subprocess.Popen(['git', 'status', '--porcelain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
success = proc.wait() == 0 # gets the return code, False on nonzero
success = success and len(out) == 0 # false if any standard output
success = success and len(err) == 0 # false if any standard error
return success
def stash():
"""
If the current working directory isn't clean as reported by `git status`, stash it and return True.
Otherwise return False.
"""
need_unstash = not git_is_clean()
if need_unstash:
subprocess.check_call(['git', 'stash'])
assert git_is_clean()
return need_unstash
def unstash():
subprocess.check_call(['git', 'stash', 'apply'])
def form_name(fn='info.json'):
with file(fn, 'r') as fp:
info = json.load(fp)
return '_'.join([info['name'], info['version']])
def get_default_path():
"""\
Returns the system default path for Factorio mods.
Derived from <http://www.factorioforums.com/wiki/index.php?title=Modding_overview#Mods_directory>
If it can't figure out the right place to go, defaults to the current working directory.
"""
ret = None
if sys.platform.startswith('win'):
ret = '%appdata%\Factorio\mods'
elif sys.platform.startswith('linux'):
ret = '~/.factorio/mods'
elif sys.platform.startswith('darwin'):
ret = '~/Library/Application Support/factorio/mods'
else:
ret = '.'
return os.path.expanduser(os.path.expandvars(ret))
def makezip(name, destpath=None):
if destpath is None:
destpath = get_default_path()
if not os.path.exists(destpath):
print >> sys.stderr, "'{}' does not exist. Can't write data there!".format(destpath)
return
elif not os.path.isdir(destpath):
print >> sys.stderr, "'{}' is not a directory. Can't write data there!".format(destpath)
return
path = os.path.join(destpath, name + '.zip')
failed = None
with file(path, 'w') as fp:
with ZipFile(fp, 'w', compression=ZIP_DEFLATED) as zip:
for dirpath, dirnames, filenames in os.walk('.'):
# don't add .git to the packed archive
if '.git' in dirnames:
dirnames.remove('.git')
# don't add .gitignore to the packed archive
# also don't add the current zipfile, if we happen to be working in the CWD
ignorefiles = ['.gitignore', name + '.zip']
for ifn in ignorefiles:
if ifn in filenames:
filenames.remove(ifn)
for fn in filenames:
filepath = os.path.join(dirpath, fn)
# documentation here: http://www.factorioforums.com/wiki/index.php?title=Modding_overview
# lies about required structure. It says it must be in a folder inside the zip file, but
# that causes an error on game load.
# zippath = os.path.join(name, filepath)
try:
# zipfile requires that file names be encoded in cp437.
filepath = codecs.encode(filepath, 'cp437')
# zippath = codecs.encode(zippath, 'cp437')
except ValueError, e:
print >> sys.stderr, "Failed to encode", name
print >> sys.stderr, e
failed = e
break;
# actually add the item to the archive
zip.write(filepath)
if failed is not None:
# we had a catastrophic error and don't want to leave a partial file hanging around
os.remove(path)
if __name__ == '__main__':
import argparse
desc = __doc__.strip()
parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--output-directory', metavar="OD", dest='od',
help="specify a directory in which to place the output zip")
parser.add_argument('-.', '--output-here', action='store_const', dest='od', const='.',
help="store the output in the current dir. Same as '--output-directory .'")
parser.add_argument('-d', '--detect-directory', action='store_true', default=False,
help="show the detected mods directory on this machine and exit")
args = parser.parse_args()
if args.detect_directory:
print get_default_path()
sys.exit(0)
stashed = stash()
makezip(form_name(), args.od)
if stashed:
unstash()
|
Python
| 0
|
@@ -2484,16 +2484,17 @@
path, 'w
+b
') as fp
@@ -3708,24 +3708,137 @@
filepath)%0A%09%0A
+%09if failed is None:%0A%09%09with file(path, 'rb') as fp:%0A%09%09%09with ZipFile(fp, 'r') as zip:%0A%09%09%09%09failed = zip.testzip()%0A%09%0A
%09if failed i
@@ -3952,16 +3952,116 @@
ve(path)
+%0A%09%09print %3E%3E sys.stderr, %22Failed to write%22, path%0A%09else:%0A%09%09print %22Successfully wrote and tested%22, path
%0A%0Aif __n
|
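Both halves of the fix, sketched in Python 3 spelling with a throwaway archive (writes demo.zip to the current directory): open the handle in binary mode, then verify the result with testzip(), which returns None when every member's CRC checks out.

from zipfile import ZipFile, ZIP_DEFLATED

with open('demo.zip', 'wb') as fp:               # 'wb', not 'w' -- the original bug
    with ZipFile(fp, 'w', compression=ZIP_DEFLATED) as zf:
        zf.writestr('hello.txt', 'hello')

with open('demo.zip', 'rb') as fp:
    with ZipFile(fp, 'r') as zf:
        assert zf.testzip() is None              # no corrupt members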
bc09d332e7f473cd9d212b2f1aab81498b8b09e2
|
Add global vars to generated class
|
objects/function.py
|
objects/function.py
|
from objects.file import File
class Function(object):
"""
Represents a .func file
text - text passed into the constructor
get_name() - the name of the function, as it will be used in the .auto file
get_constructors() - a list containing 2 strings that are inserted into C++
source code as constructors for tne periodic and init
functions.
get_args() - A list of all args. Example:
[["Distance", "dist"], ["Angle", "angle"]]
get_section_code() - the literal C++ code that should be in a section
get_section() - The text in a section
get_includes() - A list of everything that needs to be included. Strings in
this list will most likely be enclosed in quotes or angle
brackets
"""
def __init__(self, text, script_dir):
"""
Parses a string from a .func file into a easy to use format
This sets self.text, then runs any functions needed to set up member
variables by parsing self.text.
"""
self.text = text
self.script_dir = script_dir + ("/" if script_dir[-1] != "/" else "")
def get_section(self, section_name):
"""
Gets a "section" in self.text. Example of a section:
name {
...
stuff
...
}
That would be a section with the name "name".
Sections that are used are "include", "init", and "periodic".
"""
found_section = False
text_in_section = []
for line in self.text.split("\n"):
if not found_section and \
line.strip()[:len(section_name)] == section_name and \
line.strip()[-1] == "{":
found_section = True
elif found_section and not line[0] == "}":
text_in_section.append(line)
elif found_section:
return '\n'.join(text_in_section)
return '' #TODO(Wesley) Better way of indicating failure
def get_class(self):
class_h_skel_file = open(self.script_dir + "text_includes/auto_function_class.h.skel")
class_h_file = File(class_h_skel_file.read())
class_h_file.replace_text("name", self.get_name())
for arg in self.get_args():
class_h_file.insert_text("vars", "{0} {1} = 0;".format(arg[0], arg[1]))
class_cpp_skel_file = open(self.script_dir + "text_includes/auto_function_class.cpp.skel")
class_cpp_file = File(class_cpp_skel_file.read())
class_cpp_file.replace_text("name", self.get_name())
class_cpp_file.replace_text("init_code", self.get_section_code("init"))
class_cpp_file.replace_text("periodic_code", self.get_section_code("periodic"))
return [class_h_file.text, class_cpp_file.text]
def get_generator(self):
generator_h_skel_file = open(self.script_dir + "text_includes/auto_function_generator.h.skel").read()
generator_cpp_skel_file = open(self.script_dir + "text_includes/auto_function_generator.cpp.skel").read()
replacements = [["replace_text", "name", self.get_name()]]
generator_h_file = File(generator_h_skel_file, replacements)
generator_cpp_file = File(generator_cpp_skel_file, replacements)
return [generator_h_file.text, generator_cpp_file.text]
def get_section_code(self, section):
"""
Gets the transpiled C++ code inside a section, including auto-generated
variable casting code.
"""
raw_code = self.get_section(section)
var_init_lines = []
cast_functions = {
"int": "ConvertArgs::ls_convert_int({})",
"bool": "ConvertArgs::ls_convert_bool({})",
"float": "ConvertArgs::ls_convert_float({})",
"std::string": "ConvertArgs::ls_convert_string({})",
"Time": "ConvertArgs::ls_convert_time({})",
"Distance": "ConvertArgs::ls_convert_distance({})",
"Length": "ConvertArgs::ls_convert_distance({})",
"Angle": "ConvertArgs::ls_convert_angle({})",
"Velocity": "ConvertArgs::ls_convert_velocity({})",
"Acceleration": "ConvertArgs::ls_convert_acceleration({})",
"AngularVelocity": "ConvertArgs::ls_convert_angularvelocity({})",
"Voltage": "ConvertArgs::ls_convert_voltage({})"
}
argnum = 0
if len(self.get_args()) > 0:
for arg in self.get_args():
var_cast_func = cast_functions[arg[0]].format("ls_arg_list[{}]".format(argnum))
var_init_line = " {arg[0]} {arg[1]} = {cast};".format(arg=arg, cast=var_cast_func)
var_init_lines.append(var_init_line)
argnum += 1
return (" // BEGIN AUTO GENERATED CODE\n" +
"\n".join(var_init_lines) +
"\n // END AUTO GENERATED CODE\n\n" +
raw_code)
def get_includes(self):
"""
Returns a list of all of the lines in the "include" section of self.text,
stripping trailing commas if needed.
"""
includes = []
for line in self.get_section("include").split("\n"):
if line:
include_text = line.strip()
if include_text[-1] == ",":
include_text = include_text[:-1]
includes += [include_text]
return includes
def get_args(self):
"""
Returns a list of arguments. For an example of the format, see the
class docstring.
"""
try:
arg_string = self.get_raw_constructor().split("(")[1].split(")")[0]
except IndexError:
arg_string = ""
args = []
if arg_string.strip() != "":
for arg in arg_string.split(","):
arg = arg.strip()
arg_pair = [arg.strip().split(" ")[0], arg.strip().split(" ")[-1]]
args.append(arg_pair)
return args
def get_raw_constructor(self):
"""
Returns the first line of self.text, which we assume to be the
lemonscript-style constructor. This is not valid C++ code.
"""
return self.text.split("\n")[0]
def get_constructors(self):
"""
Returns the valid C++ constructors for the init and periodic functions.
"""
prefix = "bool AutoFunction::"
arg_list = "(CitrusRobot* robot, std::vector<void *> ls_arg_list)"
init_constructor = (prefix + self.get_name() + "Init" + arg_list)
periodic_constructor = (prefix + self.get_name() + "Periodic" + arg_list)
return [init_constructor, periodic_constructor]
def get_name(self):
"""
Returns the name of the function, as it will be used in lemonscript.
"""
return self.get_raw_constructor().split("(")[0].strip()
|
Python
| 0
|
@@ -2276,35 +2276,35 @@
))%0A%0A for
+v
ar
-g
in self.get_arg
@@ -2292,37 +2292,60 @@
var in self.get_
-args(
+section(%22global%22).split(%22%5Cn%22
):%0A c
@@ -2380,45 +2380,21 @@
s%22,
-%22%7B0%7D %7B1%7D = 0;%22.format(arg%5B0%5D, arg%5B1%5D)
+var + %22 = 0;%22
)%0A%0A
|
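What the reworked loop emits, reduced to its core: every non-empty line of the 'global' section becomes a zero-initialized member declaration (sketch; the section text here is made up):

global_section = "int counter\nDistance travelled"
for var in global_section.split("\n"):
    if var:
        print(var + " = 0;")  # e.g. 'int counter = 0;'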
35a708ccdd1aa7b3e52c9ebd52044c033bff2ff4
|
Update countfile.py
|
bin/count/countfile.py
|
bin/count/countfile.py
|
#!/usr/bin/python
import gzip
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import common
import config as cfg
def fileToDictionary(inputFile, indexColumn):
input = open(inputFile, "r")
rd = dict()
for x in input:
arow = x.rstrip().split("\t")
id = arow[indexColumn]
if rd.has_key(id):
#rd[id].append(arow)
print "duplicate knowngene id = " + id
print "arow = " + str(arow)
print "rd[id] = " + str(rd[id])
else:
rd[id] = arow
input.close()
return(rd)
def fileToArray(inputFile, skipFirst):
input = open(inputFile, "r")
ra = []
for i in range(skipFirst):
input.readline()
for x in input:
arow = x.rstrip().split("\t")
ra.append(arow)
input.close()
return(ra)
def countBins(samFile, countFile, statFile, sizeRef, binRef):
if samFile[-2:] == 'gz':
INFILE = gzip.open(samFile, "rb")
else:
INFILE = open(samFile, "r")
OUTFILE = open(countFile, "w")
STATFILE = open(statFile, "w")
chrominfo = fileToDictionary(sizeRef, 0)
bins = fileToArray(binRef, 0)
binCounts = []
for i in range(len(bins)):
binCounts.append(0)
counter = 0
totalReads = 0
prevChrompos = ""
for x in INFILE:
arow = x.rstrip().split("\t")
if arow[0][0] == '@':
continue
thisChrom = arow[2]
thisChrompos = arow[3]
if thisChrom.find("_") > -1:
continue
if thisChrom == "chrM":
continue
if thisChrom == "":
continue
if chrominfo.has_key(thisChrom):
pass
else:
continue
totalReads += 1
thisChrominfo = chrominfo[thisChrom]
thisAbspos = long(thisChrompos) + long(thisChrominfo[2])
counter += 1
indexUp = len(bins) - 1
indexDown = 0
indexMid = int((indexUp - indexDown) / 2.0)
while True:
if thisAbspos >= long(bins[indexMid][2]):
indexDown = indexMid + 0
indexMid = int((indexUp - indexDown) / 2.0) + indexMid
else:
indexUp = indexMid + 0
indexMid = int((indexUp - indexDown) / 2.0) + indexDown
if indexUp - indexDown < 2:
break
#####I ADDED IN THIS IF/ELSE TEST -- ORIGINALLY ONLY THE SECOND STATEMENT WAS HERE BUT THAT PREVENTS GETTING COUNTS IN THE LAST BIN#####
if thisAbspos >= long(bins[indexUp][2]):
binCounts[indexUp] += 1
else:
binCounts[indexDown] += 1
prevChrompos = thisChrompos
for i in range(len(binCounts)):
thisRatio = float(binCounts[i]) / (float(counter) / float(len(bins)))
OUTFILE.write("\t".join(bins[i][0:3]))
OUTFILE.write("\t")
OUTFILE.write(str(binCounts[i]))
OUTFILE.write("\t")
OUTFILE.write(str(thisRatio))
OUTFILE.write("\n")
binCounts.sort()
STATFILE.write('Reads\t')
STATFILE.write(str(totalReads))
STATFILE.write('\n')
STATFILE.write('AverageCount\t')
STATFILE.write(str(np.mean(binCounts)))
STATFILE.write('\n')
STATFILE.write('MedianCount\t')
STATFILE.write(str(binCounts[len(bins)/2]))
STATFILE.write('\n')
INFILE.close()
OUTFILE.close()
STATFILE.close()
def runOne(samFile, countDir, statsDir, species):
#get environment prepared#
countVars = cfg.Count()
if samFile[-2:] == 'gz':
sampleName = os.path.basename(samFile)[:-13]
else:
sampleName = os.path.basename(samFile)[:-11]
statFile = statsDir + sampleName + '.bincount.stats.txt'
countFile = countDir + sampleName + '.bincounts.txt'
countBins(samFile, countFile, statFile, countVars.chromDict[species], countVars.binDict[species])
printText = '\t\tFinished counting reads for ' + os.path.basename(samFile)
print(printText)
|
Python
| 0.000001
|
@@ -41,16 +41,40 @@
py as np
+%0Aimport sys, os, inspect
%0A%0Acurren
|
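The added imports back this 'put the parent directory on sys.path' idiom, which the file already relied on and which raised NameError before the fix; the sketch assumes it runs as a script, not in a REPL.

import os, sys, inspect

currentdir = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(currentdir))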
72f6ecc97d26a61e4c39963a8d88897e58a5a47d
|
Make dependency script detect need for git-fetch more reliably
|
build.py
|
build.py
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import shutil
import string
import subprocess
import sys
ROOT_DIR = os.path.realpath(os.path.dirname(__file__))
DEPS_DIR = os.path.join(ROOT_DIR, 'deps')
def execute(cmd, cwd=DEPS_DIR, env=os.environ, err=None, err_expr=['error:', 'fatal:']):
try:
if not err:
err = 'Failed to execute "%s"\n' % ' '.join(cmd)
# print(cmd)
# return True
p = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
o, e = p.communicate()
sys.stderr.write(e)
sys.stdout.write(o)
failed = p.returncode != 0 or any(e.find(expr) != -1 for expr in err_expr)
if failed:
sys.stderr.write(err)
return False
else:
return True
except Exception:
sys.stderr.write(err)
raise
class GitDependency(object):
def __init__(self, name, url, ref):
self.name = name
self.url = url
self.dir = os.path.join(DEPS_DIR, name)
self.ref = ref
self.source_updated = False
def _rev_parse(self, ref):
p = subprocess.Popen(['git', 'rev-parse', ref], cwd=self.dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
o, e = p.communicate()
if p.returncode != 0 or e.find('fatal:') != -1:
raise Exception('Failed to parse git rev.')
return string.strip(o)
def get_source(self):
target = None
if not os.path.exists(os.path.join(self.dir, '.git')):
cmd = [
'git',
'clone',
self.url,
self.name,
]
if not execute(cmd):
sys.stderr.write('Failed to clone source repository.\n')
return False
else:
try:
target = self._rev_parse(self.ref)
except Exception:
sys.stdout.write('Fetching git remote.\n')
cmd = ['git', 'fetch', '--all']
# TODO: If target is branch, we should fetch unconditionally
if not execute(cmd, cwd=self.dir):
sys.stderr.write('Failed to fetch source repository.\n')
return False
if not target:
target = self._rev_parse(self.ref)
current = self._rev_parse('HEAD')
if current == target:
sys.stdout.write('Local source is up to date.\n')
return True
sys.stdout.write('Checking out %s\n' % target)
self.source_updated = True
cmd = ['git', 'checkout', '-f', target]
return execute(cmd, cwd=self.dir)
def prepare(self, skip_setup=False):
return skip_setup or self.get_source()
class CppGitDependency(GitDependency):
def __init__(self, name, url, ref):
super(CppGitDependency, self).__init__(name, url, ref)
def build(self):
try:
jobs = multiprocessing.cpu_count()
except NotImplementedError:
jobs = 2
cmds = [
['sh', 'autogen.sh'],
['./configure', '--prefix=%s' % DEPS_DIR],
['make', '-j%d' % jobs],
['make', 'install'],
]
return all(execute(cmd, cwd=self.dir) for cmd in cmds)
def prepare(self, skip_setup=False):
if skip_setup:
return True
if not super(CppGitDependency, self).prepare(False):
return False
if not self.source_updated and os.path.exists(os.path.join(DEPS_DIR, 'bin')):
return True
return self.build()
def main(argv):
p = argparse.ArgumentParser()
p.add_argument('--clean', action='store_true', help='Cleanup deps directory.')
p.add_argument('--clean-build', action='store_true', help='Cleanup deps artifacts while retaining source checkout.')
p.add_argument('--skip-tests', action='store_true', help='Do not trigger tests.')
p.add_argument('--skip-setup', action='store_true', help='Do not trigger dependency setup and use files previously setup.')
p.add_argument('--skip-deps', action='store_true', help='Do not use downloaded dependencies.')
p.add_argument('--gyp-includes', "-I", help='Use additional gyp include.', nargs='+')
args = p.parse_args(argv)
deps = [
GitDependency('gyp', 'http://git.chromium.org/external/gyp.git', 'origin/master'),
CppGitDependency('protobuf', 'http://github.com/google/protobuf.git', '7f2a9fb1af432a9831b3e6769905601d72c29796'),
]
if args.clean or args.clean_build:
clean_dirs = [
'bin',
'lib',
'include',
]
if args.clean:
clean_dirs.extend([dep.name for dep in deps])
for dir in clean_dirs:
p = os.path.join(DEPS_DIR, dir)
if os.path.exists(p):
sys.stdout.write('Cleaning up "%s".\n' % p)
shutil.rmtree(p)
return 0
if not args.skip_deps:
for dep in deps:
dep.prepare(args.skip_setup)
gyp_cmd = [
'gyp' if args.skip_deps else os.path.join(DEPS_DIR, 'gyp', 'gyp'),
'--depth=%s' % ROOT_DIR,
]
if not args.skip_deps:
gyp_cmd.extend([
'-I',
os.path.join(DEPS_DIR, 'supplement.gypi'),
])
if args.gyp_includes:
gyp_cmd.extend(['-I%s' % i for i in args.gyp_includes])
def ninja_cmd(conf):
return [
'ninja',
'-C',
'out/%s' % conf,
]
def test_cmd(conf):
return [
os.path.join(ROOT_DIR, 'out', conf, 'run_tests.py')
]
if args.skip_tests:
cmds = [
gyp_cmd,
ninja_cmd('Debug'),
ninja_cmd('Release'),
]
else:
cmds = [
gyp_cmd,
ninja_cmd('Debug'),
test_cmd('Debug'),
ninja_cmd('Release'),
test_cmd('Release'),
]
env = os.environ
env['LD_LIBRARY_PATH'] = os.path.join(DEPS_DIR, 'lib')
env['GYP_GENERATORS'] = 'ninja'
return 0 if all(execute(cmd, cwd=ROOT_DIR, env=env) for cmd in cmds) else 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Python
| 0.000002
|
@@ -1657,41 +1657,56 @@
-target = self._rev_parse(
+res = execute(%5B'git', 'cat-file', '-t',
self.ref
)%0A
@@ -1697,24 +1697,25 @@
t', self.ref
+%5D
)%0A exce
@@ -1724,24 +1724,55 @@
Exception:%0A
+ pass%0A if not res:%0A
sys.
|
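The detection idiom the diff switches to, sketched in Python 3: git cat-file -t <ref> exits non-zero when the object is absent locally, a more direct fetch trigger than catching a rev-parse failure (requires git; run inside a repository).

import subprocess

def ref_exists_locally(ref, cwd='.'):
    # A non-zero exit means the ref/object needs a fetch first.
    return subprocess.call(
        ['git', 'cat-file', '-t', ref], cwd=cwd,
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0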
1aec16ffc5d5781562775de7faa76238d55b91b1
|
Improve sound loop set config validation
|
mpfmc/config_collections/sound_loop_set.py
|
mpfmc/config_collections/sound_loop_set.py
|
from typing import TYPE_CHECKING, Union
from mpf.core.randomizer import Randomizer
from mpfmc.core.config_collection import ConfigCollection
from mpfmc.core.audio.audio_exception import AudioException
if TYPE_CHECKING:
from mpfmc.core.mc import MpfMc
class SoundLoopSetCollection(ConfigCollection):
config_section = 'sound_loop_sets'
collection = 'sound_loop_sets'
class_label = 'SoundLoopSets'
def __init__(self, mc: "MpfMc"):
super().__init__(mc)
self._validate_handler = None
def create_entries(self, config: dict, **kwargs) -> None:
# config is localized to this collection's section
del kwargs
for name, settings in config.items():
try:
self[name] = self.process_config(settings)
except (AudioException, ValueError) as ex:
raise ValueError("An error occurred while processing the '{}' entry in "
"the sound_loop_sets config collection: {}".format(name, ex))
# Validation of referenced sound assets must be completed after all
# assets have been loaded (can use the init_done event for that)
self._validate_handler = self.mc.events.add_handler("init_done", self._validate_sound_assets)
def process_config(self, config: dict) -> dict:
# processes the 'sound_loop_sets' section of a config file to populate the
# mc.sound_loop_sets dict.
# config is localized to 'sound_loop_sets' section
if self.mc.sound_system is None or self.mc.sound_system.audio_interface is None:
raise AudioException("Audio features are not enabled. Cannot validate sound_loop_sets config collection.")
return self.process_loop_set(config)
def process_loop_set(self, config: dict) -> dict:
# config is localized to a single sound loop set settings within a list
self.mc.config_validator.validate_config('sound_loop_sets', config)
for layer in config["layers"]:
self.mc.config_validator.validate_config('sound_loop_sets:layers', layer)
# Clamp volume between 0 and 1
if layer['volume'] < 0:
layer['volume'] = 0
elif layer['volume'] > 1:
layer['volume'] = 1
return config
def _validate_sound_assets(self, **kwargs) -> None:
"""Validate the referenced sound assets in the loop set layers.
Notes:
This must be performed after all the sound assets have been loaded.
"""
del kwargs
if self._validate_handler:
self.mc.events.remove_handler(self._validate_handler)
for name, config in self.items():
# Validate sound setting (make sure only valid sound assets are referenced)
if config["sound"] not in self.mc.sounds:
raise ValueError("The '{}' sound_loop_set references an invalid sound asset "
"name '{}' in its sound setting".format(name, config["sound"]))
if self.mc.sounds[config["sound"]].streaming:
raise ValueError("The '{}' sound_loop_set references a streaming sound asset "
"'{}' in its sound setting (only in-memory sounds are "
"supported in loop sets)".format(name, config["sound"]))
# Validate sound settings in layers (make sure only valid sound assets are referenced)
for layer in config["layers"]:
if layer["sound"] not in self.mc.sounds:
raise ValueError("The '{}' sound_loop_set references an invalid sound asset "
"name '{}' in one of its layers".format(name, layer["sound"]))
if self.mc.sounds[layer["sound"]].streaming:
raise ValueError("The '{}' sound_loop_set references a streaming sound asset "
"'{}' in one of its layers (only in-memory sounds are "
"supported in loop sets)".format(name, layer["sound"]))
collection_cls = SoundLoopSetCollection
class SoundLoopSetGroup(object):
def __init__(self):
self.items = None
|
Python
| 0.000001
|
@@ -37,51 +37,8 @@
ion%0A
-from mpf.core.randomizer import Randomizer%0A
from
@@ -1921,24 +1921,336 @@
', config)%0A%0A
+ # Clamp volume between 0 and 1%0A if 'volume' in config and config%5B'volume'%5D:%0A if config%5B'volume'%5D %3C 0:%0A config%5B'volume'%5D = 0%0A elif config%5B'volume'%5D %3E 1:%0A config%5B'volume'%5D = 1%0A%0A # Validate optional layers%0A if 'layers' in config:%0A
for
@@ -2268,32 +2268,36 @@
nfig%5B%22layers%22%5D:%0A
+
self
@@ -2375,32 +2375,36 @@
+
# Clamp
volume betwe
@@ -2383,32 +2383,38 @@
# Clamp
+layer
volume between 0
@@ -2412,32 +2412,36 @@
between 0 and 1%0A
+
if l
@@ -2468,32 +2468,36 @@
+
+
layer%5B'volume'%5D
@@ -2492,32 +2492,36 @@
r%5B'volume'%5D = 0%0A
+
elif
@@ -2534,32 +2534,36 @@
%5B'volume'%5D %3E 1:%0A
+
@@ -4460,90 +4460,4 @@
ion%0A
-%0A%0Aclass SoundLoopSetGroup(object):%0A%0A def __init__(self):%0A self.items = None%0A
|
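The clamp the diff spells out with if/elif, condensed to the equivalent min/max form (a sketch; it guards on a present, truthy 'volume' the same way the diff does):

def clamp_volume(config):
    if 'volume' in config and config['volume']:
        config['volume'] = max(0, min(1, config['volume']))
    return config

print(clamp_volume({'volume': 1.7}))   # {'volume': 1}
print(clamp_volume({'volume': -0.2}))  # {'volume': 0}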
04d59394657e420025f8cc15c51732a68ae8cc03
|
Add the new boolean fields to the right model.
|
site/roaster/models.py
|
site/roaster/models.py
|
import re
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
WEEKDAY_CHOICES = (
(0, 'Monday'),
(1, 'Tuesday'),
(2, 'Wednesday'),
(3, 'Thursday'),
(4, 'Friday'),
(5, 'Saturday'),
(6, 'Sunday'),
)
def format_phone_number(phone):
phone = re.sub('[^\w]', '', phone)
if (len(phone) == 10):
return '(%s) %s-%s' % (phone[:3], phone[3:6], phone[6:10])
return ''
# TODO: Move Business and BusinessHours models to generic app.
# TODO: Add tests
class Business(models.Model):
name = models.CharField(max_length=200, db_index=True,)
slug = models.SlugField()
active = models.BooleanField()
address = models.CharField(max_length=200, blank=True,)
lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)
lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)
phone = models.CharField(max_length=14, blank=True,)
url = models.URLField(max_length=200, verbose_name='URL', blank=True,)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
modified_at = models.DateTimeField(auto_now=True, db_index=True)
class Meta:
ordering = ['name',]
get_latest_by = 'created_at'
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
self.slug = Business.unique_slugify(self.name)
# Sanitize the phone number
self.phone = format_phone_number(self.phone)
super(Business, self).save(*args, **kwargs)
@staticmethod
def unique_slugify(value, num=0):
if (num > 0):
slug = slugify("%s-%s" % (value, num))
else:
slug = slugify(value)
try:
Business.objects.get(slug=slug)
# Slug is not unique!
slug = Business.unique_slugify(value, num+1)
except Business.DoesNotExist:
pass
return slug
class BusinessHours(models.Model):
weekday = models.IntegerField(choices=WEEKDAY_CHOICES, default=0,)
open = models.TimeField()
close = models.TimeField()
business = models.ForeignKey(Business, related_name='hours',)
created_at = models.DateTimeField(auto_now_add=True, db_index=True,)
modified_at = models.DateTimeField(auto_now=True, db_index=True,)
class Meta:
ordering = ['weekday',]
verbose_name = 'Business Hours'
verbose_name_plural = 'Business Hours'
unique_together = (('weekday', 'business'),)
def __unicode__(self):
return "{weekday}: {open} to {close}".format(
weekday=self.get_weekday_display(), open=self.open,
close=self.close)
class Cafe(Business):
def get_absolute_url(self):
return reverse('roaster.views.cafe_details', args=[self.slug])
class Roaster(Business):
description = models.TextField(blank=True,)
photo_url = models.URLField(max_length=200, verbose_name='Photo URL',
blank=True,)
video_url = models.URLField(max_length=200, verbose_name='Video URL',
blank=True,)
cafes = models.ManyToManyField('Cafe', blank=True,)
def get_absolute_url(self):
return reverse('roaster.views.roaster_details', args=[self.slug])
class Roast(models.Model):
name = models.CharField(max_length=200, unique=True, db_index=True,)
roaster = models.ForeignKey('Roaster', related_name='roasts',)
order_online = models.BooleanField()
cafe_on_site = models.BooleanField()
open_to_publich = models.BooleanField()
active = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True, db_index=True,)
modified_at = models.DateTimeField(auto_now=True, db_index=True,)
class Meta:
ordering = ['name',]
def __unicode__(self):
return self.name
|
Python
| 0
|
@@ -2976,24 +2976,189 @@
lank=True,)%0A
+ online_only = models.BooleanField()%0A order_online = models.BooleanField()%0A cafe_on_site = models.BooleanField()%0A open_to_public = models.BooleanField()%0A
photo_ur
@@ -3678,134 +3678,8 @@
',)%0A
- order_online = models.BooleanField()%0A cafe_on_site = models.BooleanField()%0A open_to_publich = models.BooleanField()%0A
|
b68669f07aba05a4e96f900df71d6179382cd6f1
|
bump version
|
build.py
|
build.py
|
from pybuilder.core import use_plugin, init, Author, task
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.integrationtest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
use_plugin("python.distutils")
use_plugin('copy_resources')
use_plugin('filter_resources')
name = "yum-repos"
summary = "yum-repos: simple yum repositories with minimal rest api"
url = "https://github.com/arnehilmann/yum-repos"
version = "0.8.2"
authors = [Author('Arne Hilmann', 'arne.hilmann@gmail.com')]
description = """yum-repos
- serve yum repositories as simple folders
- ... via web server
- offer rest api for
- create/remove/link of repositories
- upload/stage/remove of rpms
"""
default_task = ["clean", "analyze", "publish"]
@task
def gittag(project, logger):
logger.info("The following commands create a new release, triggering all the fun stuff:")
logger.info("git tag -a v{0} -m v{0}".format(project.version))
logger.info("git push --tags")
@init
def set_properties(project):
project.build_depends_on('requests')
project.depends_on("flask")
try:
import functools.lru_cache
except ImportError:
pass
# project.depends_on("backports.functools_lru_cache")
project.set_property('copy_resources_target', '$dir_dist')
project.get_property('copy_resources_glob').extend(['setup.*cfg'])
project.get_property('filter_resources_glob').extend(['**/setup.*cfg'])
|
Python
| 0
|
@@ -493,9 +493,9 @@
0.8.
-2
+4
%22%0A%0Aa
|
0fe151d07f1f77bec1e8b7827823bf7197b52408
|
Fix string interpolation to delayed by logging
|
murano/common/plugins/extensions_loader.py
|
murano/common/plugins/extensions_loader.py
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import re
from oslo_config import cfg
from oslo_log import log as logging
import six
from stevedore import dispatch
from murano.common.i18n import _LE, _LI, _LW
from murano.dsl import murano_package
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# regexp validator to ensure that the entry-point name is a valid MuranoPL
# class name with an optional namespace name
NAME_RE = re.compile(r'^[a-zA-Z]\w*(\.[a-zA-Z]\w*)*$')
class PluginLoader(object):
def __init__(self, namespace="io.murano.extensions"):
LOG.info('Loading extension plugins')
self.namespace = namespace
extension_manager = dispatch.EnabledExtensionManager(
self.namespace,
PluginLoader.is_plugin_enabled,
on_load_failure_callback=PluginLoader._on_load_failure)
self.packages = {}
name_map = {}
for ext in extension_manager.extensions:
self.load_extension(ext, name_map)
self.cleanup_duplicates(name_map)
def load_extension(self, extension, name_map):
dist_name = str(extension.entry_point.dist)
name = extension.entry_point.name
if not NAME_RE.match(name):
LOG.warning(_LW("Entry-point 'name' {name} is invalid").format(
name=name))
return
name_map.setdefault(name, []).append(dist_name)
if dist_name in self.packages:
package = self.packages[dist_name]
else:
package = PackageDefinition(extension.entry_point.dist)
self.packages[dist_name] = package
plugin = extension.plugin
try:
package.classes[name] = initialize_plugin(plugin)
except Exception:
LOG.exception(_LE("Unable to initialize plugin for {name}").format(
name=name))
return
LOG.info(_LI("Loaded class {class_name} from {dist}").format(
class_name=name, dist=dist_name))
def cleanup_duplicates(self, name_map):
for class_name, package_names in six.iteritems(name_map):
if len(package_names) >= 2:
LOG.warning(_LW("Class is defined in multiple packages!"))
for package_name in package_names:
LOG.warning(_LW("Disabling class {class_name} in {dist} "
"due to conflict").format(
class_name=class_name,
dist=package_name))
self.packages[package_name].classes.pop(class_name)
@staticmethod
def is_plugin_enabled(extension):
if CONF.murano.enabled_plugins is None:
# assume all plugins are enabled until manually specified otherwise
return True
else:
return (extension.entry_point.dist.project_name in
CONF.murano.enabled_plugins)
@staticmethod
def _on_load_failure(manager, ep, exc):
LOG.warning(_LW("Error loading entry-point {ep} from package {dist}: "
"{err}").format(ep=ep.name, dist=ep.dist, err=exc))
def register_in_loader(self, package_loader):
for package in six.itervalues(self.packages):
package_loader.register_package(
MuranoPackage(package_loader, package))
def initialize_plugin(plugin):
if hasattr(plugin, "init_plugin"):
initializer = getattr(plugin, "init_plugin")
if inspect.ismethod(initializer) and initializer.__self__ is plugin:
LOG.debug("Initializing plugin class {name}".format(name=plugin))
initializer()
return plugin
class PackageDefinition(object):
def __init__(self, distribution):
self.name = distribution.project_name
self.version = distribution.version
if distribution.has_metadata(distribution.PKG_INFO):
# This has all the package metadata, including Author,
# description, License etc
self.info = distribution.get_metadata(distribution.PKG_INFO)
else:
self.info = None
self.classes = {}
class MuranoPackage(murano_package.MuranoPackage):
def __init__(self, pkg_loader, package_definition):
super(MuranoPackage, self).__init__(
pkg_loader, package_definition.name, runtime_version='1.0')
for class_name, clazz in six.iteritems(package_definition.classes):
if hasattr(clazz, "_murano_class_name"):
LOG.warning(_LW("Class '%(class_name)s' has a MuranoPL "
"name '%(name)s' defined which will be "
"ignored") %
dict(class_name=class_name,
name=getattr(clazz, "_murano_class_name")))
LOG.debug("Registering '%s' from '%s' in class loader"
% (class_name, package_definition.name))
self.register_class(clazz, class_name)
def get_resource(self, name):
raise NotImplementedError()
|
Python
| 0.99955
|
@@ -5442,16 +5442,17 @@
loader%22
+,
%0A
@@ -5470,11 +5470,8 @@
-%25 (
clas
@@ -5498,25 +5498,24 @@
nition.name)
-)
%0A
|
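The whole fix in two lines: hand the arguments to logger.debug instead of pre-formatting with %, so interpolation is deferred until the record is actually emitted (stdlib logging):

import logging
logger = logging.getLogger(__name__)

# Eager: the string is built even when DEBUG is disabled.
logger.debug("Registering '%s' from '%s' in class loader" % ('Cls', 'pkg'))
# Deferred: logging interpolates only if a handler will emit the record.
logger.debug("Registering '%s' from '%s' in class loader", 'Cls', 'pkg')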
83b12f568ba8843ac6bff4c5179d8200d88505b0
|
Fix an issue where the .VERSION file output from build.py had multiple commit hashes
|
build.py
|
build.py
|
#/*******************************************************************************
# * Copyright (c) 2016 Synopsys, Inc
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Eclipse Public License v1.0
# * which accompanies this distribution, and is available at
# * http://www.eclipse.org/legal/epl-v10.html
# *
# * Contributors:
# * Synopsys, Inc - initial implementation and documentation
# *******************************************************************************/
import sys
import subprocess
import re
import json
import shutil
if __name__ == "__main__":
# Check to make sure we have the correct number of arguments
if len(sys.argv) != 4:
print "Incorrect number of arguments given. Build.py takes three arguments, first is the version number, second is the build number $BUILD_NUMBER and third is build id $BUILD_ID"
sys.exit(-1)
# Save version, build number and id that was passed in from jenkins
version = sys.argv[1]
build_number = sys.argv[2]
build_id = sys.argv[3]
# Grep git commit id
output = subprocess.Popen("git log --name-status HEAD^..HEAD | grep \"commit*\"", stdout=subprocess.PIPE, shell=True)
commit = output.stdout.read()
# Remove all head information, so that only the commit id is left
commit_id = re.sub(r'\(.*?\)','',commit)
commit_id = re.sub("commit","",commit_id)
# Generate the json output text
json_output = json.dumps({ "commit_id" : commit_id.strip(), "build_number" : build_number, "build_id" : build_id }, indent=4)
# Run the typical build for jenkins
subprocess.check_call("mvn clean install", shell=True)
# write the version output file
version_file = open("./target/coverity.hpi.VERSION","w")
version_file.write(json_output)
# move the .hpi file to a versioned file
shutil.move("./target/coverity.hpi", "./target/coverity-{0}.hpi".format(version))
|
Python
| 0.000021
|
@@ -1058,15 +1058,30 @@
%0A%09#
-Grep gi
+git log for the curren
t co
@@ -1087,16 +1087,21 @@
ommit id
+ hash
%0A%09output
@@ -1135,50 +1135,31 @@
g --
-name-status HEAD%5E..HEAD %7C grep %5C%22commit*%5C%22
+pretty=format:'%25H' -n 1
%22, s
@@ -1200,16 +1200,19 @@
%0A%09commit
+_id
= outpu
@@ -1231,160 +1231,8 @@
d()%0A
-%09# Remove all head information, so that only the commit id is left%0A%09commit_id = re.sub(r'%5C(.*?%5C)','',commit)%0A%09commit_id = re.sub(%22commit%22,%22%22,commit_id)%0A
%09# G
|
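The replacement command on its own: asking git for exactly one %H hash removes both the grep and the regex post-processing (requires git; run inside a repository):

import subprocess

commit_id = subprocess.check_output(
    ['git', 'log', '--pretty=format:%H', '-n', '1']).decode().strip()
print(commit_id)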
fe55a3e5ba9f4a368d39fcc3316a471df547d714
|
Bump version to 0.8.1+dev
|
h11/_version.py
|
h11/_version.py
|
# This file must be kept very simple, because it is consumed from several
# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
# We use a simple scheme:
# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
# where the +dev versions are never released into the wild, they're just what
# we stick into the VCS in between releases.
#
# This is compatible with PEP 440:
# http://legacy.python.org/dev/peps/pep-0440/
# via the use of the "local suffix" "+dev", which is disallowed on index
# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
# 1.0.0.)
__version__ = "0.8.1"
|
Python
| 0
|
@@ -676,10 +676,14 @@
= %220.8.1
++dev
%22%0A
|
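The sorting property the version comment relies on, checked here with the third-party packaging library (an assumption of this sketch; the record itself imports nothing):

from packaging.version import Version

assert Version('0.8.1+dev') > Version('0.8.1')   # local suffix sorts after the release
assert Version('0.8.1.dev0') < Version('0.8.1')  # a .dev pre-release sorts before it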
14753d1d445f876910ae181530082fe7952a9b10
|
Add some sanity checks to the search adder, and don't hit renamed docs over and over again.
|
regscrape/regs_common/commands/add_to_search.py
|
regscrape/regs_common/commands/add_to_search.py
|
GEVENT = False
from optparse import OptionParser
arg_parser = OptionParser()
arg_parser.add_option("-a", "--agency", dest="agency", action="store", type="string", default=None, help="Specify an agency to which to limit the dump.")
arg_parser.add_option("-d", "--docket", dest="docket", action="store", type="string", default=None, help="Specify a docket to which to limit the dump.")
arg_parser.add_option("-A", "--all", dest="process_all", action="store_true", default=False, help="Replace existing search data with new data.")
from regs_models import *
import urllib2, json, traceback, datetime, zlib, pymongo
import pyes
def run(options, args):
while True:
try:
return add_to_search(options, args)
except (pymongo.errors.OperationFailure, pyes.exceptions.NoServerAvailable):
print "Resetting..."
continue
def add_to_search(options, args):
es = pyes.ES(['localhost:9500'], timeout=30.0)
now = datetime.datetime.now()
query = {'deleted': False, 'scraped': 'yes', '$nor': [{'views.extracted': 'no'},{'attachments.views.extracted':'no'}]}
if options.agency:
query['agency'] = options.agency
if options.docket:
query['docket_id'] = options.docket
if not options.process_all:
query['in_search_index'] = False
for doc in Doc.objects(__raw__=query):
print 'trying', doc.id
if doc.renamed:
print 'renamed', doc.id
continue
# build initial ES document
es_doc = {
'docket_id': doc.docket_id,
'comment_on': doc.comment_on.get('document_id', None) if doc.comment_on else None,
'title': doc.title,
'agency': doc.agency,
'posted_date': doc.details.get('Date_Posted', None),
'document_type': doc.type,
'submitter_organization': doc.details.get('Organization_Name', None),
'submitter_name': ' '.join(filter(bool, [doc.details.get('First_Name', None), doc.details.get('Middle_Initial', None), doc.details.get('Last_Name', None)])),
'submitter_entities': doc.submitter_entities,
'files': []
}
# add views
for view in doc.views:
if not view.content:
continue
es_doc['files'].append({
"title": None,
"abstract": None,
"object_id": doc.object_id,
"file_type": view.type,
"view_type": "document_view",
"text": view.as_text(),
"entities": view.entities
})
# add attachments
for attachment in doc.attachments:
for view in attachment.views:
if not view.content:
continue
es_doc['files'].append({
"title": attachment.title,
"abstract": attachment.abstract,
"object_id": attachment.object_id,
"file_type": view.type,
"view_type": "attachment_view",
"text": view.as_text(),
"entities": view.entities
})
# save to es
es_status = es.index(es_doc, 'regulations', 'document', id=str(doc.id))
print 'saved %s to ES as %s' % (doc.id, es_status['_id'])
# update main mongo doc
doc.in_search_index = True
# save back to Mongo
doc.save()
print "saved %s back to mongo" % doc.id
|
Python
| 0
|
@@ -1436,32 +1436,94 @@
enamed', doc.id%0A
+ doc.in_search_index = True%0A doc.save()%0A
cont
@@ -2260,16 +2260,55 @@
dd views
+ (max of 5 to avoid pathological cases)
%0A
@@ -2325,24 +2325,28 @@
in doc.views
+%5B:5%5D
:%0A
@@ -2748,16 +2748,56 @@
achments
+ (max of 10 to avoid pathological cases)
%0A
@@ -2830,16 +2830,21 @@
achments
+%5B:10%5D
:%0A
@@ -2877,16 +2877,20 @@
nt.views
+%5B:5%5D
:%0A
|
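The sanity caps work because list slicing never over-runs: views[:5] is safe on shorter lists and truncates pathological ones, so no length check is needed.

views = ['v1', 'v2']
assert views[:5] == ['v1', 'v2']                  # short lists pass through untouched
assert list(range(100))[:5] == [0, 1, 2, 3, 4]    # pathological cases get capped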
f4d09f956a2408d38ba0bd8a108a6f27a9fc6639
|
Remove special support for celery from sitecustomize (#799)
|
opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
|
opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from logging import getLogger
from os import environ, path
from os.path import abspath, dirname, pathsep
from re import sub
from pkg_resources import iter_entry_points
from opentelemetry.instrumentation.dependencies import (
get_dist_dependency_conflicts,
)
from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro
from opentelemetry.instrumentation.environment_variables import (
OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,
)
logger = getLogger(__name__)
def _load_distros() -> BaseDistro:
for entry_point in iter_entry_points("opentelemetry_distro"):
try:
distro = entry_point.load()()
if not isinstance(distro, BaseDistro):
logger.debug(
"%s is not an OpenTelemetry Distro. Skipping",
entry_point.name,
)
continue
logger.debug(
"Distribution %s will be configured", entry_point.name
)
return distro
except Exception as exc: # pylint: disable=broad-except
logger.exception(
"Distribution %s configuration failed", entry_point.name
)
raise exc
return DefaultDistro()
def _load_instrumentors(distro):
package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])
if isinstance(package_to_exclude, str):
package_to_exclude = package_to_exclude.split(",")
# to handle users entering "requests , flask" or "requests, flask" with spaces
package_to_exclude = [x.strip() for x in package_to_exclude]
for entry_point in iter_entry_points("opentelemetry_pre_instrument"):
entry_point.load()()
for entry_point in iter_entry_points("opentelemetry_instrumentor"):
if entry_point.name in package_to_exclude:
logger.debug(
"Instrumentation skipped for library %s", entry_point.name
)
continue
try:
conflict = get_dist_dependency_conflicts(entry_point.dist)
if conflict:
logger.debug(
"Skipping instrumentation %s: %s",
entry_point.name,
conflict,
)
continue
# tell instrumentation to not run dep checks again as we already did it above
distro.load_instrumentor(entry_point, skip_dep_check=True)
logger.debug("Instrumented %s", entry_point.name)
except Exception as exc: # pylint: disable=broad-except
logger.exception("Instrumenting of %s failed", entry_point.name)
raise exc
for entry_point in iter_entry_points("opentelemetry_post_instrument"):
entry_point.load()()
def _load_configurators():
configured = None
for entry_point in iter_entry_points("opentelemetry_configurator"):
if configured is not None:
logger.warning(
"Configuration of %s not loaded, %s already loaded",
entry_point.name,
configured,
)
continue
try:
entry_point.load()().configure() # type: ignore
configured = entry_point.name
except Exception as exc: # pylint: disable=broad-except
logger.exception("Configuration of %s failed", entry_point.name)
raise exc
def initialize():
try:
distro = _load_distros()
distro.configure()
_load_configurators()
_load_instrumentors(distro)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to auto initialize opentelemetry")
finally:
environ["PYTHONPATH"] = sub(
fr"{dirname(abspath(__file__))}{pathsep}?",
"",
environ["PYTHONPATH"],
)
if (
hasattr(sys, "argv")
and sys.argv[0].split(path.sep)[-1] == "celery"
and "worker" in sys.argv[1:]
):
from celery.signals import worker_process_init # pylint:disable=E0401
@worker_process_init.connect(weak=False)
def init_celery(*args, **kwargs):
initialize()
else:
initialize()
|
Python
| 0
|
@@ -582,19 +582,8 @@
e.%0A%0A
-import sys%0A
from
@@ -630,22 +630,16 @@
environ
-, path
%0Afrom os
@@ -4426,318 +4426,8 @@
)%0A%0A%0A
-if (%0A hasattr(sys, %22argv%22)%0A and sys.argv%5B0%5D.split(path.sep)%5B-1%5D == %22celery%22%0A and %22worker%22 in sys.argv%5B1:%5D%0A):%0A from celery.signals import worker_process_init # pylint:disable=E0401%0A%0A @worker_process_init.connect(weak=False)%0A def init_celery(*args, **kwargs):%0A initialize()%0A%0A%0Aelse:%0A
init
|
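For reference, the behavior the diff removes, grounded in the deleted lines: initialize() was deferred to each worker process via celery's worker_process_init signal (requires celery installed; sketch only):

from celery.signals import worker_process_init  # pylint:disable=E0401

@worker_process_init.connect(weak=False)
def init_worker(*args, **kwargs):
    print('per-worker initialization runs here')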
0e3e9028598c8ebe49c3aec98fbfb584e8f5223b
|
Check both category_links and resource_link
|
myuw/management/commands/check_reslinks.py
|
myuw/management/commands/check_reslinks.py
|
"""
Test all the links in the CSV for non-200 status codes (after redirects).
"""
import logging
import sys
import urllib3
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from myuw.dao.category_links import Res_Links
from myuw.util.settings import get_cronjob_recipient, get_cronjob_sender
# Disable SSL warnings
urllib3.disable_warnings()
# Need limit of 1, otherwise sdb gives us a 403
http = urllib3.PoolManager(1, timeout=8)
# Need to override UA for some links, e.g. LinkedIn
ua = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
messages = []
links = Res_Links.get_all_links()
for link in links:
if link.url.startswith("https://sdb."):
continue
status = get_http_status(link.url, messages)
if status not in [200]:
msg = "{}, {}, URL: {} =status=> {}\n\n".format(
link.title, make_campus_human_readable(link.campus),
link.url, status)
logger.error(msg)
messages.append(msg)
if len(messages):
send_mail("Check Cetegory Links Cron",
"\n".join(messages),
"{}@uw.edu".format(get_cronjob_sender()),
["{}@uw.edu".format(get_cronjob_recipient())])
def get_http_status(url, messages):
"""
Given a url, get the HTTP status code or a human-readable exception.
"""
try:
result = http.request(
'GET',
url,
headers={'User-Agent': ua},
retries=urllib3.Retry(redirect=3, connect=2, read=2)
)
return result.status
except Exception as ex:
messages.append(str(ex))
def make_campus_human_readable(campus):
if campus is None:
return 'All Campuses'
else:
# Capitalize first letter
return campus[0:1].upper() + campus[1:]
|
Python
| 0.000001
|
@@ -267,17 +267,33 @@
es_Links
+, Resource_Links
%0A
-
from myu
@@ -804,16 +804,510 @@
+messages.append(Res_Links.csv_filename)%0A verify_links(links, messages)%0A%0A links = Resource_Links.get_all_links()%0A messages.append(%22%5Cn%5Cn%7B%7D%22.format(Resource_Links.csv_filename))%0A verify_links(links, messages)%0A send_mail(%22Check Cetegory and Resource Links%22,%0A %22%5Cn%22.join(messages),%0A %22%7B%7D@uw.edu%22.format(get_cronjob_sender()),%0A %5B%22%7B%7D@uw.edu%22.format(get_cronjob_recipient())%5D)%0A%0A%0Adef verify_links(links, messages):%0A
for link
@@ -1317,20 +1317,16 @@
links:%0A
-
@@ -1377,20 +1377,16 @@
-
continue
@@ -1394,20 +1394,16 @@
-
-
status =
@@ -1447,20 +1447,16 @@
-
if statu
@@ -1487,60 +1487,35 @@
-
-
msg =
-%22%7B%7D, %7B%7D, URL: %7B%7D =status=%3E %7B%7D%5Cn%5Cn%22.format(
+%7B%22title%22: link.title,
%0A
@@ -1530,28 +1530,25 @@
- link.title,
+%22campus%22:
make_ca
@@ -1596,24 +1596,30 @@
+%22url%22:
link.url, s
@@ -1620,16 +1620,8 @@
url,
- status)
%0A
@@ -1637,314 +1637,107 @@
-logger.error(msg)%0A messages.append(msg)%0A if len(messages):%0A send_mail(%22Check Cetegory Links Cron%22,%0A %22%5Cn%22.join(messages),%0A %22%7B%7D@uw.edu%22.format(get_cronjob_sender()),%0A %5B%22%7B%7D@uw.edu%22.format(get_cronjob_recipient())%5D
+ %22status%22: status%7D%0A logger.error(msg)%0A messages.append(%22%7B%7D%5Cn%5Cn%22.format(msg)
)%0A%0A%0A
@@ -2081,16 +2081,16 @@
.status%0A
-
exce
@@ -2109,16 +2109,41 @@
as ex:%0A
+ logger.error(ex)%0A
|
d242870d99634edc4b077b045fe5489039a3c821
|
add shebang to targetselection script
|
bin/targetselection.py
|
bin/targetselection.py
|
import numpy
from desitarget.io import read_tractor, write_targets
from desitarget.cuts import LRG, ELG, BGS, QSO
from desitarget import targetmask
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("--type", choices=["tractor"], default="tractor", help="Assume a type for src files")
ap.add_argument("src", help="File that stores Candidates/Objects")
ap.add_argument("dest", help="File that stores targets")
TYPES = {
'LRG': LRG,
'ELG': ELG,
'BGS': BGS,
'QSO': QSO,
}
def main():
ns = ap.parse_args()
candidates = read_tractor(ns.src)
# FIXME: fits doesn't like u8; there must be a workaround
# but lets stick with i8 for now.
tsbits = numpy.zeros(len(candidates), dtype='i8')
for t in TYPES.keys():
cut = TYPES[t]
bitfield = targetmask.mask(t)
with numpy.errstate(all='ignore'):
mask = cut.apply(candidates)
tsbits[mask] |= bitfield
assert ((tsbits & bitfield) != 0).sum() == mask.sum()
print (t, 'selected', mask.sum())
write_targets(ns.dest, candidates, tsbits)
print ('written to', ns.dest)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -1,12 +1,35 @@
+#!/usr/bin/env python%0A%0A
import numpy
|
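The selection loop in that script ORs one bit per target class into `tsbits`; a toy illustration of the same bookkeeping, with made-up bit values standing in for `targetmask.mask(t)`:

import numpy

BITS = {'LRG': 0b01, 'ELG': 0b10}  # hypothetical stand-ins for targetmask.mask(t)

tsbits = numpy.zeros(4, dtype='i8')
lrg_mask = numpy.array([True, False, True, False])
elg_mask = numpy.array([False, True, True, False])

tsbits[lrg_mask] |= BITS['LRG']
tsbits[elg_mask] |= BITS['ELG']

# mirrors the assert in main(): every selected row carries its class bit
assert ((tsbits & BITS['LRG']) != 0).sum() == lrg_mask.sum()
print(tsbits)  # [1 2 3 0] -- row 2 is flagged as both LRG and ELG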
22a76a55c373af8f64717b10542b7230e63e9583
|
update person caches to 1hr
|
sis_provisioner/cache.py
|
sis_provisioner/cache.py
|
from restclients.cache_implementation import TimedCache
import re


class RestClientsCache(TimedCache):
    """ A custom cache implementation for Canvas """

    url_policies = {}
    url_policies["sws"] = (
        (re.compile(r"^/student/v5/term/"), 60 * 60 * 10),
        (re.compile(r"^/student/v5/course/"), 60 * 5),
    )
    url_policies["pws"] = (
        (re.compile(r"^/identity/v1/person/"), 60 * 60 * 10),
        (re.compile(r"^/identity/v1/entity/"), 60 * 60 * 10),
    )
    url_policies["canvas"] = (
        (re.compile(r"^/api/v1/accounts/\d+/roles"), 60 * 60 * 4),
    )

    def _get_cache_policy(self, service, url):
        for policy in RestClientsCache.url_policies.get(service, []):
            if policy[0].match(url):
                return policy[1]
        return 0

    def getCache(self, service, url, headers):
        cache_policy = self._get_cache_policy(service, url)
        return self._response_from_cache(service, url, headers, cache_policy)

    def processResponse(self, service, url, response):
        if self._get_cache_policy(service, url):
            return self._process_response(service, url, response)
|
Python
| 0
|
@@ -395,37 +395,32 @@
rson/%22), 60 * 60
- * 10
),%0A (re.c
@@ -456,29 +456,24 @@
/%22), 60 * 60
- * 10
),%0A )%0A
|
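To make the policy table above concrete, a small usage sketch, assuming the class is importable and that `TimedCache` can be constructed without arguments (its constructor is not shown in the snippet):

cache = RestClientsCache()

# term resources cache for ten hours, course resources for five minutes
assert cache._get_cache_policy('sws', '/student/v5/term/2013,spring') == 60 * 60 * 10
assert cache._get_cache_policy('sws', '/student/v5/course/12345') == 60 * 5

# anything without a matching policy falls through to 0 (no caching)
assert cache._get_cache_policy('sws', '/student/v5/enrollment/') == 0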
f409fb8c7243558ab6ad956904f5ab40e18c2734
|
Add flask management command for printing config values
|
manage.py
|
manage.py
|
""" Defines a series of scripts for running server and maintenance
FLASK_APP=manage.py flask --help
"""
import os
import click
import redis
import alembic.config
from flask_migrate import Migrate
from portal.factories.app import create_app
from portal.extensions import db
from portal.models.i18n import smartling_upload, smartling_download
from portal.models.fhir import add_static_concepts
from portal.models.intervention import add_static_interventions
from portal.models.organization import add_static_organization
from portal.models.relationship import add_static_relationships
from portal.models.role import add_static_roles
from portal.models.user import permanently_delete_user, flag_test
from portal.config.site_persistence import SitePersistence
app = create_app()
MIGRATIONS_DIR = os.path.join(app.root_path, 'migrations')
migrate = Migrate(app, db, directory=MIGRATIONS_DIR)
@app.cli.command()
def runserver():
# Todo: figure out how to override default host in `flask run`
# http://click.pocoo.org/5/commands/#overriding-defaults
app.run(
host='0.0.0.0',
threaded=True,
use_debugger=True,
use_reloader=True,
)
def _run_alembic_command(args):
"""Helper to manage working directory and run given alembic commands"""
# Alembic looks for the alembic.ini file in CWD
# hop over there and then return to CWD
cwd = os.getcwd()
os.chdir(MIGRATIONS_DIR)
alembic.config.main(argv=args)
os.chdir(cwd) # restore cwd
def stamp_db():
"""Run the alembic command to stamp the db with the current head"""
# if the alembic_version table exists, this db has been stamped,
# don't update to head, as it would potentially skip steps.
if db.engine.dialect.has_table(db.engine.connect(), 'alembic_version'):
return
_run_alembic_command(['--raiseerr', 'stamp', 'head'])
def upgrade_db():
"""Run any outstanding migration scripts"""
_run_alembic_command(['--raiseerr', 'upgrade', 'head'])
def flush_cache():
"""Flush redis of all values. Cached values may not longer correspond with new DB entries"""
r = redis.from_url(app.config['REDIS_URL'])
r.flushdb()
@app.cli.command()
def sync():
"""Synchronize database with latest schema and persistence data.
Idempotent function takes necessary steps to build tables, migrate
schema and run `seed`. Safe to run on existing or brand new databases.
To re/create the database, [delete and] create within the DBMS itself,
then invoke this function.
"""
if not db.engine.dialect.has_table(db.engine.connect(), 'alembic_version'):
db.create_all()
stamp_db()
flush_cache()
upgrade_db()
seed()
@click.option('--keep_unmentioned', '-k', default=False, help='Keep orgs and interventions not mentioned in persistence file')
@app.cli.command(name="seed")
def seed_command(keep_unmentioned):
seed(keep_unmentioned)
def seed(keep_unmentioned=False):
"""Seed database with required data"""
# Request context necessary for generating data from own HTTP APIs
with app.test_request_context():
add_static_concepts()
add_static_interventions()
add_static_organization()
add_static_relationships()
add_static_roles()
db.session.commit()
# import site export file if found
SitePersistence().import_(keep_unmentioned=keep_unmentioned)
@click.option('--dir', '-d', default=None, help="Export directory")
@app.cli.command(name="export_site")
def export_command(dir):
export_site(dir)
def export_site(dir):
"""Generate JSON file containing dynamic site config
:param dir: used to name a non-default target directory for export files
Portions of site configuration live in the database, such as
Organizations and Access Strategies. Generate a single export
file for migration of this data to other instances of the service.
NB the seed command imports the data file if found, along with
other static data.
"""
SitePersistence().export(dir)
@click.option('--email', '-e', help='Email of user to purge.')
@click.option(
'--actor', '-a',
help='Email of user to act as.',
prompt= \
"\n\nWARNING!!!\n\n"
" This will permanently delete the target user and all their related data.\n"
" If you want to contiue,"
" enter a valid user email as the acting party for our records"
)
@app.cli.command()
def purge_user(email, actor):
"""Purge the given user from the system"""
# import ipdb; ipdb.set_trace()
permanently_delete_user(email, actor=actor)
@app.cli.command()
def mark_test():
"""Designate all current users as test users"""
flag_test()
@app.cli.command()
def translation_upload():
"""Update .pot file on Smartling
Creates a new .pot file, updates the file with relevant DB entries, then
POSTs said .pot file to Smartling via their API
"""
smartling_upload()
@click.option('--language', '-l', help='language code (e.g. en_US).')
@click.option('--state', '-s', help='Translation state', type=click.Choice([
'pseudo',
'pending',
'published',
'contextMatchingInstrumented',
]))
@app.cli.command()
def translation_download(language, state):
"""Download .po file(s) from Smartling
GETs the .po file for the specified language from Smartling via their API.
If no language is specified, all available translations will be downloaded.
After download, .po file(s) are compiled into .mo file(s) using pybabel
"""
default_state = 'pending'
if app.config['SYSTEM_TYPE'].lower() == 'production':
default_state = 'published'
state = state or default_state
click.echo('Downloading {state} translations from Smartling'.format(state=state))
smartling_download(state=state, language=language)
|
Python
| 0
|
@@ -122,16 +122,28 @@
t click%0A
+import json%0A
import r
@@ -5848,8 +5848,429 @@
nguage)%0A
+%0A@click.option('--config_key', '-c', help='Return only a single config value')%0A@app.cli.command()%0Adef config(config_key):%0A %22%22%22List current flask configuration values in JSON%22%22%22%0A%0A if config_key:%0A print(app.config.get(config_key, ''))%0A return%0A%0A print(json.dumps(%0A # Skip un-serializable values%0A %7Bk:v for k,v in app.config.items() if isinstance(v, basestring)%7D,%0A indent=2,%0A ))%0A
|
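The `config` command this diff introduces skips values that will not serialize; a standalone sketch of the same filtering idea, using Python 3's `str` where the diff (written for Python 2) uses `basestring`, with a toy dict standing in for `app.config`:

import json

config = {'SECRET_KEY': 'abc123', 'SQLALCHEMY_ENGINE': object()}  # toy stand-in for app.config

# keep only string values, as the added command does, so json.dumps cannot fail
printable = {k: v for k, v in config.items() if isinstance(v, str)}
print(json.dumps(printable, indent=2))  # only SECRET_KEY survives the filter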
909b14f4d4b82c72e8f3987c1ef82cc075520cb1
|
add @
|
botpi.py
|
botpi.py
|
import re

re_pi = re.compile(r'(?:^|\s)(((P|p)i)|(π)|(3(\.|,)14\d*))').search

message = "{name}, eigentlich is π ja 3.1415926535897932384626433832795028841971693\
9937510582097494... aber rechne lieber mit 3, das ist wesentlich einfacher!"


def check_pi(bot_stuff):
    match = re_pi(bot_stuff['message'])
    print(match)
    if match is not None:
        bot_stuff['sendMessage'](chat_id=bot_stuff['chat_id'],
                                 text=message.format(
                                     name=bot_stuff['username']))
|
Python
| 0
|
@@ -84,16 +84,17 @@
sage = %22
+@
%7Bname%7D,
|
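For reference, the German reply string says roughly: "{name}, actually π is 3.1415926535897932384626433832795028841971693993751058209749 4... but better calculate with 3, that is much simpler!" The one-character diff just prepends "@" so the bot @-mentions the user. The trigger regex can be checked in isolation:

import re

re_pi = re.compile(r'(?:^|\s)(((P|p)i)|(π)|(3(\.|,)14\d*))').search

assert re_pi('was ist pi?') is not None            # bare word "pi"
assert re_pi('3,14159 ist eine Zahl') is not None  # comma-decimal form matches too
assert re_pi('kein Treffer hier') is None          # no pi, π, or 3.14 present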
d43fecb6f645dabf1740cd42aaf25191353a1b77
|
add curriculum to cache policy
|
sis_provisioner/cache.py
|
sis_provisioner/cache.py
|
from django.conf import settings
from memcached_clients import RestclientPymemcacheClient
from uw_kws import ENCRYPTION_KEY_URL, ENCRYPTION_CURRENT_KEY_URL
import re

ONE_MINUTE = 60
ONE_HOUR = 60 * 60
ONE_DAY = 60 * 60 * 24
ONE_WEEK = 60 * 60 * 24 * 7
ONE_MONTH = 60 * 60 * 24 * 30

NONPERSONAL_NETID_EXCEPTION_GROUP = getattr(
    settings, 'NONPERSONAL_NETID_EXCEPTION_GROUP', 'none')


class RestClientsCache(RestclientPymemcacheClient):
    def get_cache_expiration_time(self, service, url, status=200):
        if 'sws' == service:
            if re.match(r'^/student/v\d/course/', url):
                return ONE_MINUTE * 5
            if re.match(r'^/student/v\d/(?:campus|college|department|term)',
                        url):
                return ONE_HOUR * 10

        if 'pws' == service:
            return ONE_HOUR

        if 'kws' == service:
            if re.match(r'{}'.format(
                    ENCRYPTION_KEY_URL.format(r'[\-\da-fA-F]{36}')), url):
                return ONE_MONTH
            if re.match(r'{}'.format(
                    ENCRYPTION_CURRENT_KEY_URL.format(r'[\-\da-zA-Z]+')), url):
                return ONE_WEEK

        if 'gws' == service:
            if re.match(r'^/group_sws/v\d/group/{}/effective_member/'.format(
                    NONPERSONAL_NETID_EXCEPTION_GROUP), url):
                return ONE_HOUR

        if 'canvas' == service:
            if re.match(r'^/api/v\d/accounts/sis_account_id:', url):
                return ONE_HOUR * 10
            if re.match(r'^/api/v\d/accounts/\d+/roles', url):
                return ONE_MONTH

    def delete_cached_kws_current_key(self, resource_type):
        self.deleteCache('kws', ENCRYPTION_CURRENT_KEY_URL.format(
            resource_type))
|
Python
| 0
|
@@ -640,32 +640,49 @@
if re.match(
+%0A
r'%5E/student/v%5Cd/
@@ -714,16 +714,27 @@
ent%7C
+curriculum%7C
term)',%0A
@@ -729,20 +729,16 @@
term)',%0A
-
@@ -782,25 +782,19 @@
urn ONE_
-HOUR * 10
+DAY
%0A%0A
|
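A short sketch of the pre-diff lookup behavior, assuming `RestclientPymemcacheClient` can be instantiated directly (its constructor and required settings are not shown here); the URLs are examples:

cache = RestClientsCache()

# course data stays fresh; slower-moving reference data caches longer
assert cache.get_cache_expiration_time('sws', '/student/v5/course/2013,spring,CSE,142') == 60 * 5
assert cache.get_cache_expiration_time('sws', '/student/v5/term/current') == 60 * 60 * 10
assert cache.get_cache_expiration_time('pws', '/identity/v2/person/javerage') == 60 * 60

# after the diff, curriculum URLs join the second sws group, whose TTL becomes ONE_DAY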
9bb76df67c436d091d85d75c6968ede89d9194b7
|
add testing framework
|
statistics/test.py
|
statistics/test.py
|
from numberOfPublicMethods import *

def headersIsClass():
    return headers()[0] == "class.h"

def propertiesIs4():
    return parseFile("class.h") == ObjectiveCClass(publicMethods=4, className="TestClass")

if __name__ == "__main__":
    print(headersIsClass())
    print(propertiesIs4())
    print(calculateAveragePublic([ObjectiveCClass(publicMethods=5, className="")]) == 5)
|
Python
| 0.000001
|
@@ -1,8 +1,24 @@
+import unittest%0A
from num
@@ -46,20 +46,495 @@
port *%0A%0A
-def
+class Tests(unittest.TestCase):%0A%0A @staticmethod%0A def listOfOCClasses():%0A return %5B%0A ObjectiveCClass(publicMethods=4, className=%22TestClass4%22),%0A ObjectiveCClass(publicMethods=5, className=%22TestClass5%22),%0A ObjectiveCClass(publicMethods=2, className=%22TestClass2%22),%0A ObjectiveCClass(publicMethods=1, className=%22TestClass1%22),%0A ObjectiveCClass(publicMethods=4, className=%22TestClass3%22)%5D%0A%0A def test_
headersI
@@ -540,30 +540,48 @@
IsClass(
+self
):%0A
-return
+ self.assertEqual(
headers(
@@ -584,19 +584,17 @@
ers()%5B0%5D
- ==
+,
%22class.
@@ -599,14 +599,24 @@
s.h%22
+)
%0A%0A
-def
+ def test_
prop
@@ -629,22 +629,40 @@
Is4(
+self
):%0A
-return
+ self.assertEqual(
pars
@@ -677,19 +677,17 @@
lass.h%22)
- ==
+,
Objecti
@@ -738,178 +738,376 @@
ss%22)
+)
%0A%0A
-if __name__ == %22__main__%22:%0A print(headersIsClass())%0A print(propertiesIs4())%0A print(calculateAveragePublic(%5BObjectiveCClass(publicMethods=5, className=%22%22)%5D) == 5
+%0A def test_average(self):%0A self.assertEqual(calculateAveragePublic(%5BObjectiveCClass(publicMethods=5, className=%22%22)%5D), 5)%0A%0A def test_findWorst(self):%0A classes = Tests.listOfOCClasses()%0A self.assertEqual(findMostPublic(classes, 1), %5BObjectiveCClass(publicMethods=5, className=%22TestClass5%22)%5D)%0A%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main(
)%0A%0A
|
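The diff's conversion from bare `print(expr == expected)` checks to `unittest` assertions follows the usual pattern; a minimal, self-contained analogue of the converted style:

import unittest

class ExampleTests(unittest.TestCase):
    def test_average(self):
        # assertEqual reports which test failed, unlike printing a bare False
        self.assertEqual((4 + 5 + 2 + 1 + 4) / 5, 3.2)

if __name__ == "__main__":
    unittest.main()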