| commit (stringlengths, 40-40) | subject (stringlengths, 1-3.25k) | old_file (stringlengths, 4-311) | new_file (stringlengths, 4-311) | old_contents (stringlengths, 0-26.3k) | lang (stringclasses, 3 values) | proba (float64, 0-1) | diff (stringlengths, 0-7.82k) |
|---|---|---|---|---|---|---|---|
ffa85f74995be56942cbc88208187c1d5a4b6bce
|
docstring was wrong
|
fragments/apply.py
|
fragments/apply.py
|
import os
import argparse
import pdb
from .precisecodevillemerge import Weave
from .config import FragmentsConfig
from .diff import _diff_group, _split_diff
from . import Prompt
def apply(*args):
"""Revert changes to fragments repository"""
parser = argparse.ArgumentParser(prog="%s apply" % __package__, description="Apply changes in FILENAME that were made since last commit to as many other followed files as possible.")
parser.add_argument('FILENAME', help="file containing changes to be applied")
parser.add_argument('-U', '--unified', type=int, dest="NUM", default=3, action="store", help="number of lines of context to show")
group = parser.add_mutually_exclusive_group()
group.add_argument("-i", "--interactive", action="store_true" , default=True , dest="interactive", help="interactively select changes to apply")
group.add_argument("-a", "--automatic" , action="store_false", default=False, dest="interactive", help="automatically apply all changes")
args = parser.parse_args(args)
config = FragmentsConfig()
weave = Weave()
changed_path = os.path.realpath(args.FILENAME)
changed_key = changed_path[len(config.root)+1:]
if changed_key not in config['files']:
yield "Could not apply changes in %r, it is not being followed" % changed_key
return
elif not os.access(changed_path, os.R_OK|os.W_OK):
yield "Could not apply changes in %r, it no longer exists on disk" % changed_key
return
changed_uuid = config['files'][changed_key]
old_path = os.path.join(config.directory, changed_uuid)
if not os.access(old_path, os.R_OK|os.W_OK):
yield "Could not apply changes in %r, it has never been committed" % changed_key
return
old_revision = 1
weave.add_revision(old_revision, file(old_path, 'r').readlines(), [])
new_revision = 2
weave.add_revision(new_revision, file(changed_path, 'r').readlines(), [])
changes_to_apply = []
diff = weave.merge(old_revision, new_revision)
display_groups = _split_diff(diff, context_lines=args.NUM)
i = 0
old_line = 0 # not sure I need to be keeping track of these
new_line = 0
while i < len(diff):
line_or_tuple = diff[i]
if isinstance(line_or_tuple, tuple):
display_group = next(display_groups)
for dl in _diff_group(display_group): # show the group
yield dl
if args.interactive:
response = (yield Prompt("Apply this change? y/n"))
if response.lower().startswith('y'):
apply_change = True
elif response.lower().startswith('n'):
apply_change = False
else:
apply_change = True
while isinstance(display_group[0][-1], basestring):
display_group.pop(0) # preceding context lines have already been added to the changes to apply
for display_line_or_tuple in display_group:
if isinstance(display_line_or_tuple[-1], tuple):
old, new = display_line_or_tuple[-1]
old_line += len(old)
new_line += len(new)
i += 1
if apply_change:
changes_to_apply.extend(new)
else:
changes_to_apply.extend(old)
else:
old_line += 1
new_line += 1
i += 1
changes_to_apply.append(display_line_or_tuple[-1])
else:
old_line += 1
new_line += 1
i += 1
changes_to_apply.append(line_or_tuple)
changed_revision = 3
weave.add_revision(changed_revision, changes_to_apply, [1])
current_revision = changed_revision
for other_key in config['files']:
other_path = os.path.join(config.root, other_key)
if other_path == changed_path:
continue # don't try to apply changes to ourself
current_revision += 1
weave.add_revision(current_revision, file(other_path, 'r').readlines(), [])
merge_result = weave.cherry_pick(changed_revision, current_revision) # Can I apply changes in changed_revision onto this other file?
if tuple in (type(mr) for mr in merge_result):
if len(merge_result) == 1 and isinstance(merge_result[0], tuple):
# total conflict, skip
yield "Changes in %r cannot apply to %r, skipping" % (changed_key, other_key)
continue
other_file = file(other_path, 'w')
for line_or_conflict in merge_result:
if isinstance(line_or_conflict, basestring):
other_file.write(line_or_conflict)
else:
other_file.write('<'*7 + '\n')
for line in line_or_conflict[0]:
other_file.write(line)
other_file.write('='*7 + '\n')
for line in line_or_conflict[1]:
other_file.write(line)
other_file.write('>'*7 + '\n')
other_file.close()
yield "Conflict merging %r => %r" % (changed_key, other_key)
else:
# Merge is clean:
other_file = file(other_path, 'w')
other_file.writelines(merge_result)
other_file.close()
yield "Changes in %r applied cleanly to %r" % (changed_key, other_key)
|
Python
| 0.999119
|
@@ -203,14 +203,13 @@
"""
-Revert
+Apply
cha
@@ -217,31 +217,97 @@
ges
-to fragments repository
+in one file that were made since last commit to as many other followed files as possible.
"""\n
|
acd5c59f4de3b5040e7d3f787bd4f4c96610ae7b
|
add gpu memory config
|
movielens_vae_test.py
|
movielens_vae_test.py
|
import itertools
import os
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
from matrix_vae import VAEMF
# dataset = "ml-100k"
dataset = "ml-1m"
if dataset == "ml-100k":
# 100k dataset
num_user = 943
num_item = 1682
else:
# 1M dataset
num_user = 6040
num_item = 3952
hidden_encoder_dim = 216
hidden_decoder_dim = 216
latent_dim = 24
output_dim = 24
learning_rate = 0.002
batch_size = 64
reg_param = 1e-10
n_steps = 1000
hedims = [500]
hddims = [500]
ldims = [100]
odims = [500]
lrates = [0.001]
bsizes = [512]
regs = [0, 1e-10, 1e-7, 1e-5]
vaes = [True]
def read_dataset():
M = np.zeros([num_user, num_item])
if dataset == "ml-100k":
path ="./data/ml-100k/u.data"
delim = "\t"
else:
path = "./data/ml-1m/ratings.dat"
delim = "::"
with open(path, 'r') as f:
for line in f.readlines():
tokens = line.split(delim)
user_id = int(tokens[0]) - 1 # 0 base index
item_id = int(tokens[1]) - 1
rating = int(tokens[2])
M[user_id, item_id] = rating
return M
def cross_validation():
M = read_dataset()
n_fold = 10
rating_idx = np.array(M.nonzero()).T
kf = KFold(n_splits=n_fold, random_state=0)
with tf.Session() as sess:
model = VAEMF(sess, num_user, num_item,
hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param)
for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
n_fold, train_idx.size, test_idx.size))
model.train(M, train_idx=train_idx,
test_idx=test_idx, n_steps=n_steps)
def train_test_validation():
M = read_dataset()
num_rating = np.count_nonzero(M)
idx = np.arange(num_rating)
np.random.seed(1)
np.random.shuffle(idx)
train_idx = idx[:int(0.85 * num_rating)]
valid_idx = idx[int(0.85 * num_rating):int(0.90 * num_rating)]
test_idx = idx[int(0.90 * num_rating):]
for hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, vae in itertools.product(hedims, hddims, ldims, odims, lrates, bsizes, regs, vaes):
result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}".format(
hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, vae)
if not os.path.exists(result_path + "/model.ckpt.index"):
with tf.Session() as sess:
model = VAEMF(sess, num_user, num_item,
hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, vae=vae)
print("Train size={0}, Validation size={1}, Test size={2}".format(
train_idx.size, valid_idx.size, test_idx.size))
best_mse, best_mae = model.train_test_validation(
M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)
print("Best MSE = {0}, best MAE = {1}".format(
best_mse, best_mae))
with open('result.csv', 'a') as f:
f.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}\n".format(hidden_encoder_dim, hidden_decoder_dim,
latent_dim, output_dim, learning_rate, batch_size, reg_param, vae, best_mse, best_mae))
tf.reset_default_graph()
if __name__ == '__main__':
train_test_validation()
# cross_validation()
|
Python
| 0.000001
|
@@ -2757,16 +2757,103 @@
ndex"):\n
+ config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n
@@ -2864,32 +2864,45 @@
with tf.Session(
+config=config
) as sess:\n
|
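Decoded, this diff turns on on-demand GPU memory allocation just before the session is created. A minimal sketch of the resulting pattern, assuming the TF 1.x API the file already uses:

```python
import tensorflow as tf  # TF 1.x, matching the file's tf.Session usage

# Claim GPU memory on demand instead of all at once, so repeated runs of
# the hyperparameter grid can share one GPU (the point of this commit).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

with tf.Session(config=config) as sess:
    pass  # build and train the VAEMF model here, as train_test_validation() does
```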
413057374d55d851fa4717a66a0975f29b131f4f
|
Fix bytes output
|
cli.py
|
cli.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 13:58:24 2015
@author: mdurant
"""
import argparse
import hdfs
import sys
import inspect
parser = argparse.ArgumentParser(description='HDFS commands')
parser.add_argument("command", help="filesystem command to run")
parser.add_argument("par1", help="filesystem command to run", nargs="?", default=None)
parser.add_argument("par2", help="filesystem command to run", nargs="?", default=None)
parser.add_argument('--port', type=int,
help='Name node port')
parser.add_argument('--host', type=str,
help='Name node address')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity')
args = parser.parse_args()
par1, par2 = args.par1, args.par2
if args.verbose > 0:
print(args)
commands = ['ls', 'cat', 'info', 'mkdir', 'rmdir', 'rm', 'mv', 'exists',
'chmod', 'chown', 'set_replication', 'get_block_locations',
'to_local', 'to_hdfs']
if __name__ == "__main__":
if args.command not in commands:
print("Available commands:", list(sorted(commands)))
sys.exit(1)
kwargs = {}
if args.host:
kwargs['host'] = args.host
if args.port:
kwargs['port'] = args.port
fs = hdfs.HDFileSystem(**kwargs)
cmd = getattr(fs, args.command)
nargs = len(inspect.getargspec(cmd).args) - 1
args = (par1, par2)[:nargs]
out = cmd(*args)
if isinstance(out, list):
for l in out:
print(l)
elif out is not None:
print(out)
|
Python
| 0
|
@@ -1516,16 +1516,77 @@
rint(l)\n
+ elif hasattr(out, 'decode'):\n print(out.decode())\n
elif
|
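The decoded diff adds a branch for `bytes` results: anything with a `decode` attribute is decoded before printing, so Python 3 users of commands like `cat` no longer see `b'...'` literals. A self-contained sketch of the patched output logic (`print_result` is a hypothetical wrapper around the script's trailing if/elif chain):

```python
def print_result(out):
    """Mirror of the patched cli.py branch: decode bytes before printing."""
    if isinstance(out, list):
        for l in out:
            print(l)
    elif hasattr(out, 'decode'):   # bytes from e.g. fs.cat()
        print(out.decode())
    elif out is not None:
        print(out)

print_result(['/user', '/tmp'])  # lists print one entry per line
print_result(b'hello')           # now prints 'hello', not b'hello'
```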
a55a7162de4e237b4079c0517367ef23b7aa8b01
|
PodcastList.by_rating() should return a list
|
mygpo/share/models.py
|
mygpo/share/models.py
|
from random import random
from couchdbkit.ext.django.schema import *
from django.template.defaultfilters import slugify
from mygpo.core.proxy import DocumentABCMeta
from mygpo.users.models import RatingMixin
from mygpo.cache import cache_result
class PodcastList(Document, RatingMixin):
""" A list of Podcasts that a user creates for the purpose of sharing """
__metaclass__ = DocumentABCMeta
title = StringProperty(required=True)
slug = StringProperty(required=True)
podcasts = StringListProperty()
user = StringProperty(required=True)
random_key = FloatProperty(default=random)
@classmethod
def for_user_slug(cls, user_id, slug):
r = cls.view('podcastlists/by_user_slug',
key = [user_id, slug],
include_docs = True,
)
return r.first() if r else None
@classmethod
def for_user(cls, user_id):
r = cls.view('podcastlists/by_user_slug',
startkey = [user_id, None],
endkey = [user_id, {}],
include_docs = True,
)
return list(r)
@classmethod
@cache_result(timeout=60*69)
def by_rating(cls, **kwargs):
r = cls.view('podcastlists/by_rating',
descending = True,
include_docs = True,
stale = 'update_after',
**kwargs
)
return r.iterator()
@classmethod
@cache_result(timeout=60*60)
def count(cls, with_rating=True):
view = 'podcastlists/by_rating' if with_rating else \
'podcastlists/by_user_slug'
return cls.view(view,
limit = 0,
stale = 'update_after',
).total_rows
@classmethod
def random(cls, chunk_size=1):
while True:
rnd = random()
res = cls.view('podcastlists/random',
startkey = rnd,
include_docs = True,
limit = chunk_size,
stale = 'ok',
)
if not res:
break
for r in res:
yield r
def __repr__(self):
return '<{cls} "{title}" by {user}>'.format(
cls=self.__class__.__name__, title=self.title, user=self.user)
|
Python
| 0.999999
|
@@ -1447,19 +1447,14 @@
urn
-r.iterator(
+list(r
)\n\n\n
|
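Why `list(r)` matters here: `by_rating` is wrapped in `@cache_result`, so whatever it returns is stored and handed back to later callers, and an iterator is exhausted after its first consumer. A tiny generic illustration of the pitfall the commit fixes:

```python
# An iterator can only be consumed once, so caching one leaves every
# subsequent caller with an empty result:
rows = iter(['list-a', 'list-b'])
print(list(rows))  # ['list-a', 'list-b']
print(list(rows))  # [] -- the "cached" value is already spent
# Materializing with list(r) yields a value that survives caching and reuse.
```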
72fa091716e1e0d40a8219701da94bee6d49c58b
|
remove debugging
|
csw.py
|
csw.py
|
#!/usr/bin/python -u
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Tom Kralidis <tomkralidis@hotmail.com>
#
# Copyright (c) 2010 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# CGI wrapper for pycsw
import cgitb
cgitb.enable()
import os, sys
from StringIO import StringIO
from server import server
CONFIG = 'default.cfg'
GZIP = False
if os.environ.has_key('PYCSW_CONFIG'):
CONFIG = os.environ['PYCSW_CONFIG']
if os.environ['QUERY_STRING'].lower().find('config') != -1:
for kvp in os.environ['QUERY_STRING'].split('&'):
if kvp.lower().find('config') != -1:
CONFIG = kvp.split('=')[1]
if (os.environ.has_key('HTTP_ACCEPT_ENCODING') and
os.environ['HTTP_ACCEPT_ENCODING'].find('gzip') != -1):
# set for gzip compressed response
GZIP = True
# get runtime configuration
CSW = server.Csw(CONFIG)
# set compression level
if CSW.config.has_option('server', 'gzip_compresslevel'):
GZIP_COMPRESSLEVEL = \
int(CSW.config.get('server', 'gzip_compresslevel'))
else:
GZIP_COMPRESSLEVEL = 0
# go!
OUTP = CSW.dispatch_cgi()
sys.stdout.write("Content-Type:%s\r\n" % CSW.contenttype)
if GZIP and GZIP_COMPRESSLEVEL > 0:
import gzip
BUF = StringIO()
GZIPFILE = gzip.GzipFile(mode='wb', fileobj=BUF,
compresslevel=GZIP_COMPRESSLEVEL)
GZIPFILE.write(OUTP)
GZIPFILE.close()
OUTP = BUF.getvalue()
sys.stdout.write('Content-Encoding: gzip\r\n')
sys.stdout.write('Content-Length: %d\r\n' % len(OUTP))
sys.stdout.write('\r\n')
sys.stdout.write(OUTP)
|
Python
| 0.000065
|
@@ -1372,37 +1372,8 @@
sw\n\n
-import cgitb\ncgitb.enable()\n\n
impo
|
76ade2292a6a8e57ebf9e3990d3b50cfceebc4ea
|
Change instance status line width
|
jungle/ec2.py
|
jungle/ec2.py
|
# -*- coding: utf-8 -*-
import subprocess
import sys
import boto3
import botocore
import click
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<10}{2:<13}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
def cli():
"""EC2 CLI group"""
pass
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
def ls(name, list_formatted):
"""List EC2 instances"""
ec2 = boto3.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
def up(instance_id):
"""Start EC2 instance"""
ec2 = boto3.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
def down(instance_id):
"""Stop EC2 instance"""
ec2 = boto3.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(instance_id, instance_name, username, key_file, port, gateway_instance_id):
"""Create SSH Login command string"""
ec2 = boto3.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = instance.public_ip_address
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
condition = {'Name': 'tag:Name', 'Values': [instance_name]}
instances = ec2.instances.filter(Filters=[condition])
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}'.format(
idx, i.id, i.private_ip_address, i.state['Name'], tag_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
# TODO: add validation for if selected_idx exceeds length of target_instances
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
if instance.public_ip_address is not None:
hostname = instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
hostname = instance.private_ip_address
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt ubuntu@{0} -i {1} -p {2} ssh {3}@{4}'.format(
gateway_public_ip, key_file, port, username, hostname)
else:
cmd = 'ssh {0}@{1} -i {2} -p {3}'.format(username, hostname, key_file, port)
return cmd
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default='ubuntu', help='Login username')
@click.option('--key-file', '-k', required=True, help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
def ssh(instance_id, instance_name, username, key_file, port, gateway_instance_id, dry_run):
"""SSH to EC2 instance"""
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(2)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(2)
cmd = create_ssh_command(
instance_id, instance_name, username, key_file, port, gateway_instance_id)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
Python
| 0.000001
|
@@ -346,17 +346,17 @@
'}{1:<1
-0
+4
}{2:<13}
|
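The decoded diff widens the state column of the format string from 10 to 14 characters, presumably so the longest EC2 state names still leave a gap before the instance id. A quick illustration of the `{1:<14}` field width:

```python
# '{1:<14}' left-aligns argument 1 in a 14-character column; width 10 was
# too narrow for states like 'shutting-down' (13 characters).
line_format = '{0:<12}{1:<14}{2:<13}'
print(line_format.format('web-1', 'shutting-down', 'i-0123456789'))
print(line_format.format('web-2', 'running', 'i-abcdef01234'))
```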
50e40b4f2c0eb264d1f5442122dcca05dcde60db
|
Disable debug logging for streaming http requests
|
k8s/client.py
|
k8s/client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import requests
from requests import RequestException
from . import config
DEFAULT_TIMEOUT_SECONDS = 10
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class K8sClientException(RequestException):
pass
class NotFound(K8sClientException):
"""The resource was not found, and the operation could not be completed"""
class ServerError(K8sClientException):
"""The API-server returned an internal error"""
class ClientError(K8sClientException):
"""The client made a bad request"""
class Client(object):
_session = requests.Session()
@classmethod
def clear_session(cls):
cls._session = requests.Session()
@classmethod
def init_session(cls):
if "Authorization" not in cls._session.headers and config.api_token:
cls._session.headers.update({"Authorization": "Bearer {}".format(config.api_token)})
if cls._session.cert is None and config.cert:
cls._session.cert = config.cert
cls._session.verify = config.verify_ssl
if not config.verify_ssl:
import requests.packages.urllib3 as urllib3
urllib3.disable_warnings()
def get(self, url, timeout=DEFAULT_TIMEOUT_SECONDS, **kwargs):
return self._call("GET", url, timeout=timeout, **kwargs)
def delete(self, url, timeout=DEFAULT_TIMEOUT_SECONDS, **kwargs):
return self._call("DELETE", url=url, timeout=timeout, **kwargs)
def post(self, url, body, timeout=DEFAULT_TIMEOUT_SECONDS):
return self._call("POST", url, body, timeout=timeout)
def put(self, url, body, timeout=DEFAULT_TIMEOUT_SECONDS):
return self._call("PUT", url, body, timeout=timeout)
def _call(self, method, url, body=None, timeout=DEFAULT_TIMEOUT_SECONDS, **kwargs):
self.init_session()
resp = self._session.request(method, config.api_server + url, json=body, timeout=timeout, **kwargs)
if config.debug:
message = ['{:d} for url: {:s}'.format(resp.status_code, resp.url)]
Client._add_request(message, resp.request)
Client._add_response(message, resp)
LOG.debug("\n".join(message))
self._raise_on_status(resp)
return resp
@staticmethod
def _raise_on_status(resp):
if resp.status_code < 400:
return
elif resp.status_code == 404:
exc = NotFound
elif 400 <= resp.status_code < 500:
exc = ClientError
else:
exc = ServerError
http_error_msg = Client._build_error_message(resp)
raise exc(http_error_msg, response=resp)
@staticmethod
def _build_error_message(resp):
request = resp.request
message = ['{:d}: {:s} for url: {:s}'.format(resp.status_code, resp.reason, resp.url)]
Client._add_causes(message, resp)
Client._add_request(message, request)
Client._add_response(message, resp)
return "\n".join(message)
@staticmethod
def _add_causes(message, resp):
try:
json_response = resp.json()
json_causes = json_response.get(u"details", {}).get(u"causes", {})
if json_causes:
message.append("Causes:")
message.extend("\t{}: {}".format(d[u"field"], d[u"message"]) for d in json_causes)
except Exception as e:
LOG.debug("Exception when dealing with client error response: %s", e)
LOG.debug("Response: %r", resp.text)
@staticmethod
def _add_response(message, resp):
message.append("Response:")
Client._add_headers(message, resp.headers, "<<<")
if resp.text:
message.append("<<< ")
message.extend("<<< {}".format(line) for line in resp.text.splitlines())
@staticmethod
def _add_request(message, request):
message.append("Request:")
message.append(">>> {method} {url}".format(method=request.method, url=request.url))
Client._add_headers(message, request.headers, ">>>")
if request.body:
message.append(">>> ")
message.extend(">>> {}".format(line) for line in request.body.splitlines())
@staticmethod
def _add_headers(message, headers, prefix):
for key, value in headers.items():
message.append("{} {}: {}".format(prefix, key, value))
|
Python
| 0
|
@@ -2035,16 +2035,52 @@
ig.debug
+ and not kwargs.get('stream', False)
:\n
|
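Context for the guard the diff adds: `Client._add_response` reads `resp.text`, and on a streamed `requests` response that consumes the body the caller still needs, so debug dumping must be skipped when `stream=True` was passed through `**kwargs`. A small sketch of the predicate (`should_dump_response` is a hypothetical name for the inline condition):

```python
def should_dump_response(debug, **request_kwargs):
    """Only dump request/response bodies when debugging AND the call is
    not streaming; reading resp.text on a streamed response consumes it."""
    return debug and not request_kwargs.get('stream', False)

assert should_dump_response(True)                    # plain call: dump
assert not should_dump_response(True, stream=True)   # watch/stream: skip
assert not should_dump_response(False)               # debug off: skip
```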
82bf7fbf5c92c29f058df06ba3828002322f6bf3
|
Add Qantas 94 Heavy to privileged Tavern users
|
globalvars.py
|
globalvars.py
|
import os
from datetime import datetime
from ChatExchange.chatexchange.client import Client
import HTMLParser
import md5
class GlobalVars:
false_positives = []
whitelisted_users = []
blacklisted_users = []
ignored_posts = []
auto_ignored_posts = []
startup_utc = datetime.utcnow().strftime("%H:%M:%S")
latest_questions = []
blockedTime = 0
charcoal_room_id = "11540"
meta_tavern_room_id = "89"
site_filename = {"electronics.stackexchange.com": "ElectronicsGood.txt",
"gaming.stackexchange.com": "GamingGood.txt", "german.stackexchange.com": "GermanGood.txt",
"italian.stackexchange.com": "ItalianGood.txt", "math.stackexchange.com": "MathematicsGood.txt",
"spanish.stackexchange.com": "SpanishGood.txt", "stats.stackexchange.com": "StatsGood.txt"}
experimental_reasons = ["Code block"] # Don't widely report these
parser = HTMLParser.HTMLParser()
wrap = Client("stackexchange.com")
wrapm = Client("meta.stackexchange.com")
privileged_users = {charcoal_room_id: ["117490", "66258", "31768", "103081", "73046", "88521", "59776", "31465",
"88577", "34124"],
meta_tavern_room_id: ["259867", "244519", "244382", "194047", "158100", "178438", "237685",
"215468", "229438", "180276", "161974", "244382", "186281", "266094",
"245167", "230261", "213575", "241919", "203389", "202832", "160017",
"201151", "188558", "229166", "159034", "203972", "188673", "258672",
"227577", "255735", "279182", "271104"]}
smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345"}
censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}
commit = os.popen('git log --pretty=format:"%h" -n 1').read()
commit_author = os.popen('git log --pretty=format:"%cn" -n 1').read()
if md5.new(commit_author).hexdigest() in censored_committer_names:
commit_author = censored_committer_names[md5.new(commit_author).hexdigest()]
commit_with_author = os.popen('git log --pretty=format:"%h (' + commit_author + ': *%s*)" -n 1').read()
on_master = os.popen("git rev-parse --abbrev-ref HEAD").read().strip() == "master"
charcoal_hq = None
tavern_on_the_meta = None
s = ""
s_reverted = ""
specialrooms = []
bayesian_testroom = None
apiquota = -1
bodyfetcher = None
se_sites = []
tavern_users_chatting = []
|
Python
| 0
|
@@ -1771,16 +1771,26 @@
"271104"
+, "220428"
]}\n s
|
0b600e96f4778ea7f82f357cdadfd97967ecbe86
|
Add PeterJ to privileged Tavern users
|
globalvars.py
|
globalvars.py
|
import os
from datetime import datetime
from ChatExchange.chatexchange.client import Client
import HTMLParser
import md5
class GlobalVars:
false_positives = []
whitelisted_users = []
blacklisted_users = []
ignored_posts = []
auto_ignored_posts = []
startup_utc = datetime.utcnow().strftime("%H:%M:%S")
latest_questions = []
blockedTime = 0
charcoal_room_id = "11540"
meta_tavern_room_id = "89"
site_filename = {"electronics.stackexchange.com": "ElectronicsGood.txt",
"gaming.stackexchange.com": "GamingGood.txt", "german.stackexchange.com": "GermanGood.txt",
"italian.stackexchange.com": "ItalianGood.txt", "math.stackexchange.com": "MathematicsGood.txt",
"spanish.stackexchange.com": "SpanishGood.txt", "stats.stackexchange.com": "StatsGood.txt"}
experimental_reasons = ["Code block"] # Don't widely report these
parser = HTMLParser.HTMLParser()
wrap = Client("stackexchange.com")
wrapm = Client("meta.stackexchange.com")
privileged_users = {charcoal_room_id: ["117490", "66258", "31768", "103081", "73046", "88521", "59776", "31465",
"88577"],
meta_tavern_room_id: ["259867", "244519", "244382", "194047", "158100", "178438", "237685",
"215468", "229438", "180276", "161974", "244382", "186281", "266094",
"245167", "230261", "213575", "241919", "203389", "202832", "160017",
"201151", "188558", "229166", "159034"]}
smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345"}
censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}
commit = os.popen('git log --pretty=format:"%h" -n 1').read()
commit_author = os.popen('git log --pretty=format:"%cn" -n 1').read()
if md5.new(commit_author).hexdigest() in censored_committer_names:
commit_author = censored_committer_names[md5.new(commit_author).hexdigest()]
commit_with_author = os.popen('git log --pretty=format:"%h (' + commit_author + ': *%s*)" -n 1').read()
on_master = os.popen("git rev-parse --abbrev-ref HEAD").read().strip() == "master"
charcoal_hq = None
tavern_on_the_meta = None
s = ""
s_reverted = ""
specialrooms = []
bayesian_testroom = None
apiquota = -1
bodyfetcher = None
se_sites = []
|
Python
| 0
|
@@ -1646,16 +1646,26 @@
"159034"
+, "203972"
]}\n s
|
d71993acd73aa15e079c6e5a9df23bb1eb0dbf7c
|
Remove bad iter calls
|
tools/helpers.py
|
tools/helpers.py
|
#!/usr/bin/env python
# encoding: utf-8
"""Little helpers for everyday python workLittle helpers for everyday python work.."""
from __future__ import division
import os
import subprocess
import sys
import time
import progressbar as pb
from progressbar import ProgressBar
from collections import Sequence, Iterator, Iterable
import fcntl
import termios
from itertools import islice
class Timer(object):
"""Simple timing object. Measure the system time spend in the with-block
and prints it to stdout after completion.
Usage:
with Timer('Function took'):
do_something()
"""
def __init__(self, msg='Timer'):
"""
:param msg: Additional message to show after finishing timing.
"""
self._msg = msg
self._start = None
def __enter__(self):
self._start = time.time()
def __exit__(self, *args):
print('{}: {}s'.format(self._msg, time.time() - self._start))
def _nr_digits(number):
"""Returns the string-length of `number`
:param number: Number to measure string length
:returns: The length of number when seen as a string
"""
return len(str(number))
class CountSlice(Iterable):
"""Docstring for CountSlice. """
def __init__(self, iterable, steps):
"""@todo: to be defined1.
:param iterable: @todo
:param steps: @todo
"""
self._iterable = iterable
self._steps = steps
def __len__(self):
try:
return min(len(self._iterable), self._steps)
except TypeError:
return self._steps
def __iter__(self):
return islice(iter(self._iterable), self._steps)
class RuntimeSlice(Iterable):
"""Docstring for RuntimeSlice. """
def __init__(self, iterable, runtime):
"""@todo: to be defined1.
:param iterable: @todo
:param runtime: @todo
"""
self._iterable = iterable
self._runtime = runtime
@property
def runtime(self):
return self._runtime
def __iter__(self):
starttime = time.time()
for val in iter(self._iterable):
runtime = time.time() - starttime
yield runtime, val
if runtime > self.runtime:
raise StopIteration()
def Progress(the_iterable, *args, **kwargs):
if isinstance(the_iterable, RuntimeSlice):
return TimelyProgress(the_iterable, *args, **kwargs)
else:
return CountProgress(the_iterable, *args, **kwargs)
class TimelyProgress(ProgressBar, Iterable):
"""Progress bar for looping over iteratable object. Use as:
for i in Monitor(...):
do_something
As long as there is no printing involved in do_something, you get
a nice little progress bar. Works fine on the console as well as all
ipython interfaces.
"""
def __init__(self, iterable, *args, rettime=False, **kwargs):
"""
:param iterable: Iterable object to loop over
:param size: Number of characters for the progress bar (default 50).
"""
maxtime = time.strftime("%H:%M:%S", time.gmtime(iterable.runtime))
super().__init__(*args, max_value=iterable.runtime,
widgets=[pb.Percentage(), ' ', pb.Bar(), ' ',
pb.Timer(), ' / ', maxtime],
**kwargs)
self._iterable = iterable
self._rettime = rettime
def __iter__(self):
"""Fetch next object from the iterable"""
self.start()
for runtime, val in iter(self._iterable):
self.update(min(runtime, self._iterable.runtime))
yield val if not self._rettime else (runtime, val)
self.finish()
class CountProgress(ProgressBar, Iterable):
"""Progress bar for looping over iteratable object. Use as:
for i in Monitor(...):
do_something
As long as there is no printing involved in do_something, you get
a nice little progress bar. Works fine on the console as well as all
ipython interfaces.
"""
def __init__(self, iterable, *args, **kwargs):
"""
:param iterable: Iterable object to loop over
:param size: Number of characters for the progress bar (default 50).
"""
if 'max_value' not in kwargs:
kwargs['max_value'] = len(iterable) if hasattr(iterable, '__len__')\
else None
super().__init__(*args, **kwargs)
self._iterable = iterable
def __iter__(self):
"""Fetch next object from the iterable"""
self.start()
for n, val in enumerate(self._iterable):
self.update(n)
yield val
def watch_async_view(view):
try:
bar = ProgressBar(max_value=len(view))
bar.start()
while not view.done():
bar.update(value=view.progress)
time.sleep(1)
bar.finish()
except KeyboardInterrupt:
pass
def getch():
"""Gets a single character from the console."""
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
while 1:
try:
c = sys.stdin.read(1)
return c
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
def get_git_revision_hash():
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).split()[0]
def get_git_revision_short_hash():
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).split()[0]
def mkdir(dirname):
"""Create dir if it doesnt exist."""
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
|
Python
| 0.000001
|
@@ -1647,21 +1647,16 @@
islice(
-iter(
self._it
@@ -1653,33 +1653,32 @@
e(self._iterable
-)
, self._steps)\n\n
@@ -2101,37 +2101,32 @@
for val in
-iter(
self._iterable):
@@ -2115,33 +2115,32 @@
n self._iterable
-)
:\n ru
@@ -3577,13 +3577,8 @@
in
-iter(
self
@@ -3579,33 +3579,32 @@
n self._iterable
-)
:\n se
|
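The rationale behind this commit: `for` statements and `itertools.islice` already call `iter()` on their operand, so the explicit `iter(...)` wrappers removed by the diff were pure noise. A short demonstration:

```python
from itertools import islice

data = [10, 20, 30, 40]

# islice() and the for statement invoke iter() internally, so wrapping
# the operand in iter() changes nothing:
assert list(islice(data, 2)) == list(islice(iter(data), 2))
for val in data:   # equivalent to: for val in iter(data)
    pass
```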
0c39a098605454d9cfa863497b0aaee657e0b8d5
|
Update tf-plan.py
|
tools/tf-plan.py
|
tools/tf-plan.py
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import shutil
import requests
from python_terraform import Terraform
def main(PR):
TOKEN = os.getenv('GITHUB_TOKEN')
GITHUB_WORKSPACE = os.getenv('GITHUB_WORKSPACE')
GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
# Get Added / Modified files in PR
modified_files, modified_files_raw, removed_files = pr_files(GITHUB_REPOSITORY, PR)
# Get Working directories to run TF Plan on
working_directories = get_updated_modules(modified_files, removed_files)
# Loop through all the identified working directories
# Deleting added/modified & removed files
try:
for dir in working_directories:
print("----------> RUN FOR: " + dir)
# Copying main directory
shutil.copytree(GITHUB_WORKSPACE+'/'+dir, os.getcwd()+'/temp/'+dir)
# Deleting added/modified & removed files
for mfile in modified_files:
if os.path.exists(os.getcwd()+'/temp/'+mfile):
print("Deleting file: " + mfile)
os.remove(os.getcwd()+'/temp/'+mfile)
for rfile in removed_files:
if os.path.exists(os.getcwd()+'/temp/'+rfile):
print("Deleting file: " + rfile)
os.remove(os.getcwd()+'/temp/'+rfile)
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
# Loop through all the identified working directories
# Download added/modified files
try:
for dir in working_directories:
# Download added/modified files
for file in modified_files:
if dir in file:
for raw in modified_files_raw:
if file in raw:
print("Downloading file: " + raw)
downloadprfiles(raw, file, os.getcwd()+'/temp/'+dir)
break
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
# Loop through all the identified working directories
# Run Terraform Plan
try:
for dir in working_directories:
comment, status = tf(os.getcwd() + '/temp/' + dir)
commentpr(GITHUB_REPOSITORY, PR, comment, TOKEN)
if(status == 'fail'):
sys.exit('Terraform Init or Terraform Plan FAILED for: '+ dir)
except requests.exceptions.RequestException as e:
print('No working directory with TF configs in PR.')
raise SystemExit(e)
def pr_files(GITHUB_REPOSITORY,pr):
removed_files = []
modified_files = []
modified_files_raw = []
try:
response = requests.get('https://api.github.com/repos/'+ GITHUB_REPOSITORY +'/pulls/'+ str(pr) +'/files')
for file in response.json():
if(file['status'] == 'removed'):
print("Removed File: " + file['filename'])
removed_files.append(file['filename'])
else:
print("Added/Modified File: " + file['filename'])
modified_files.append(file['filename'])
modified_files_raw.append(file['raw_url'])
return modified_files, modified_files_raw, removed_files
except requests.exceptions.RequestException as e:
raise SystemExit(e)
def downloadprfiles(raw, file, path):
# print(path)
if not os.path.exists(path):
os.makedirs(path)
# print('Beginning file download with requests')
r = requests.get(raw)
with open(path + '/' + os.path.basename(file), 'wb') as f:
f.write(r.content)
# Retrieve HTTP meta-data
# print(r.status_code)
# print(r.headers['content-type'])
# print(r.encoding)
def get_updated_modules(modified_files, removed_files):
modified_files_dir = []
removed_files_dir = []
for file in modified_files:
modified_files_dir.append(os.path.dirname(file))
for file in removed_files:
removed_files_dir.append(os.path.dirname(file))
working_directories = modified_files_dir + removed_files_dir
working_directories = list(set(working_directories))
print("Working Directories:")
print(working_directories)
modules = [x for x in working_directories if 'module/' in x]
modules = [x for x in working_directories if x.count('/') == 1]
print("Modules Updated:")
print(modules)
return modules
def tf(dir):
tr = Terraform(working_dir=dir)
return_code_init, stdout_init, stderr_init = tr.init_cmd(capture_output=False)
return_code_plan, stdout_plan, stderr_plan = tr.plan_cmd(capture_output=False,var={'parent':'organizations/1234567890', 'billing_account':'ABCD-EFGH-IJKL-MNOP'})
if(return_code_init == 1):
comment = 'Terraform Init FAILED!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'fail'
if(return_code_plan == 1):
comment = 'Terraform Plan FAILED!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'fail'
else:
comment = 'Terraform Init & Terraform Plan SUCCESSFUL!\nFor Module: ' + dir.replace(os.getenv('TERRAFORM_CLI_PATH')+'/', '')
status = 'pass'
return comment, status
def commentpr(GITHUB_REPOSITORY, pr, comment, TOKEN):
headers = {'Authorization': f'token {TOKEN}', 'Accept': 'application/vnd.github.v3+json'}
# print(comment)
data = {"body":comment}
try:
response = requests.post('https://api.github.com/repos/'+ GITHUB_REPOSITORY +'/issues/'+ str(pr) +'/comments', data=json.dumps(data), headers=headers)
# print(response.text)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
if __name__ == '__main__':
if len(sys.argv) != 2:
raise SystemExit('No PR passed.')
main(sys.argv[1])
|
Python
| 0.000001
|
@@ -2715,24 +2715,26 @@
' + dir)\n
+ #
commentpr
@@ -4674,70 +4674,35 @@
if
-'module/' in x]\n modules = [x for x in working_directories if
+x.startswith('module/') and
x.c
|
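The second hunk fixes a real bug visible in the source above: the first `modules = [...]` comprehension was immediately overwritten by the second, discarding the `'module/'` filter entirely. The patch merges both predicates into one comprehension (and, per the first hunk, comments out the per-directory `commentpr` call). A sketch with made-up directory names:

```python
working_directories = ['module/vpc', 'module/dns/helpers', 'docs', 'module/iam']

# Post-fix filter: both conditions applied in a single pass.
modules = [x for x in working_directories
           if x.startswith('module/') and x.count('/') == 1]
print(modules)  # ['module/vpc', 'module/iam']
```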
1c068004b9e1831bde30be527f20b0d2835c467c
|
fix multigpu bug
|
train_policy/trainer.py
|
train_policy/trainer.py
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
#$File: trainer.py
#$Date: Sat May 7 11:00:10 2016
#$Author: Like Ma <milkpku[at]gmail[dot]com>
from config import Config
from dataset import load_data
from model import get_model
import tensorflow as tf
import argparse
def train(load_path=None):
# load data
train_data = load_data('train')
val_data = load_data('validation')
# load model
model = get_model('train')
# trainer init
optimizer = Config.optimizer
train_step = optimizer.minimize(model.loss)
# init session and server
sess = tf.InteractiveSession()
saver = tf.train.Saver()
if load_path==None:
sess.run(tf.initialize_all_variables())
else:
saver.restore(sess, load_path)
print("Model restored from %s" % load_path)
# accuracy
pred = tf.reshape(model.pred, [-1, 9*10*16])
label = tf.reshape(model.label, [-1, 9*10*16])
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# train steps
for i in range(Config.n_epoch):
# training step
batch_data, batch_label = train_data.next_batch(Config.minibatch_size)
input_dict = {model.label:batch_label}
for var, data in zip(model.inputs, batch_data):
input_dict[var]=data
#from IPython import embed;embed()
sess.run(train_step, feed_dict=input_dict)
# evalue step
if (i+1)%Config.evalue_point == 0:
batch_data, batch_label = val_data.next_batch(Config.minibatch_size)
val_dict = {model.label:batch_label}
for var, data in zip(model.inputs, batch_data):
val_dict[var]=data
score = accuracy.eval(feed_dict=val_dict)
print("epoch %d, accuracy is %.2f" % (i,score))
# save step
if (i+1)%Config.check_point == 0:
save_path = saver.save(sess, "%s/epoch-%d" %(Config.save_path, i))
print("Model saved in file: %s" % save_path)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--device", type=int, default=0, help="gpu id")
parser.add_argument("-c", "--load_path", default=None, help="load trained model")
args = parser.parse_args()
with tf.device('/gpu:%d' % args.device):
train(args.load_path)
|
Python
| 0.000001
|
@@ -267,16 +267,70 @@
f train(
+args):\n\n device = args.device\n load_path = args.
load_pat
@@ -334,16 +334,8 @@
path
-=None):\n
\n
@@ -440,16 +440,60 @@
d model\n
+ with tf.device('/gpu:%d' % device):\n
mode
@@ -2410,75 +2410,16 @@
-with tf.device('/gpu:%d' % args.device):\n train(args.load_path
+train(args
)\n
|
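Decoded, the fix moves the `tf.device` scope from the call site into `train()`, so that it wraps graph construction (the `get_model` call) rather than the whole function including session setup. A minimal sketch of the post-fix shape, with a placeholder graph standing in for `get_model('train')`:

```python
import argparse
import tensorflow as tf  # TF 1.x style, matching the file

def train(args):
    device = args.device
    load_path = args.load_path  # restored via tf.train.Saver in the real file

    # Pin only graph construction to the chosen GPU; session creation and
    # the rest of the setup keep default placement.
    with tf.device('/gpu:%d' % device):
        x = tf.placeholder(tf.float32, [None, 4])
        w = tf.Variable(tf.zeros([4, 1]))
        pred = tf.matmul(x, w)  # stands in for get_model('train')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--device', type=int, default=0)
    parser.add_argument('-c', '--load_path', default=None)
    train(parser.parse_args())
```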
b666228405e9b23e65d6d631968a7f6f334b6b46
|
change string for utf8 translation (#48)
|
translation/samples/snippets/snippets_test.py
|
translation/samples/snippets/snippets_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import snippets
def test_detect_language(capsys):
snippets.detect_language('Hæ sæta')
out, _ = capsys.readouterr()
assert 'is' in out
def test_list_languages(capsys):
snippets.list_languages()
out, _ = capsys.readouterr()
assert 'Icelandic (is)' in out
def test_list_languages_with_target(capsys):
snippets.list_languages_with_target('is')
out, _ = capsys.readouterr()
assert u'íslenska (is)' in out
def test_translate_text(capsys):
snippets.translate_text('is', 'Hello world')
out, _ = capsys.readouterr()
assert u'Halló heimur' in out
def test_translate_utf8(capsys):
text = u'나는 파인애플을 좋아한다.'
snippets.translate_text('en', text)
out, _ = capsys.readouterr()
assert u'I like pineapple' in out
|
Python
| 0
|
@@ -1240,22 +1240,16 @@
= u'
-나는
파인애플
-을 좋아한다.
+ 13개
'\n
@@ -1336,14 +1336,10 @@
t u'
-I like
+13
pin
@@ -1344,16 +1344,17 @@
ineapple
+s
' in out
|
19acfbad5db83c20f6e6459f35b63600203ba09c
|
Test to make sure that the cinder-volumes vg exists
|
packstack/plugins/cinder_250.py
|
packstack/plugins/cinder_250.py
|
"""
Installs and configures Cinder
"""
import logging
import packstack.installer.engine_validators as validate
from packstack.installer import basedefs
import packstack.installer.common_utils as utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-Cinder"
PLUGIN_NAME_COLORED = utils.getColoredText(PLUGIN_NAME, basedefs.BLUE)
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding Openstack Cinder configuration")
paramsList = [
{"CMD_OPTION" : "cinder-host",
"USAGE" : "The IP address of the server on which to install Cinder",
"PROMPT" : "The IP address of the server on which to install Cinder",
"OPTION_LIST" : [],
"VALIDATION_FUNC" : validate.validatePing,
"DEFAULT_VALUE" : "127.0.0.1",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_CINDER_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "CINDER",
"DESCRIPTION" : "Cinder Config paramaters",
"PRE_CONDITION" : "CONFIG_CINDER_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
if controller.CONF['CONFIG_CINDER_INSTALL'] != 'y':
return
cindersteps = [
{'title': 'Adding Cinder Keystone Manifest entries', 'functions':[createkeystonemanifest]},
{'title': 'Creating Cinder Manifest', 'functions':[createmanifest]}
]
controller.addSequence("Installing Cinder", [], [], cindersteps)
def createkeystonemanifest():
manifestfile = "%s_keystone.pp"%controller.CONF['CONFIG_KEYSTONE_HOST']
manifestdata = getManifestTemplate("keystone_cinder.pp")
appendManifestFile(manifestfile, manifestdata)
def createmanifest():
manifestfile = "%s_cinder.pp"%controller.CONF['CONFIG_CINDER_HOST']
manifestdata = getManifestTemplate("cinder.pp")
appendManifestFile(manifestfile, manifestdata)
|
Python
| 0
|
@@ -2023,16 +2023,126 @@
fest]},\n
+ {'title': 'Checking if the Cinder server has a cinder-volumes vg', 'functions':[checkcindervg]},\n
@@ -2290,16 +2290,294 @@
steps)\n\n
+def checkcindervg():\n server = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST'])\n server.append('vgdisplay cinder-volumes')\n try:\n server.execute()\n except:\n print "The cinder server should contain a cinder-volumes volume group"\n raise\n\n
def crea
|
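The decoded hunk adds a pre-flight `checkcindervg` step that runs `vgdisplay cinder-volumes` on the Cinder host through packstack's `utils.ScriptRunner`. A self-contained analogue, with `subprocess` standing in for `ScriptRunner` (the names below are illustrative, not packstack API):

```python
import subprocess

def check_cinder_vg():
    """Fail fast when the cinder-volumes volume group is missing,
    mirroring the checkcindervg step the patch adds."""
    try:
        subprocess.check_call(['vgdisplay', 'cinder-volumes'])
    except subprocess.CalledProcessError:
        print("The cinder server should contain a cinder-volumes volume group")
        raise
```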
3b4d8d6ecae3ee1f57dd71af990bb480e6c82d6c
|
clearly identify git-checkout as dev version
|
mitmproxy/version.py
|
mitmproxy/version.py
|
IVERSION = (3, 0, 0)
VERSION = ".".join(str(i) for i in IVERSION)
PATHOD = "pathod " + VERSION
MITMPROXY = "mitmproxy " + VERSION
if __name__ == "__main__":
print(VERSION)
|
Python
| 0.999986
|
@@ -12,16 +12,23 @@
(3, 0, 0
+, 'dev'
)\nVERSIO
|
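With the extra tuple element, the joined version string gains a visible dev marker, which is the whole point of the commit:

```python
# Post-patch version tuple; ".".join(...) is unchanged from the file.
IVERSION = (3, 0, 0, 'dev')
VERSION = ".".join(str(i) for i in IVERSION)
print(VERSION)  # 3.0.0.dev  (vs. 3.0.0 for a release)
```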
7db97dd21b7896f624f37ef44f72445965a65123
|
provide urls for bug reports. refs #20
|
h1ds_configdb/version.py
|
h1ds_configdb/version.py
|
"""
Current h1ds_configdb version constant plus version pretty-print method.
Code copied from Fabric:
https://github.com/bitprophet/fabric/raw/master/fabric/version.py
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
from subprocess import Popen, PIPE
from os.path import abspath, dirname
def git_sha():
loc = abspath(dirname(__file__))
p = Popen(
"cd \"%s\" && git log -1 --format=format:%%h\ /\ %%cD" % loc,
shell=True,
stdout=PIPE,
stderr=PIPE
)
return p.communicate()[0]
VERSION = (0, 8, 1, 'final', 0)
def get_version(form='short'):
"""
Return a version string for this package, based on `VERSION`.
Takes a single argument, ``form``, which should be one of the following
strings:
* ``branch``: just the major + minor, e.g. "0.9", "1.0".
* ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package
filenames or SCM tag identifiers.
* ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g.
documentation site headers.
* ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For
tag commit messages, or anywhere that it's important to remove ambiguity
between a branch and the first final release within that branch.
"""
# Setup
versions = {}
branch = "%s.%s" % (VERSION[0], VERSION[1])
tertiary = VERSION[2]
type_ = VERSION[3]
final = (type_ == "final")
type_num = VERSION[4]
firsts = "".join([x[0] for x in type_.split()])
sha = git_sha()
sha1 = (" / %s" % sha) if sha else ""
# Branch
versions['branch'] = branch
# Short
v = branch
if (tertiary or final):
v += "." + str(tertiary)
if not final:
v += firsts
if type_num:
v += str(type_num)
else:
v += sha1
versions['short'] = v
# Normal
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
versions['normal'] = v
# Verbose
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
else:
v += " final"
versions['verbose'] = v
try:
return versions[form]
except KeyError:
raise TypeError, '"%s" is not a valid form specifier.' % form
__version__ = get_version('short')
|
Python
| 0.999556
|
@@ -728,16 +728,177 @@
l', 0)\n\n
+def get_module_urls():\n return ("https://code.h1svr.anu.edu.au/projects/h1ds-configdb", "https://code.h1svr.anu.edu.au/projects/h1ds-configdb/issues/new", )\n\n
def get_
|
a1977d9654e7feae08b9523ea2370d5cb8974672
|
Remove redundant get_obj method (#26324)
|
lib/ansible/modules/cloud/vmware/vmware_guest_facts.py
|
lib/ansible/modules/cloud/vmware/vmware_guest_facts.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest_facts
short_description: Gather facts about a single VM
description:
- Gather facts about a single VM on a VMware ESX cluster
version_added: 2.3
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with
required: True
name_match:
description:
- If multiple VMs matching the name, use the first or last found
default: 'first'
choices: ['first', 'last']
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required if name is not supplied.
folder:
description:
- Destination folder, absolute path to find an existing guest.
- This is required if name is supplied.
datacenter:
description:
- Destination datacenter for the deploy operation
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather VM facts
vmware_guest_facts:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: facts
'''
RETURN = """
instance:
description: metadata about the virtual machine
returned: always
type: dict
sample: None
"""
import os
import time
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
from ansible.module_utils.vmware import connect_to_api, find_vm_by_id, gather_vm_facts
try:
import json
except ImportError:
import simplejson as json
HAS_PYVMOMI = False
try:
import pyVmomi
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
pass
class PyVmomiHelper(object):
def __init__(self, module):
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi module required')
self.module = module
self.params = module.params
self.content = connect_to_api(self.module)
def getvm(self, name=None, uuid=None, folder=None):
vm = None
if uuid:
vm = find_vm_by_id(self.content, vm_id=uuid, vm_id_type="uuid")
elif folder:
# Build the absolute folder path to pass into the search method
if not self.params['folder'].startswith('/'):
self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
searchpath = '%(datacenter)s%(folder)s' % self.params
# get all objects for this path ...
f_obj = self.content.searchIndex.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == name:
vm = c_obj
if self.params['name_match'] == 'first':
break
return vm
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def get_obj(content, vimtype, name):
"""
Return an object by name, if name is None the
first found object is returned
"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
def main():
module = AnsibleModule(
argument_spec=dict(
hostname=dict(
type='str',
default=os.environ.get('VMWARE_HOST')
),
username=dict(
type='str',
default=os.environ.get('VMWARE_USER')
),
password=dict(
type='str', no_log=True,
default=os.environ.get('VMWARE_PASSWORD')
),
validate_certs=dict(required=False, type='bool', default=True),
name=dict(required=True, type='str'),
name_match=dict(required=False, type='str', default='first'),
uuid=dict(required=False, type='str'),
folder=dict(required=False, type='str', default='/vm'),
datacenter=dict(required=True, type='str'),
),
)
# Prepend /vm if it was missing from the folder path, also strip trailing slashes
if not module.params['folder'].startswith('/vm') and module.params['folder'].startswith('/'):
module.params['folder'] = '/vm%(folder)s' % module.params
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.getvm(name=module.params['name'],
folder=module.params['folder'],
uuid=module.params['uuid'])
# VM already exists
if vm:
try:
module.exit_json(instance=pyv.gather_facts(vm))
except Exception:
e = get_exception()
module.fail_json(msg="Fact gather failed with exception %s" % e)
else:
module.fail_json(msg="Unable to gather facts for non-existing VM %(name)s" % module.params)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -4382,478 +4382,8 @@
)\n\n\n
-def get_obj(content, vimtype, name):\n """\n Return an object by name, if name is None the\n first found object is returned\n """\n obj = None\n container = content.viewManager.CreateContainerView(\n content.rootFolder, vimtype, True)\n for c in container.view:\n if name:\n if c.name == name:\n obj = c\n break\n else:\n obj = c\n break\n\n container.Destroy()\n return obj\n\n\n
def
|
833cd8342385fc095181afc3306ce04414bfd447
|
Add workaround for destroying models too quickly.
|
perfscale_mass_model_destruction.py
|
perfscale_mass_model_destruction.py
|
#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -317,16 +317,39 @@
port sys
+%0Afrom time import sleep
%0A%0Afrom d
@@ -1117,16 +1117,173 @@
model)%0A%0A
+ # Workaround for bug: https://bugs.launchpad.net/juju/+bug/1635052%0A # Noted here: https://bugs.launchpad.net/juju-ci-tools/+bug/1635109%0A sleep(10)%0A
dest
|
f29ff5eaa24b0671066f145d76b53e534a574119
|
Set "text/javascript" type if given a callback
|
freegeoip/geoip.py
|
freegeoip/geoip.py
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# freegeoip.net
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cyclone.web
import cyclone.escape
import socket
from twisted.python import log
from twisted.internet import defer
import freegeoip.search
class BaseHandler(cyclone.web.RequestHandler):
@defer.inlineCallbacks
def get(self, address):
try:
ip, data = yield freegeoip.search.geoip(self.settings.db,
address or self.request.remote_ip)
if data:
data = cyclone.escape.json_decode(data[0][0])
data["ip"] = ip
except socket.error:
raise cyclone.web.HTTPError(404)
except ValueError:
raise cyclone.web.HTTPError(400)
except Exception, e:
log.err("search.geoip('%s') failed: %s" % (address, e))
raise cyclone.web.HTTPError(503)
if data:
self.dump(data)
else:
raise cyclone.web.HTTPError(404)
def dump(self, data):
raise NotImplementedError
class CsvHandler(BaseHandler):
def dump(self, data):
self.set_header("Content-Type", "text/csv")
self.render("geoip.csv", data=data)
class XmlHandler(BaseHandler):
def dump(self, data):
self.set_header("Content-Type", "text/xml")
self.render("geoip.xml", data=data)
class JsonHandler(BaseHandler):
def dump(self, data):
callback = self.get_argument("callback", None)
self.set_header("Content-Type", "application/json")
if callback:
self.finish("%s(%s);" % (callback, cyclone.escape.json_encode(data)))
else:
self.finish(cyclone.escape.json_encode(data))
|
Python
| 0.999994
|
@@ -2008,16 +2008,41 @@
, None)%0A
+ if callback:%0A
@@ -2078,47 +2078,25 @@
%22, %22
-application/json%22)%0A if callback:
+text/javascript%22)
%0A
@@ -2174,16 +2174,16 @@
data)))%0A
-
@@ -2180,32 +2180,96 @@
)%0A else:%0A
+ self.set_header(%22Content-Type%22, %22application/json%22)%0A
self
|
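The fix above serves the response as "text/javascript" when a JSONP callback is present, and keeps "application/json" only for plain responses. A minimal standalone sketch of that dispatch, using the stdlib json module in place of cyclone's encoder (an assumption for illustration):

import json

def render_geoip(data, callback=None):
    # JSONP wraps the payload in a function call, so the body is a
    # script and is served as text/javascript.
    if callback:
        return "text/javascript", "%s(%s);" % (callback, json.dumps(data))
    # Plain responses keep the JSON content type.
    return "application/json", json.dumps(data)

print(render_geoip({"ip": "1.2.3.4"}, callback="showIP"))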
c0624c3f6750714f2bf1e81586c462c077c35f4b
|
Fix dynamine annotation build script
|
build/management/commands/build_dynamine_annotation.py
|
build/management/commands/build_dynamine_annotation.py
|
from django.core.management.base import BaseCommand
from build.management.commands.base_build import Command as BaseBuild
from residue.models import Residue, ResidueDataType, ResidueDataPoint
from protein.models import *
import logging
from urllib import request, parse
import json, time
from django.core.cache import cache
from django.utils.text import slugify
class Command(BaseBuild):
help = 'Add dynamine annotations.'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
def handle(self, *args, **options):
# All human proteins and xtaled
self.proteins = list(set(list(Protein.objects.filter(sequence_type__slug='wt',species__common_name="Human").all())+list(ProteinSet.objects.get(name='All').proteins.all())))
self.prepare_input(options['proc'], self.proteins)
# self.logger.info('Finishing adding dynamine annotations')
def get_dynamine_prediction(self, protein):
json_api_key = '26cf5c434a171cab9220666030cd981bdbe485a449729a7c8c6272b9'
job = {'protocol': '1.0',
'json_api_key': json_api_key,
'sequences': {protein.entry_name: protein.sequence},
'predictions_only': 'true',
}
url = 'http://dynamine.ibsquare.be/batch_request'
d = cache.get('dynamine_prediction_%s' % (protein.entry_name))
if d:
return d
try:
data = parse.urlencode({'batch':json.dumps(job)}).encode()
req = request.Request(url, data=data) # this will make the method "POST"
resp = json.loads(request.urlopen(req).read().decode('UTF-8'))
except Exception as e:
resp = {}
print('error starting job',e)
job_id = resp['job_id']
poll = {'protocol': '1.0',
'json_api_key': json_api_key,
'job_id': job_id
}
resp['status'] = 'Started'
tries=0
while resp['status'] != 'completed':
time.sleep(2)
tries += 1
try:
data = parse.urlencode({'batch':json.dumps(poll)}).encode()
req = request.Request(url, data=data) # this will make the method "POST"
resp = json.loads(request.urlopen(req).read().decode('UTF-8'))
except Exception as e:
resp = {}
print('error polling job',e)
if tries>60:
break
cache.set('dynamine_prediction_%s' % (protein.entry_name), resp, 60*60*24*7) #7 days
return resp
def save_dynamine_prediction(self,protein):
r = self.get_dynamine_prediction(protein)
dynamine, created = ResidueDataType.objects.get_or_create(slug=slugify('dynamine'), name='Dynamine Prediction')
residues = Residue.objects.filter(protein_conformation__protein=protein)
if r['status'] == 'completed':
predictions = r['results']['predictions'][protein.entry_name]
for i, p in enumerate(predictions):
# fetch residue
r = residues.filter(sequence_number=i+1).get()
point, created = ResidueDataPoint.objects.get_or_create(data_type=dynamine, residue=r, value=p[1])
# @transaction.atomic
def main_func(self, positions, iteration,count,lock):
while count.value<len(self.proteins):
with lock:
p = self.proteins[count.value]
count.value +=1
self.logger.info('Generating dynamine data for \'{}\'... ({} out of {})'.format(p, count.value, len(self.proteins)))
print(p)
self.save_dynamine_prediction(p)
|
Python
| 0.000001
|
@@ -871,16 +871,137 @@
ll())))%0A
+ # self.proteins = list(set(list(ProteinSet.objects.get(name='All').proteins.all())))%0A print(self.proteins)
%0A%0A
@@ -3076,33 +3076,63 @@
protein=protein)
+.all()%0A c = r%5B'status'%5D
%0A
-
if r%5B'st
@@ -3228,16 +3228,67 @@
y_name%5D%0A
+ # print(predictions)%0A c = 0%0A
@@ -3359,16 +3359,41 @@
residue%0A
+ try:%0A
@@ -3447,16 +3447,71 @@
).get()%0A
+ # print(protein,r,p%5B1%5D,r.pk,i)%0A
@@ -3617,16 +3617,192 @@
e=p%5B1%5D)%0A
+ if created:%0A c += 1%0A except:%0A print(%22Missing residue for%22,protein.entry_name,i+1)%0A return c%0A
%0A%0A #
@@ -4177,28 +4177,84 @@
-print(p)%0A
+# if 'opsd_bovin'!=str(p):%0A # continue%0A dynamine =
sel
@@ -4282,10 +4282,40 @@
ction(p)
+%0A print(p,dynamine)
%0A%0A
|
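The script polls the DynaMine batch endpoint until the response reports status 'completed', sleeping between attempts and giving up after roughly 60 tries. A stripped-down sketch of that poll loop; fetch_status is a hypothetical stand-in for the real HTTP request:

import time

def poll(fetch_status, max_tries=60, delay=2):
    # fetch_status() is assumed to return a dict with a 'status' key,
    # matching the shape of the batch API response above.
    for _ in range(max_tries):
        resp = fetch_status()
        if resp.get('status') == 'completed':
            return resp
        time.sleep(delay)
    raise RuntimeError('job did not complete in time')

# Toy run: the "job" completes on the third check.
states = iter([{'status': 'queued'}, {'status': 'running'}, {'status': 'completed'}])
print(poll(lambda: next(states), delay=0))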
582af80eaa558cd6343a293cc54ced7b93f6d218
|
Add timeout to wait for event
|
proto/parallel/Communicate.py
|
proto/parallel/Communicate.py
|
from Queue import Queue
from threading import Event
try:
from multiprocessing.managers import BaseManager
except ImportError:
class Python26Required(object):
def __call__(self, *args):
raise RuntimeError('Requires Python > 2.6')
def __getattr__(self, name):
raise RuntimeError('Requires Python > 2.6')
BaseManager = Python26Required()
def _create_caching_getter(clazz):
objects = {}
def get_object(key):
if key not in objects:
objects[key] = clazz()
return objects[key]
return get_object
class Communicate(object):
"""Library for communication between processes.
For example this can be used to handle communication between processes of the Parallel robot library.
Requires Python 2.6
Example:
Process 1 test file:
| *Settings* |
| Library | Communicate |
| *Test Cases* |
| Communicator |
| | [Setup] | Start Communication Service |
| | Send Message To | my message queue | hello world! |
| | ${message}= | Receive Message From | other message queue |
| | Should Be Equal | ${message} | hello! |
| | [Teardown] | Stop Communication Service |
Process 2 test file:
| *Settings* |
| Library | Communicate | ${process 1 ip address if on a different machine} |
| *Test Cases* |
| Helloer |
| | ${message}= | Receive Message From | my message queue |
| | Should Be Equal | ${message} | hello world! |
| | Send Message To | other message queue | hello! |
"""
def __init__(self, address='127.0.0.1', port=2187):
"""
`address` of the communication server.
`port` of the communication server.
"""
self._address = address
self._port = int(port)
self._authkey = 'live long and prosper'
self._queue = None
self._connected = False
def _connect(self):
self._create_manager().connect()
self._connected = True
def start_communication_service(self):
"""Starts a communication server that will be used to share messages and objects between processes.
"""
self._create_manager(_create_caching_getter(Queue),
_create_caching_getter(Event)).start()
self._connected = True
def stop_communication_service(self):
"""Stops a started communication server.
This ensures that the server and the messages that it has don't influence the next tests.
To ensure that this keyword really happens place this in the teardown section.
"""
self._manager.shutdown()
self._connected = False
def _create_manager(self, queue_getter=None, event_getter=None):
BaseManager.register('get_queue', queue_getter)
BaseManager.register('get_event', event_getter)
self._manager = BaseManager((self._address, self._port), self._authkey)
return self._manager
def send_message_to(self, queue_id, value):
"""Send a message to a message queue.
`queue_id` is the identifier for the queue.
`value` is the message. This can be a string, a number or any serializable object.
Example:
In one process
| Send Message To | my queue | hello world! |
...
In another process
| ${message}= | Receive Message From | my queue |
| Should Be Equal | ${message} | hello world! |
"""
self._get_queue(queue_id).put(value)
def receive_message_from(self, queue_id, timeout=None):
"""Receive and consume a message from a message queue.
By default this keyword will block until there is a message in the queue.
`queue_id` is the identifier for the queue.
`timeout` is the time out in seconds to wait.
Returns the value from the message queue. Fails if timeout expires.
Example:
In one process
| Send Message To | my queue | hello world! |
...
In another process
| ${message}= | Receive Message From | my queue |
| Should Be Equal | ${message} | hello world! |
"""
timeout = float(timeout) if timeout is not None else None
return self._get_queue(queue_id).get(timeout=timeout)
def _get_queue(self, queue_id):
if not self._connected:
self._connect()
return self._manager.get_queue(queue_id)
def wait_for_event(self, event_id):
"""Waits until event with `event_id` is signaled.
Example:
In one process
| Wait For Event | my event |
...
In another process
| Signal Event | my event |
"""
return self._get_event(event_id).wait()
def signal_event(self, event_id):
"""Signals an event.
If a process is waiting for this event it will stop waiting after the signal.
`event` is the identifier for the event.
Example:
In one process
| Wait For Event | my event |
...
In another process
| Signal Event | my event |
"""
return self._get_event(event_id).set()
def _get_event(self, event_id):
if not self._connected:
self._connect()
return self._manager.get_event(event_id)
|
Python
| 0.000004
|
@@ -4603,35 +4603,49 @@
t(self, event_id
+, timeout=None
):%0A
-
%22%22%22Waits
@@ -4685,16 +4685,114 @@
ignaled.
+%0A Fails if optional timeout expires.%0A%0A %60timeout%60 is the time out in seconds to wait.
%0A%0A
@@ -4962,46 +4962,380 @@
-return self._get_event(event_id).wait(
+timeout = float(timeout) if timeout is not None else None%0A self._get_event(event_id).wait(timeout=timeout)%0A #NOTE! If Event#clear is ever exposed it has to be secured (for example r/w lock) that none%0A #of the processes can do it while another is at this position.%0A if not self._get_event(event_id).isSet():%0A raise Exception('Timeout'
)%0A%0A
|
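The patch emulates a timeout-aware wait: Event.wait(timeout=...) returns None on Python 2.6, so the flag has to be re-checked afterwards to tell "signaled" apart from "timed out". The same pattern against the standard threading.Event, as a self-contained sketch:

from threading import Event

def wait_for(event, timeout=None):
    # wait() alone cannot report a timeout on old Pythons, so the
    # flag is checked explicitly afterwards.
    event.wait(timeout)
    if not event.is_set():
        raise Exception('Timeout')

e = Event()
try:
    wait_for(e, timeout=0.1)  # nothing signals the event
except Exception as exc:
    print(exc)  # -> Timeout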
0ac0ec672eeaa4bd5431905808405e0f73bf7e00
|
remove unused import
|
cmsplugin_cascade/bootstrap4/buttons.py
|
cmsplugin_cascade/bootstrap4/buttons.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from django.conf import settings
from django.forms import widgets
from django.forms.fields import CharField
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.link.config import LinkPluginBase, LinkElementMixin, LinkForm
from cmsplugin_cascade.link.forms import TextLinkFormMixin
if 'cmsplugin_cascade.icon' in settings.INSTALLED_APPS:
from cmsplugin_cascade.icon.mixins import IconPluginMixin
else:
from cmsplugin_cascade.plugin_base import CascadePluginMixinBase as IconPluginMixin
class ButtonTypeWidget(widgets.RadioSelect):
"""
Render sample buttons in different colors in the button's backend editor.
"""
BUTTON_TYPES = OrderedDict([
('btn-primary', _("Primary")),
('btn-secondary', _("Secondary")),
('btn-success', _("Success")),
('btn-danger', _("Danger")),
('btn-warning', _("Warning")),
('btn-info', _("Info")),
('btn-light', _("Light")),
('btn-dark', _("Dark")),
('btn-link', _("Link")),
('btn-outline-primary', _("Primary")),
('btn-outline-secondary', _("Secondary")),
('btn-outline-success', _("Success")),
('btn-outline-danger', _("Danger")),
('btn-outline-warning', _("Warning")),
('btn-outline-info', _("Info")),
('btn-outline-light', _("Light")),
('btn-outline-dark', _("Dark")),
('btn-outline-link', _("Link")),
])
template_name = 'cascade/forms/widgets/button_types.html'
@classmethod
def get_instance(cls):
return cls(choices=[(k, v) for k, v in cls.BUTTON_TYPES.items()])
class ButtonSizeWidget(widgets.RadioSelect):
"""
Render sample buttons in different sizes in the button's backend editor.
"""
BUTTON_SIZES = OrderedDict([
('btn-lg', _("Large button")),
('', _("Default button")),
('btn-sm', _("Small button")),
])
template_name = 'cascade/forms/widgets/button_sizes.html'
@classmethod
def get_instance(cls):
return cls(choices=[(k, v) for k, v in cls.BUTTON_SIZES.items()])
class BootstrapButtonMixin(IconPluginMixin):
require_parent = True
parent_classes = ('BootstrapColumnPlugin', 'SimpleWrapperPlugin',)
render_template = 'cascade/bootstrap4/button.html'
allow_children = False
default_css_class = 'btn'
default_css_attributes = ['button_type', 'button_size', 'button_options', 'quick_float']
ring_plugin = 'ButtonMixin'
button_type = GlossaryField(
ButtonTypeWidget.get_instance(),
label=_("Button Type"),
initial='btn-primary',
help_text=_("Display Link using this Button Style")
)
button_size = GlossaryField(
ButtonSizeWidget.get_instance(),
label=_("Button Size"),
initial='',
help_text=_("Display Link using this Button Size")
)
button_options = GlossaryField(
widgets.CheckboxSelectMultiple(choices=[
('btn-block', _('Block level')),
('disabled', _('Disabled')),
]),
label=_("Button Options"),
)
quick_float = GlossaryField(
widgets.RadioSelect(choices=[
('', _("Do not float")),
('pull-left', _("Pull left")),
('pull-right', _("Pull right")),
]),
label=_("Quick Float"),
initial='',
help_text=_("Float the button to the left or right.")
)
icon_align = GlossaryField(
widgets.RadioSelect(choices=[
('', _("No Icon")),
('icon-left', _("Icon placed left")),
('icon-right', _("Icon placed right")),
]),
label=_("Icon alignment"),
initial='',
help_text=_("Add an Icon before or after the button content.")
)
symbol = GlossaryField(
widgets.HiddenInput(),
label=_("Select Symbol"),
)
class Media:
js = ['cascade/js/admin/buttonmixin.js']
def render(self, context, instance, placeholder):
context = super(BootstrapButtonMixin, self).render(context, instance, placeholder)
try:
icon_font = self.get_icon_font(instance)
symbol = instance.glossary.get('symbol')
except AttributeError:
icon_font, symbol = None, None
if icon_font and symbol:
context['stylesheet_url'] = icon_font.get_stylesheet_url()
mini_template = '{0}<i class="icon-{1} {2}" aria-hidden="true"></i>{3}'
icon_align = instance.glossary.get('icon_align')
if icon_align == 'icon-left':
context['icon_left'] = format_html(mini_template, '', symbol, 'cascade-icon-left', ' ')
elif icon_align == 'icon-right':
context['icon_right'] = format_html(mini_template, ' ', symbol, 'cascade-icon-right', '')
return context
class BootstrapButtonPlugin(BootstrapButtonMixin, LinkPluginBase):
module = 'Bootstrap'
name = _("Button")
model_mixins = (LinkElementMixin,)
fields = ['link_content'] + list(LinkPluginBase.fields)
glossary_field_order = ['button_type', 'button_size', 'button_options', 'quick_float',
'target', 'title', 'icon_align', 'icon_font', 'symbol']
ring_plugin = 'ButtonPlugin'
class Media:
css = {'all': ['cascade/css/admin/bootstrap4-buttons.css', 'cascade/css/admin/iconplugin.css']}
js = ['cascade/js/admin/buttonplugin.js']
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapButtonPlugin, cls).get_identifier(obj)
content = obj.glossary.get('link_content')
if not content:
try:
content = force_text(ButtonTypeWidget.BUTTON_TYPES[obj.glossary['button_type']])
except KeyError:
content = _("Empty")
return format_html('{}{}', identifier, content)
def get_form(self, request, obj=None, **kwargs):
link_content = CharField(
required=False,
label=_("Button Content"),
widget=widgets.TextInput(attrs={'size': 50}),
)
Form = type(str('ButtonForm'), (TextLinkFormMixin, getattr(LinkForm, 'get_form_class')(),),
{'link_content': link_content})
kwargs.update(form=Form)
return super(BootstrapButtonPlugin, self).get_form(request, obj, **kwargs)
plugin_pool.register_plugin(BootstrapButtonPlugin)
|
Python
| 0.000001
|
@@ -248,26 +248,8 @@
html
-, format_html_join
%0Afro
|
827661e790bd407c29f4d109e428c8f36d44f537
|
Update ReferralFollowup test_form's build_form to allow True, False, None for has_appointment.
|
pttrack/test_forms.py
|
pttrack/test_forms.py
|
'''Module for testing the various custom forms used in Osler.'''
import datetime
from django.test import TestCase
from . import forms
from . import models
from . import followup_models
# pylint: disable=invalid-name
class TestReferralFollowupForms(TestCase):
'''
Test the validation and behavior of the forms used to do followups.
'''
def setUp(self):
self.contact_method = models.ContactMethod.objects.create(
name="Carrier Pidgeon")
self.pt = models.Patient.objects.create(
first_name="Juggie",
last_name="Brodeltein",
middle_name="Bayer",
phone='+49 178 236 5288',
gender=models.Gender.objects.create(long_name="Male",
short_name="M"),
address='Schulstrasse 9',
city='Munich',
state='BA',
zip_code='63108',
pcp_preferred_zip='63018',
date_of_birth=datetime.date(1990, 01, 01),
patient_comfortable_with_english=False,
preferred_contact_method=self.contact_method,
)
self.successful_res = followup_models.ContactResult.objects.create(
name="Got him", patient_reached=True)
self.unsuccessful_res = followup_models.ContactResult.objects.create(
name="Disaster", patient_reached=False)
self.reftype = models.ReferralType.objects.create(name="Chiropracter")
models.ReferralLocation.objects.create(
name="Franklin's Back Adjustment",
address="1435 Sillypants Drive")
followup_models.NoAptReason.objects.create(
name="better things to do")
def build_form(self, contact_successful, has_appointment, apt_location, noapt_reason):
'''
Construct a ReferralFollowup form to suit the needs of the testing
subroutines based upon what is provided and not provided.
'''
contact_resolution = self.successful_res if contact_successful else self.unsuccessful_res
form_data = {
'contact_method': self.contact_method,
'contact_resolution': contact_resolution,
'patient': self.pt,
'referral_type': self.reftype,
'has_appointment': has_appointment,
}
if apt_location:
form_data['apt_location'] = models.ReferralLocation.objects.all()[0]
if noapt_reason:
form_data['noapt_reason'] = followup_models.NoAptReason.objects.all()[0]
return forms.ReferralFollowup(data=form_data)
def test_correct_successful_noapt(self):
'''
Test a correct submission of ReferralFollowup when
ContactResult.patient_reached is True but has_appointment is False.
That is, apt_location and noapt_reason are provided.
'''
form = self.build_form(
contact_successful=True,
has_appointment=False,
apt_location=True,
noapt_reason=True)
self.assertEqual(len(form['noapt_reason'].errors), 0)
def test_incorrect_successful_noapt(self):
'''
Test that a successful contact with no appointment that lacks a
noapt_reason is considered incorrect.
'''
form = self.build_form(
contact_successful=True,
has_appointment=False,
noapt_reason=False,
apt_location=False)
self.assertGreater(len(form['noapt_reason'].errors), 0)
def test_correct_unsuccessful_noapt(self):
'''
Test that an unsuccessful contact requires only has_appointment and
referral_type. apt_location and noapt_reason are not required.
'''
form = self.build_form(
contact_successful=False,
has_appointment=False,
apt_location=False,
noapt_reason=False)
self.assertEqual(len(form['noapt_reason'].errors), 0)
|
Python
| 0
|
@@ -2272,57 +2272,302 @@
-'has_appointment': has_appointment,%0A %7D
+%7D%0A%0A # Has appointment could (at least in principle) be True, False, or%0A # unspecified.%0A if has_appointment:%0A form_data%5B'has_appointment'%5D = True%0A elif has_appointment is None:%0A pass%0A else:%0A form_data%5B'has_appointment'%5D = False
%0A%0A
@@ -4065,36 +4065,35 @@
has_appointment=
-Fals
+Non
e,%0A a
|
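After the change, build_form treats has_appointment as a genuine tri-state: True and False are posted explicitly, while None leaves the key out of the form data entirely, as if the field had never been filled in. The core of that idiom, sketched in isolation:

def form_payload(has_appointment):
    # None means "omit the field", mirroring an untouched form input.
    data = {}
    if has_appointment is not None:
        data['has_appointment'] = has_appointment
    return data

for value in (True, False, None):
    print(value, '->', form_payload(value))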
c9917b3dc54290bb3fc7c977e8c1db76ac60cf82
|
Update project queries
|
polyaxon/api/projects/queries.py
|
polyaxon/api/projects/queries.py
|
from django.db.models import Count, Q
from db.models.projects import Project
projects = Project.objects.select_related('user')
projects_details = projects.select_related('repo').annotate(
Count('experiments', distinct=True),
Count('jobs', distinct=True),
Count('build_jobs', distinct=True),
Count('experiment_groups', distinct=True)).annotate(
independent_experiments__count=Count(
'experiments',
filter=Q(experiments__experiment_group__isnull=True),
distinct=True))
|
Python
| 0
|
@@ -122,16 +122,17 @@
'user')%0A
+%0A
projects
@@ -344,27 +344,17 @@
ct=True)
-).annotate(
+,
%0A ind
|
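The query leans on the filter argument that Django 2.0 added to aggregates, which counts only the rows matching a Q object instead of requiring a second annotated queryset. A hedged restatement of the idiom using the record's own Project model (this only runs inside that project's Django environment):

from django.db.models import Count, Q
from db.models.projects import Project

# Count experiments overall and, separately, only those without an
# experiment group, in a single annotate() call.
projects = Project.objects.annotate(
    Count('experiments', distinct=True),
    independent_experiments__count=Count(
        'experiments',
        filter=Q(experiments__experiment_group__isnull=True),
        distinct=True))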
086b7a7de994e30d2e5defa214eca846862aec59
|
update default configuration in config
|
nova/common/config.py
|
nova/common/config.py
|
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_middleware import cors
def set_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
# CORS Defaults
# TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
cfg.set_defaults(cors.CORS_OPTS,
allow_headers=['X-Auth-Token',
'X-Openstack-Request-Id',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id'],
expose_headers=['X-Auth-Token',
'X-Openstack-Request-Id',
'X-Subject-Token',
'X-Service-Token'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH']
)
|
Python
| 0.000001
|
@@ -615,36 +615,8 @@
e.%0A%0A
-from oslo_config import cfg%0A
from
@@ -644,16 +644,16 @@
rt cors%0A
+
%0A%0Adef se
@@ -753,151 +753,27 @@
-# CORS Defaults%0A # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/%0A cfg.set_defaults(cors.CORS_OPTS,%0A
+cors.set_defaults(%0A
@@ -826,37 +826,24 @@
-
-
'X-Openstack
@@ -879,29 +879,16 @@
-
-
'X-Ident
@@ -923,29 +923,16 @@
-
'X-Roles
@@ -953,37 +953,24 @@
-
-
'X-Service-C
@@ -978,29 +978,16 @@
talog',%0A
-
@@ -1037,29 +1037,16 @@
-
'X-Tenan
@@ -1049,37 +1049,24 @@
enant-Id'%5D,%0A
-
expo
@@ -1113,37 +1113,24 @@
-
'X-Openstack
@@ -1163,37 +1163,24 @@
-
-
'X-Subject-T
@@ -1206,37 +1206,24 @@
-
-
'X-Service-T
@@ -1230,29 +1230,16 @@
oken'%5D,%0A
-
@@ -1287,41 +1287,15 @@
-
- 'PUT',%0A
+'PUT',%0A
@@ -1344,29 +1344,16 @@
-
-
'DELETE'
@@ -1377,29 +1377,16 @@
-
-
'PATCH'%5D
@@ -1386,31 +1386,14 @@
PATCH'%5D%0A
-
)%0A
|
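The diff replaces the manual cfg.set_defaults(cors.CORS_OPTS, ...) call with oslo.middleware's own cors.set_defaults() helper, so the service no longer needs to import oslo_config or touch CORS_OPTS directly. A trimmed sketch of the replacement call (assuming an oslo.middleware release that ships set_defaults, as the diff indicates):

from oslo_middleware import cors

def set_middleware_defaults():
    # Keyword arguments become the new defaults for the CORS options.
    cors.set_defaults(
        allow_headers=['X-Auth-Token', 'X-Roles'],
        expose_headers=['X-Auth-Token', 'X-Subject-Token'],
        allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])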
bc224499e2f4f663a1fe5e41cbfad691e7c04de4
|
Drop unused import
|
turbine/code/py/turbine_helpers.py
|
turbine/code/py/turbine_helpers.py
|
# Copyright 2013 University of Chicago and Argonne National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# TURBINE HELPERS PY
# Python helpers for JSON module
import json
import sys
# Type classes for comparison:
_zero = 0
_zerof = 0.0
type_str = "x".__class__
type_int = _zero.__class__
type_float = _zerof.__class__
type_list = [].__class__
type_dict = {}.__class__
type_none = None.__class__
def set_key_type(k):
""" Convert to integer if possible """
try:
result = int(k)
except ValueError:
result = k
return result
def json_path(J, path):
""" Reusable function to search a JSON tree """
J = json.loads(J)
P = path.split(",")
for p in P:
if len(p) > 0:
k = set_key_type(p)
J = J[k]
return J
def json_type(J, path):
""" Obtain the type of the entry at given path in the JSON tree """
J = json_path(J, path)
c = J.__class__
if c == type_str:
return "string"
elif c == type_int:
return "int"
elif c == type_float:
return "float"
elif c == type_list:
return "array"
elif c == type_dict:
return "object"
elif c == type_none:
return "null"
else:
raise Exception("json_type: ERROR class='%s'" % str(c))
def json_object_names(J, path):
""" Assume dict and return all names at given path """
J = json_path(J, path)
L = []
for i in J.keys():
L.append(i)
result = ",".join(L)
return result
def json_array_size(J, path):
""" Assume list and return length of it """
J = json_path(J, path)
return str(len(J))
def json_get(J, path):
""" Return whatever is at the given path (usually scalar) """
J = json_path(J, path)
if J is None:
return "null"
return str(J)
|
Python
| 0
|
@@ -681,19 +681,8 @@
json
-%0Aimport sys
%0A%0A#
|
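json_path above walks a JSON tree along a comma-separated path, with set_key_type turning numeric components into list indices. A quick usage sketch with the same two helpers inlined:

import json

def set_key_type(k):
    try:
        return int(k)
    except ValueError:
        return k

def json_path(J, path):
    J = json.loads(J)
    for p in path.split(","):
        if len(p) > 0:
            J = J[set_key_type(p)]
    return J

doc = '{"users": [{"name": "ada"}, {"name": "bob"}]}'
print(json_path(doc, "users,1,name"))  # -> bob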
63cb9a8b3acb78be155bbc770cdd1d06170eccc1
|
Fix customer filter field.
|
src/nodeconductor_assembly_waldur/invoices/filters.py
|
src/nodeconductor_assembly_waldur/invoices/filters.py
|
import django_filters
from django.conf import settings
from django.core import exceptions
from django.db.models import Q, BooleanField
from django.utils import timezone
from nodeconductor.core import filters as core_filters
from nodeconductor.structure import filters as structure_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
customer = core_filters.URLFilter(view_name='customer-detail', name='customer__uuid')
customer_uuid = django_filters.UUIDFilter(name='customer__uuid')
state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
class Meta(object):
model = models.Invoice
fields = ('year', 'month')
class PaymentDetailsFilter(django_filters.FilterSet):
customer = core_filters.URLFilter(view_name='customer-detail', name='customer__uuid')
customer_uuid = django_filters.UUIDFilter(name='customer__uuid')
class Meta(object):
model = models.PaymentDetails
fields = '__all__'
class AccountingStartDateFilter(core_filters.BaseExternalFilter):
def filter(self, request, queryset, view):
if not settings.INVOICES['ENABLE_ACCOUNTING_START_DATE']:
return queryset
value = request.query_params.get('accounting_is_running')
boolean_field = BooleanField()
try:
value = boolean_field.to_python(value)
except exceptions.ValidationError:
value = None
if value is None:
return queryset
query = Q(payment_details__isnull=True) | Q(payment_details__accounting_start_date__gt=timezone.now())
if value:
return queryset.exclude(query)
else:
return queryset.filter(query)
structure_filters.ExternalCustomerFilterBackend.register(AccountingStartDateFilter())
|
Python
| 0
|
@@ -117,22 +117,33 @@
rt Q
-, BooleanField
+%0Afrom django import forms
%0Afro
@@ -1307,16 +1307,26 @@
field =
+forms.Null
BooleanF
|
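The fix swaps BooleanField for forms.NullBooleanField, whose to_python() returns a real tri-state: True, False, or None for anything it does not recognise, so an absent or malformed accounting_is_running parameter simply means "don't filter". The field can be exercised on its own, outside any request cycle (run inside a configured Django environment):

from django import forms

field = forms.NullBooleanField()
# Recognised strings map to booleans; everything else becomes None
# instead of raising ValidationError.
for raw in ('True', 'False', None, 'bogus'):
    print(repr(raw), '->', repr(field.to_python(raw)))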
0bf7a7321a4ca2cf66763924ad4a21a6d0e8fd05
|
fix mongodb_parameter
|
lib/ansible/modules/database/misc/mongodb_parameter.py
|
lib/ansible/modules/database/misc/mongodb_parameter.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2016, Loic Blot <loic.blot@unix-experience.fr>
Sponsored by Infopro Digital. http://www.infopro-digital.com/
Sponsored by E.T.A.I. http://www.etai.fr/
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: mongodb_parameter
short_description: Change an administrative parameter on a MongoDB server.
description:
- Change an administrative parameter on a MongoDB server.
version_added: "2.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
ssl:
description:
- Whether to use an SSL connection when connecting to the database
required: false
default: false
param:
description:
- MongoDB administrative parameter to modify
required: true
value:
description:
- MongoDB administrative parameter value to set
required: true
param_type:
description:
- Define the parameter value (str, int)
required: false
default: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Loic Blot (@nerzhul)"
'''
EXAMPLES = '''
# Set MongoDB syncdelay to 60 (this is an int)
- mongodb_parameter:
param: syncdelay
value: 60
param_type: int
'''
RETURN = '''
before:
description: value before modification
returned: success
type: string
after:
description: value after modification
returned: success
type: string
'''
import ConfigParser
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
# =========================================
# MongoDB module specific support methods.
#
def load_mongocnf():
config = ConfigParser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (ConfigParser.NoOptionError, IOError):
return False
return creds
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default=27017, type='int'),
login_database=dict(default=None),
replica_set=dict(default=None),
param=dict(default=None, required=True),
value=dict(default=None, required=True),
param_type=dict(default="str", choices=['str', 'int']),
ssl=dict(default=False, type='bool'),
)
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
ssl = module.params['ssl']
param = module.params['param']
param_type = module.params['param_type']
value = module.params['value']
# Verify parameter is coherent with specified type
try:
if param_type == 'int':
value = int(value)
except ValueError:
e = get_exception()
module.fail_json(msg="value '%s' is not %s" % (value, param_type))
try:
if replica_set:
client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
else:
client = MongoClient(login_host, int(login_port), ssl=ssl)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
except ConnectionFailure:
e = get_exception()
module.fail_json(msg='unable to connect to database: %s' % str(e))
db = client.admin
try:
after_value = db.command("setParameter", **{param: int(value)})
except OperationFailure:
e = get_exception()
module.fail_json(msg="unable to change parameter: %s" % str(e))
if "was" not in after_value:
module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
else:
module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
after=value)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -6711,26 +6711,21 @@
%7Bparam:
-int(
value
-)
%7D)%0A e
|
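The one-line fix drops the int(value) cast inside the setParameter command: value was already coerced according to param_type further up, and re-casting broke string-valued parameters. The intended flow as a small sketch, with the live database call left as a comment:

def coerce_value(value, param_type):
    # Cast exactly once, up front; the command below then passes the
    # value through untouched, whatever its type.
    if param_type == 'int':
        return int(value)
    return value

value = coerce_value('60', 'int')
print(value, type(value).__name__)  # -> 60 int
# db.command("setParameter", **{param: value})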
881a27ab3d4ee0f9f988a7f183bdd0a76b517526
|
Add additional activation tests.
|
panda/tests/test_views.py
|
panda/tests/test_views.py
|
#!/usr/bin/env python
from django.contrib.auth import authenticate
from django.test import TransactionTestCase
from django.test.client import Client
from django.utils import simplejson as json
from panda.models import User
from panda.tests import utils
class TestLogin(TransactionTestCase):
fixtures = ['init_panda.json']
def setUp(self):
self.user = utils.get_panda_user()
self.client = Client()
def test_login_success(self):
response = self.client.post('/login/', { 'email': 'user@pandaproject.net', 'password': 'user' })
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['email'], 'user@pandaproject.net')
self.assertEqual(body['api_key'], 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84c')
self.assertEqual(body['notifications'], [])
def test_login_disabled(self):
self.user.is_active = False
self.user.save()
response = self.client.post('/login/', { 'email': 'user@pandaproject.net', 'password': 'user' })
self.assertEqual(response.status_code, 400)
body = json.loads(response.content)
self.assertIn('disabled', body['__all__'])
self.user.is_active = True
self.user.save()
def test_login_invalid_email(self):
response = self.client.post('/login/', { 'email': 'NOTPANDA@pandaproject.net', 'password': 'panda' })
self.assertEqual(response.status_code, 400)
body = json.loads(response.content)
self.assertIn('incorrect', body['__all__'])
def test_login_incorrect_password(self):
response = self.client.post('/login/', { 'email': 'user@pandaproject.net', 'password': 'NOPANDA' })
self.assertEqual(response.status_code, 400)
body = json.loads(response.content)
self.assertIn('incorrect', body['__all__'])
def test_no_get(self):
response = self.client.get('/login/', { 'email': 'user@pandaproject.net', 'password': 'NOPANDA' })
self.assertEqual(response.status_code, 400)
body = json.loads(response.content)
self.assertEqual(body, None)
class TestActivate(TransactionTestCase):
fixtures = ['init_panda.json']
def setUp(self):
self.user = utils.get_panda_user()
self.client = Client()
def test_activate(self):
new_user = User.objects.create(
email="foo@bar.com",
username="foo@bar.com",
is_active=False
)
response = self.client.post('/activate/', { 'activation_key': new_user.get_profile().activation_key, 'email': 'foo@bar.com', 'password': 'foobarbaz' })
self.assertEqual(response.status_code, 200)
self.assertEqual(authenticate(username='foo@bar.com', password='foobarbaz'), new_user)
|
Python
| 0
|
@@ -2342,32 +2342,854 @@
ent = Client()%0A%0A
+ def test_check_activation_key_valid(self):%0A new_user = User.objects.create(%0A email=%22foo@bar.com%22,%0A username=%22foo@bar.com%22,%0A is_active=False%0A )%0A%0A key = new_user.get_profile().activation_key%0A%0A response = self.client.get('/check_activation_key/%25s/' %25 key)%0A%0A self.assertEqual(response.status_code, 200)%0A %0A body = json.loads(response.content) %0A%0A self.assertEqual(body%5B'activation_key'%5D, key)%0A self.assertEqual(body%5B'email'%5D, new_user.email)%0A self.assertEqual(body%5B'first_name'%5D, '')%0A self.assertEqual(body%5B'last_name'%5D, '')%0A%0A def test_check_activation_key_invalid(self):%0A response = self.client.get('/check_activation_key/NOT_A_VALID_KEY/')%0A%0A self.assertEqual(response.status_code, 400)%0A %0A
def test_act
|
2e4837721a22985894f932536c45989aaec8006b
|
Stop printing the exception object.
|
code/daemon/transporters/transporter.py
|
code/daemon/transporters/transporter.py
|
"""transporter.py Transporter class for daemon"""
__author__ = "Wim Leers (work@wimleers.com)"
__version__ = "$Rev$"
__date__ = "$Date$"
__license__ = "GPL"
import sys
import os
sys.path.append(os.path.abspath('../dependencies'))
from django.core.files.storage import Storage
from django.core.files import File
# Define exceptions.
class TransporterError(Exception): pass
class InvalidSettingError(TransporterError): pass
class MissingSettingError(TransporterError): pass
class InvalidCallbackError(TransporterError): pass
class ConnectionError(TransporterError): pass
import threading
import Queue
import time
from sets import Set, ImmutableSet
class Transporter(threading.Thread):
"""threaded abstraction around a Django Storage subclass"""
def __init__(self, settings, callback):
if not callable(callback):
raise InvalidCallbackError
self.settings = settings
self.storage = False
self.is_ready = False
self.lock = threading.Lock()
self.queue = Queue.Queue()
self.callback = callback
self.die = False
threading.Thread.__init__(self)
def run(self):
while not self.die:
self.lock.acquire()
try:
(filepath, path) = self.queue.get_nowait()
self.lock.release()
# Sync the file.
f = File(open(filepath, "rb"))
target = os.path.join(path, filepath)
if self.storage.exists(target):
self.storage.delete(target)
self.storage.save(target, f)
f.close()
# Call the callback function.
url = self.storage.url(filepath)
url = self.alter_url(url)
self.callback(filepath, url)
except Exception, e:
print e
self.lock.release()
# Sleep a little bit.
time.sleep(0.1)
def alter_url(self, url):
"""allow some classes to alter the generated URL"""
return url
def stop(self):
self.lock.acquire()
self.die = True
self.lock.release()
def validate_settings(self, valid_settings, required_settings, settings):
if len(settings.difference(valid_settings)):
raise InvalidSettingError
if len(required_settings.difference(settings)):
raise InvalidSettingError
def sync_file(self, filepath, path=""):
"""sync a file"""
self.lock.acquire()
self.queue.put((filepath, path))
self.lock.release()
|
Python
| 0
|
@@ -1849,32 +1849,8 @@
e:%0A
- print e%0A
|
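The commit simply deletes the bare print in the worker loop. A common substitute, sketched below with the standard logging module (a suggested pattern, not what this commit adds), keeps failures visible together with their tracebacks instead of polluting stdout:

import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger('transporter')

try:
    open('/nonexistent/file', 'rb')
except Exception:
    # log.exception() records the message plus the current traceback.
    log.exception('sync failed')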
0ab96a8984d3ac4eb26bf8cd5cf02228101341d7
|
Update formatting status
|
polyaxon_cli/utils/formatting.py
|
polyaxon_cli/utils/formatting.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import click
import six
import sys
from polyaxon_schemas.experiment import ContainerResourcesConfig
from polyaxon_schemas.settings import K8SResourcesConfig
from polyaxon_schemas.utils import to_list, to_percentage, to_unit_memory
from tabulate import tabulate
def get_meta_response(response):
results = {}
if response.get('next'):
results['next'] = '--page={}'.format(response['next'])
if response.get('previous'):
results['previous'] = '--page={}'.format(response['previous'])
if response.get('count'):
results['count'] = response['count']
return results
def list_dicts_to_tabulate(list_dicts):
results = OrderedDict()
for d_value in list_dicts:
for k, v in six.iteritems(d_value):
if k in results:
results[k].append(v)
else:
results[k] = [v]
return results
def dict_tabulate(dict_value, is_list_dict=False):
if is_list_dict:
headers = six.iterkeys(dict_value)
click.echo(tabulate(dict_value, headers=headers))
else:
click.echo(tabulate(six.iteritems(dict_value)))
class Printer(object):
COLORS = ['yellow', 'blue', 'magenta', 'green', 'cyan', 'red', 'white']
@staticmethod
def print_header(text):
click.secho('\n{}\n'.format(text), fg='yellow')
@staticmethod
def print_warning(text):
click.secho('\n{}\n'.format(text), fg='magenta')
@staticmethod
def print_success(text):
click.secho('\n{}\n'.format(text), fg='green')
@staticmethod
def print_error(text):
click.secho('\n{}\n'.format(text), fg='red')
@staticmethod
def add_color(value, color):
return click.style('{}'.format(value), fg=color)
@classmethod
def add_status_color(cls, obj_dict):
if obj_dict.get('is_running'):
obj_dict['last_status'] = cls.add_color(obj_dict['last_status'], color='yellow')
elif obj_dict.get('is_done'):
color = 'green' if obj_dict['last_status'] == 'Succeeded' else 'red'
obj_dict['last_status'] = cls.add_color(obj_dict['last_status'], color=color)
elif obj_dict.get('last_status'):
obj_dict['last_status'] = cls.add_color(obj_dict['last_status'], color='cyan')
obj_dict.pop('is_done', None)
obj_dict.pop('is_running', None)
return obj_dict
@classmethod
def add_memory_unit(cls, obj_dict, keys):
keys = to_list(keys)
for key in keys:
obj_dict[key] = to_unit_memory(obj_dict[key])
return obj_dict
@classmethod
def handle_statuses(cls, obj_dict):
if obj_dict.get('status') == 'Created':
obj_dict['status'] = cls.add_color(obj_dict['status'], color='cyan')
elif obj_dict.get('status') == 'Succeeded':
obj_dict['status'] = cls.add_color(obj_dict['status'], color='green')
elif obj_dict.get('status') in ['Failed', 'Deleted']:
obj_dict['status'] = cls.add_color(obj_dict['status'], color='red')
else:
obj_dict['status'] = cls.add_color(obj_dict['status'], color='yellow')
return obj_dict
@classmethod
def decorate_format_value(cls, text_format, values, color):
values = to_list(values)
values = [cls.add_color(value, color) for value in values]
click.echo(text_format.format(*values))
@staticmethod
def log(value):
click.echo(value, nl=False)
@classmethod
def resources(cls, jobs_resources):
jobs_resources = to_list(jobs_resources)
click.clear()
data = [['Job', 'Mem Usage / Total', 'CPU% - CPUs']]
for job_resources in jobs_resources:
job_resources = ContainerResourcesConfig.from_dict(job_resources)
line = [
job_resources.job_name,
'{} / {}'.format(to_unit_memory(job_resources.memory_used),
to_unit_memory(job_resources.memory_limit)),
'{} - {}'.format(to_percentage(job_resources.cpu_percentage / 100),
job_resources.n_cpus)]
data.append(line)
click.echo(tabulate(data, headers="firstrow"))
sys.stdout.flush()
@classmethod
def gpu_resources(cls, jobs_resources):
jobs_resources = to_list(jobs_resources)
click.clear()
data = [
['job_name', 'name', 'GPU Usage', 'GPU Mem Usage / Total', 'GPU Temperature',
'Power Draw / Limit']
]
non_gpu_jobs = 0
for job_resources in jobs_resources:
job_resources = ContainerResourcesConfig.from_dict(job_resources)
line = []
if not job_resources.gpu_resources:
non_gpu_jobs += 1
continue
for gpu_resources in job_resources.gpu_resources:
line += [
job_resources.job_name,
gpu_resources.name,
to_percentage(gpu_resources.utilization_gpu / 100),
'{} / {}'.format(
to_unit_memory(gpu_resources.memory_used),
to_unit_memory(gpu_resources.memory_total)),
gpu_resources.temperature_gpu,
'{} / {}'.format(gpu_resources.power_draw, gpu_resources.power_limit),
]
data.append(line)
if non_gpu_jobs == len(jobs_resources):
Printer.print_error(
'No GPU job was found, please run `resources` command without `-g | --gpu` option.')
exit(1)
click.echo(tabulate(data, headers="firstrow"))
sys.stdout.flush()
def get_experiments_with_metrics(response):
objects = [
o.to_light_dict(include_attrs=['sequence', 'unique_name', 'total_run', 'last_metric'])
for o in response['results']
]
# Extend experiment with metrics
metric_keys = set([])
for obj in objects:
last_metric = obj.pop('last_metric', {}) or {}
metric_keys |= set(six.iterkeys(last_metric))
obj.update(last_metric)
# Check that all obj have all metrics
# TODO: optimize this process
for obj in objects:
obj_keys = set(six.iterkeys(obj))
for metric in metric_keys:
if metric not in obj_keys:
obj[metric] = None
return objects
def get_resources(resources, header=None):
header = header or 'Resources:'
Printer.print_header(header)
objects = []
for item in six.iterkeys(resources):
item_dict = OrderedDict()
item_dict['resource'] = item
item_dict.update(resources[item] or K8SResourcesConfig().to_dict())
objects.append(item_dict)
objects = list_dicts_to_tabulate(objects)
dict_tabulate(objects, is_list_dict=True)
|
Python
| 0
|
@@ -3080,13 +3080,13 @@
', '
-Delet
+Stopp
ed'%5D
|
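The fix changes the terminal-state list so 'Stopped' rather than 'Deleted' is rendered red. The same mapping can be written as a lookup table with a fallback color, an alternative structure to the if/elif chain above (not the project's code):

STATUS_COLORS = {
    'Created': 'cyan',
    'Succeeded': 'green',
    'Failed': 'red',
    'Stopped': 'red',
}

def status_color(status, default='yellow'):
    # Unknown or in-progress statuses fall back to yellow.
    return STATUS_COLORS.get(status, default)

print(status_color('Stopped'))  # -> red
print(status_color('Running'))  # -> yellow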
1d1071d782619c2e1daebda86cf8bc550d7c4ce5
|
Improve import grouping in binder.py
|
tweepy/binder.py
|
tweepy/binder.py
|
# Tweepy
# Copyright 2009-2021 Joshua Roesslein
# See LICENSE for details.
import logging
import sys
import time
import requests
from urllib.parse import urlencode
from tweepy.error import is_rate_limit_error_message, RateLimitError, TweepError
from tweepy.models import Model
log = logging.getLogger(__name__)
def bind_api(api, method, endpoint, *args, allowed_param=[], params=None,
headers=None, json_payload=None, parser=None, payload_list=False,
payload_type=None, post_data=None, require_auth=False,
return_cursors=False, upload_api=False, use_cache=True, **kwargs):
# If authentication is required and no credentials
# are provided, throw an error.
if require_auth and not api.auth:
raise TweepError('Authentication required!')
api.cached_result = False
# Build the request URL
path = f'/1.1/{endpoint}.json'
if upload_api:
url = 'https://' + api.upload_host + path
else:
url = 'https://' + api.host + path
if params is None:
params = {}
for idx, arg in enumerate(args):
if arg is None:
continue
try:
params[allowed_param[idx]] = str(arg)
except IndexError:
raise TweepError('Too many parameters supplied!')
for k, arg in kwargs.items():
if arg is None:
continue
if k in params:
raise TweepError(f'Multiple values for parameter {k} supplied!')
params[k] = str(arg)
log.debug("PARAMS: %r", params)
# Query the cache if one is available
# and this request uses a GET method.
if use_cache and api.cache and method == 'GET':
cache_result = api.cache.get(f'{path}?{urlencode(params)}')
# if cache result found and not expired, return it
if cache_result:
# must restore api reference
if isinstance(cache_result, list):
for result in cache_result:
if isinstance(result, Model):
result._api = api
else:
if isinstance(cache_result, Model):
cache_result._api = api
api.cached_result = True
return cache_result
# Monitoring rate limits
remaining_calls = None
reset_time = None
session = requests.Session()
if parser is None:
parser = api.parser
try:
# Continue attempting request until successful
# or maximum number of retries is reached.
retries_performed = 0
while retries_performed <= api.retry_count:
if (api.wait_on_rate_limit and reset_time is not None
and remaining_calls is not None
and remaining_calls < 1):
# Handle running out of API calls
sleep_time = reset_time - int(time.time())
if sleep_time > 0:
log.warning(f"Rate limit reached. Sleeping for: {sleep_time}")
time.sleep(sleep_time + 1) # Sleep for extra sec
# Apply authentication
auth = None
if api.auth:
auth = api.auth.apply_auth()
# Execute request
try:
resp = session.request(
method, url, params=params, headers=headers,
data=post_data, json=json_payload, timeout=api.timeout,
auth=auth, proxies=api.proxy
)
except Exception as e:
raise TweepError(f'Failed to send request: {e}').with_traceback(sys.exc_info()[2])
if 200 <= resp.status_code < 300:
break
rem_calls = resp.headers.get('x-rate-limit-remaining')
if rem_calls is not None:
remaining_calls = int(rem_calls)
elif remaining_calls is not None:
remaining_calls -= 1
reset_time = resp.headers.get('x-rate-limit-reset')
if reset_time is not None:
reset_time = int(reset_time)
retry_delay = api.retry_delay
if resp.status_code in (420, 429) and api.wait_on_rate_limit:
if remaining_calls == 0:
# If ran out of calls before waiting switching retry last call
continue
if 'retry-after' in resp.headers:
retry_delay = float(resp.headers['retry-after'])
elif api.retry_errors and resp.status_code not in api.retry_errors:
# Exit request loop if non-retry error code
break
# Sleep before retrying request again
time.sleep(retry_delay)
retries_performed += 1
# If an error was returned, throw an exception
api.last_response = resp
if resp.status_code and not 200 <= resp.status_code < 300:
try:
error_msg, api_error_code = parser.parse_error(resp.text)
except Exception:
error_msg = f"Twitter error response: status code = {resp.status_code}"
api_error_code = None
if is_rate_limit_error_message(error_msg):
raise RateLimitError(error_msg, resp)
else:
raise TweepError(error_msg, resp, api_code=api_error_code)
# Parse the response payload
return_cursors = return_cursors or 'cursor' in params or 'next' in params
result = parser.parse(
resp.text, api=api, payload_list=payload_list,
payload_type=payload_type, return_cursors=return_cursors
)
# Store result into cache if one is available.
if use_cache and api.cache and method == 'GET' and result:
api.cache.store(f'{path}?{urlencode(params)}', result)
return result
finally:
session.close()
def pagination(mode):
def decorator(method):
method.pagination_mode = mode
return method
return decorator
def payload(payload_type, **payload_kwargs):
payload_list = payload_kwargs.get('list', False)
def decorator(method):
def wrapper(*args, **kwargs):
kwargs['payload_list'] = payload_list
kwargs['payload_type'] = payload_type
return method(*args, **kwargs)
wrapper.payload_list = payload_list
wrapper.payload_type = payload_type
return wrapper
return decorator
|
Python
| 0.000001
|
@@ -110,25 +110,8 @@
time
-%0A%0Aimport requests
%0Afro
@@ -143,16 +143,33 @@
encode%0A%0A
+import requests%0A%0A
from twe
|
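The diff moves import requests out of the stdlib block so the file follows PEP 8's import grouping: standard library first, third-party next, local package last, each group separated by a blank line. The resulting layout in miniature (requests and tweepy must be installed for this to import):

# Standard library
import logging
import sys
import time
from urllib.parse import urlencode

# Third-party
import requests

# Local package
from tweepy.models import Model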
c7f0402bce524eeca79f7fb0041d251ff2644b57
|
fix py2 issue
|
pandasdmx/writer/data2pandas.py
|
pandasdmx/writer/data2pandas.py
|
'''
pandasdmx.writer.data2pandas - a pandas writer for PandaSDMX
@author: Dr. Leo
'''
import pandas as PD
import numpy as NP
from pandasdmx.writer.common import BaseWriter
class Writer(BaseWriter):
def write(self, data, asframe = False, dtype = NP.float64,
attributes = 'osgd'):
'''
Generate pandas.Series from model.Series
data: a model.DataSet or iterator of model.Series
asframe: if True, merge the series of values and/or attributes
into one or two multi-indexed
pandas.DataFrame(s), otherwise return an iterator of pandas.Series.
(default: False)
dtype: datatype for values. Defaults to 'float64'
if None, do not return the values of a series. In this case,
'attributes' must not be an empty string.
attributes: string determining which attributes, if any,
should be returned in separate series or a separate DataFrame.
'attributes' may have one of the following values: '', 'o', 's', 'g', 'd'
or any combination thereof such as 'os', 'go'. Defaults to 'osgd'.
Where 'o', 's', 'g' and 'd' mean that attributes at observation,
series, group and dataset level will be returned as members of
per-observation namedtuples.
'''
# Preparations
dim_at_obs = self.msg.header.dim_at_obs
# validate 'attributes'
try:
attributes = attributes.lower()
except AttributeError:
raise TypeError("'attributes' argument must be of type str.")
if set(attributes) - {'o','s','g', 'd'}:
raise ValueError("'attributes' must only contain 'o', 's', 'g' or 'd'.")
# Allow data to be either an iterator or a model.DataSet instance
if hasattr(data, '__iter__'): iter_series = data
else: iter_series = data.series
if asframe:
series_list = list(s for s in self.iter_pd_series(
iter_series, dim_at_obs, dtype, attributes))
if dtype and attributes:
pd_series, pd_attributes = zip(*series_list)
index_source = pd_series
elif dtype:
pd_series = index_source = series_list
elif attributes:
pd_attributes = index_source = series_list
# Extract dimensions
index_tuples = list(s.name for s in index_source)
level_names = list(index_source[0].name._fields)
col_index = PD.MultiIndex.from_tuples(index_tuples,
names = level_names)
if dtype:
for s in pd_series: s.name = None
# Merge series into multi-indexed DataFrame and return it.
d_frame = PD.concat(list(pd_series), axis = 1)
d_frame.columns = col_index
if attributes:
for s in pd_attributes: s.name = None
a_frame = PD.concat(pd_attributes, axis = 1)
a_frame.columns = col_index
if dtype and attributes: return d_frame, a_frame
elif dtype: return d_frame
else: return a_frame
# return an iterator
else:
return self.iter_pd_series(iter_series, dim_at_obs, dtype,
attributes)
def iter_pd_series(self, iter_series, dim_at_obs, dtype, attributes):
# Pre-compute some values before looping over the series
o_in_attrib = 'o' in attributes
s_in_attrib = 's' in attributes
g_in_attrib = 'g' in attributes
d_in_attrib = 'd' in attributes
for series in iter_series:
# Generate the 3 main columns: index, values and attributes
obs_zip = zip(*series.obs(dtype, attributes))
obs_dim = next(obs_zip)
l = len(obs_dim)
obs_values = NP.array(next(obs_zip), dtype = dtype)
if attributes:
obs_attrib = NP.array(tuple(next(obs_zip)), dtype = 'O')
# Generate the index
if dim_at_obs == 'TIME_PERIOD':
# Check if we can build the index based on start and freq
try:
f = series.key.FREQ
series_index = PD.period_range(start = obs_dim[0], periods = l, freq = f)
except KeyError:
series_index = PD.PeriodIndex(obs_dim)
elif dim_at_obs == 'TIME':
try:
f = series.key.FREQ
series_index = PD.date_range(start = obs_dim[0], periods = l, freq = f)
except KeyError:
series_index = PD.DatetimeIndex(obs_dim)
else: series_index = PD.Index(obs_dim)
if dtype:
value_series = PD.Series(obs_values, index = series_index, name = series.key)
if attributes:
for d in obs_attrib:
if not o_in_attrib: d.clear()
if s_in_attrib: d.update(series.attrib)
if g_in_attrib: d.update(series.group_attrib)
if d_in_attrib: d.update(series.dataset.attrib)
attrib_series = PD.Series(obs_attrib,
index = series_index, dtype = 'object')
# decide what to yield
if dtype and attributes:
yield value_series, attrib_series
elif dtype: yield value_series
elif attributes: yield attrib_series
else: raise ValueError(
"At least one of 'dtype' or 'attributes' args must be True.")
|
Python
| 0.000001
|
@@ -4067,16 +4067,21 @@
s_zip =
+iter(
zip(*ser
@@ -4099,32 +4099,33 @@
pe, attributes))
+)
%0D%0A ob
|
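The writer above builds a column MultiIndex from the namedtuple keys of the individual series before concatenating them. A minimal standalone sketch of that technique, with a toy two-level key (the key fields and data here are illustrative assumptions, not part of the original module):

from collections import namedtuple
import numpy as NP
import pandas as PD

Key = namedtuple('Key', ['FREQ', 'REF_AREA'])  # hypothetical series key
s1 = PD.Series(NP.array([1.0, 2.0]), name=Key('A', 'DE'))
s2 = PD.Series(NP.array([3.0, 4.0]), name=Key('A', 'FR'))

# same steps as the writer above: key tuples -> MultiIndex, clear names, concat
index_tuples = [s.name for s in (s1, s2)]
level_names = list(index_tuples[0]._fields)
col_index = PD.MultiIndex.from_tuples(index_tuples, names=level_names)
for s in (s1, s2):
    s.name = None  # names must be cleared or concat reuses them as labels
frame = PD.concat([s1, s2], axis=1)
frame.columns = col_index  # columns now indexed by (FREQ, REF_AREA)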
fc75f5843af70c09e0d63284277bf88689cbb06d
|
Add apidoc to doc building
|
invocations/docs.py
|
invocations/docs.py
|
import os
from invoke.tasks import task
from invoke.runner import run
docs_dir = 'docs'
build = os.path.join(docs_dir, '_build')
@task
def clean_docs():
run("rm -rf %s" % build)
@task
def browse_docs():
run("open %s" % os.path.join(build, 'index.html'))
@task
def docs(clean=False, browse=False):
if clean:
clean_docs.body()
run("sphinx-build %s %s" % (docs_dir, build), pty=True)
if browse:
browse_docs.body()
|
Python
| 0
|
@@ -278,40 +278,1187 @@
def
-docs(clean=False, browse=False):
+api_docs(target, output=%22api%22, exclude=%22%22):%0A %22%22%22%0A Runs %60%60sphinx-apidoc%60%60 to autogenerate your API docs.%0A%0A Must give target directory/package as %60%60target%60%60. Results are written out%0A to %60%60docs/%3Coutput%3E%60%60 (%60%60docs/api%60%60 by default).%0A%0A To exclude certain output files from the final build give %60%60exclude%60%60 as a%0A comma separated list of file paths.%0A %22%22%22%0A output = os.path.join('docs', output)%0A # Have to make these absolute or apidoc is dumb :(%0A exclude = map(%0A lambda x: os.path.abspath(os.path.join(os.getcwd(), x)),%0A exclude.split(',')%0A )%0A run(%22sphinx-apidoc -o %25s %25s %25s%22 %25 (output, target, ' '.join(exclude)))%0A%0A%0A@task%0Adef docs(clean=False, browse=False, api_target=None, api_output=None,%0A api_exclude=None):%0A %22%22%22%0A Build Sphinx docs, optionally %60%60clean%60%60ing and/or %60%60browse%60%60ing.%0A%0A Can also build API docs by giving %60%60api_target%60%60 and optionally%0A %60%60api_output%60%60 and/or %60%60api_exclude%60%60.%0A %22%22%22%0A if api_target:%0A kwargs = %7B'target': api_target%7D%0A if api_output:%0A kwargs%5B'output'%5D = api_output%0A if api_exclude:%0A kwargs%5B'exclude'%5D = api_exclude%0A api_docs.body(**kwargs)
%0A
|
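The diff above wraps sphinx-apidoc in an invoke task and absolutizes the comma-separated exclude paths; its own comment notes that apidoc needs them absolute. A rough, self-contained sketch of just that path handling (the helper name is illustrative, not the project's API):

import os

def apidoc_command(target, output='api', exclude=''):
    output = os.path.join('docs', output)
    # relative exclude paths are not honored by sphinx-apidoc, so absolutize
    excludes = [os.path.abspath(os.path.join(os.getcwd(), x))
                for x in exclude.split(',') if x]
    return 'sphinx-apidoc -o %s %s %s' % (output, target, ' '.join(excludes))

print(apidoc_command('mypackage', exclude='mypackage/vendored,setup.py'))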
0a07f6ac82f099d836eb5276063adab245979258
|
rename `recall` to `call`
|
chainer/training/triggers/once_trigger.py
|
chainer/training/triggers/once_trigger.py
|
class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or recalled when training resumed.
Args:
recall_on_resume (bool): Whether the extension is recalled or not when
restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, recall_on_resume=False):
self._recall_on_resume = recall_on_resume
self._flag_called = False
def trigger(self, trainer):
if self._flag_called:
return False
self._flag_called = True
return True
@property
def skip_initialize(self):
"""The flag decide to call `Extension.initialize` or not.
If this flag is exist and set `True`, `Extension.initialize` is
skipped.
"""
return self._flag_called
def serialize(self, serializer):
if not self._recall_on_resume:
self._flag_called = serializer('_flag_called', self._flag_called)
|
Python
| 0.002129
|
@@ -254,25 +254,29 @@
tion or
-re
called
+again
when tra
@@ -309,18 +309,16 @@
-re
call_on_
@@ -361,28 +361,27 @@
is
-re
called
+again
or not
- when
%0A
@@ -389,16 +389,21 @@
+when
restored
@@ -487,18 +487,16 @@
_(self,
-re
call_on_
@@ -520,26 +520,24 @@
self._
-re
call_on_resu
@@ -541,18 +541,16 @@
esume =
-re
call_on_
@@ -1046,18 +1046,16 @@
t self._
-re
call_on_
|
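The trigger above fires exactly once per training run; the diff only renames the ``recall_on_resume`` knob to ``call_on_resume`` and adjusts the wording. A minimal runnable sketch of the once-only pattern, using the post-rename name (the trainer machinery is omitted):

class OnceTrigger(object):
    def __init__(self, call_on_resume=False):
        self._call_on_resume = call_on_resume
        self._flag_called = False

    def trigger(self, trainer=None):
        # True only on the first call; afterwards the flag blocks firing
        if self._flag_called:
            return False
        self._flag_called = True
        return True

t = OnceTrigger()
assert t.trigger() is True
assert t.trigger() is False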
2d6906bc58275b18102b4523a4faa5078a6e74f1
|
fix wrong description (all?)
|
chainercv/transforms/image/random_crop.py
|
chainercv/transforms/image/random_crop.py
|
import random
import six
def random_crop(img, output_shape, return_slices=False, copy=False):
"""Crop array randomly into `output_shape`.
All arrays will be cropped by the same region randomly selected. The
output will all be in shape :obj:`output_shape`.
Args:
img (~numpy.ndarray): An image array to be cropped. This is in
CHW format.
output_shape (tuple): the size of the output image after cropping.
This value is :math:`(height, width)`.
return_slices (bool): If :obj:`True`, this function returns
information of slices.
copy (bool): If :obj:`False`, a view of :obj:`img` is returned.
Returns:
This function returns :obj:`out_img, slice_H, slice_W` if
:obj:`return_slices = True`. Otherwise, this returns
:obj:`out_img`.
Note that :obj:`out_img` is the transformed image array.
Also, :obj:`slice_H` and :obj:`slice_W` are slices used to crop the
input image. The following relationship is satisfied.
.. code::
out_img = img[:, slice_H, slice_W]
"""
H, W = output_shape
if img.shape[1] == H:
start_H = 0
elif img.shape[1] > H:
start_H = random.choice(six.moves.range(img.shape[1] - H))
else:
raise ValueError('shape of image is smaller than output shape')
slice_H = slice(start_H, start_H + H)
if img.shape[2] == W:
start_W = 0
elif img.shape[2] > W:
start_W = random.choice(six.moves.range(img.shape[2] - W))
else:
raise ValueError('shape of image is smaller than output shape')
slice_W = slice(start_W, start_W + W)
img = img[:, slice_H, slice_W]
if copy:
img = img.copy()
if return_slices:
return img, slice_H, slice_W
else:
return img
|
Python
| 0.000377
|
@@ -146,108 +146,83 @@
-All arrays will be cropped by the same region randomly selected. The%0A output will all be in shape
+The input image is cropped by a randomly selected region whose shape%0A is
:ob
|
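Assuming the ``random_crop`` above is in scope, a short usage sketch checking the slice relationship its docstring promises (the array contents are arbitrary):

import numpy as np

img = np.arange(3 * 8 * 8, dtype=np.float32).reshape(3, 8, 8)  # CHW
out, slice_H, slice_W = random_crop(img, (4, 4), return_slices=True)
assert out.shape == (3, 4, 4)
assert np.array_equal(out, img[:, slice_H, slice_W])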
57e177e47bcc54683654e5d0de81af6e0cbd803d
|
update version
|
pimat_web/version.py
|
pimat_web/version.py
|
__version__ = '0.6.17'
|
Python
| 0
|
@@ -17,7 +17,7 @@
.6.1
-7
+8
'%0A
|
e3b6b9864376f2dabe42b6d80d4a5db65cb85d30
|
Update docs for 'prep_command()'
|
src/python/pants/backend/core/targets/prep_command.py
|
src/python/pants/backend/core/targets/prep_command.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.target import Target
class PrepCommand(Target):
"""A command that must be run before some other target can be built.
For example, a script that sets up tunnels to database servers
might need to be run before running integration tests
"""
def __init__(self, prep_executable=None, prep_args=None, payload=None, prep_environ=False, **kwargs):
"""
:param prep_executable: The path to the executable that should be run.
:param prep_args: A list of command-line args to the executable.
:param prep_environ: If True, the output of the command will be treated as
a \0-separated list of key=value pairs to insert into the environment.
Note that this will pollute the environment for all future tests, so
avoid it if at all possible.
"""
payload = payload or Payload()
payload.add_fields({
'prep_command_executable': PrimitiveField(prep_executable),
'prep_command_args': PrimitiveField(prep_args or []),
'prep_environ': PrimitiveField(prep_environ),
})
super(PrepCommand, self).__init__(payload=payload, **kwargs)
|
Python
| 0.000001
|
@@ -518,13 +518,14 @@
be
-built
+tested
.%0A%0A
@@ -537,16 +537,56 @@
example,
+ you can use %60prep_command()%60 to execute
a scrip
@@ -623,16 +623,18 @@
base
+%0A
servers
%0A m
@@ -633,64 +633,205 @@
vers
-%0A might need to be run before running integration tests
+. These tunnels could then be leveraged by integration tests.%0A%0A Pants will only execute the %60prep_command()%60 under the test goal, when testing targets that%0A depend on the %60prep_command()%60 target.
%0A %22
@@ -1176,16 +1176,19 @@
a
+%5C%5C%5C
%5C0-separ
|
633768a7e78c0ec9c827206ea88ad83ddf1b709a
|
fix tests for site links
|
bluebottle/clients/tests/test_utils.py
|
bluebottle/clients/tests/test_utils.py
|
from django.contrib.auth.models import Permission
from django.contrib.auth import get_user_model
from bluebottle.test.utils import BluebottleTestCase
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.cms import (
SiteLinksFactory, LinkFactory, LinkGroupFactory, LinkPermissionFactory
)
from bluebottle.clients.utils import get_user_site_links
class TestSiteLinks(BluebottleTestCase):
def setUp(self):
super(TestSiteLinks, self).setUp()
self.user1 = BlueBottleUserFactory.create()
self.site_links = SiteLinksFactory.create()
self.link_groups = {}
self._add_link(title='Project List', component='project')
self._add_link(title='Task List', component='task')
self._add_link(group_name='about', title='Search', external_link='https://duck.example.com')
def _add_link(self, group_name='main', **kwargs):
if group_name not in self.link_groups:
self.link_groups[group_name] = LinkGroupFactory.create(title='{} Group'.format(group_name), name=group_name,
site_links=self.site_links)
return LinkFactory.create(link_group=self.link_groups[group_name], **kwargs)
def test_user_site_links_response(self):
results = get_user_site_links(self.user1)
self.assertEqual(len(results['main']['links']), 2)
self.assertEqual(len(results['about']['links']), 1)
link1 = results['main']['links'][0]
expected1 = {
'route': 'project',
'isHighlighted': False,
'title': 'Project List'
}
self.assertEqual(results['main']['title'], 'main Group')
self.assertEqual(link1, expected1)
def test_user_site_links_external(self):
results = get_user_site_links(self.user1)
print(results)
link = results['about']['links'][0]
self.assertTrue(link['external'])
def test_user_site_links_perm(self):
# Add link with resultpage permission
secret_link = self._add_link(title='Results Page', component='results')
perm = LinkPermissionFactory.create(permission='cms.api_change_resultpage',
present=True)
secret_link.link_permissions.add(perm)
# User can't access link with permissions
results = get_user_site_links(self.user1)
self.assertEqual(len(results['main']['links']), 2)
# Add resultpage permission to User
resultpage_perm = Permission.objects.get(codename='api_change_resultpage')
self.user1.user_permissions.add(resultpage_perm)
self.user1 = get_user_model().objects.get(pk=self.user1.pk)
# User can now access link with resultpage permission
results = get_user_site_links(self.user1)
self.assertEqual(len(results['main']['links']), 3)
def test_user_site_links_missing_perm(self):
# Add link with absent resultpage permission
secret_link = self._add_link(title='Public Results Page', component='results')
perm = LinkPermissionFactory.create(permission='cms.api_change_resultpage',
present=False)
secret_link.link_permissions.add(perm)
# User can access link without permission
results = get_user_site_links(self.user1)
self.assertEqual(len(results['main']['links']), 3)
# Add resultpage permission to User
resultpage_perm = Permission.objects.get(codename='api_change_resultpage')
self.user1.user_permissions.add(resultpage_perm)
self.user1 = get_user_model().objects.get(pk=self.user1.pk)
# User can not access link with absent resultpage permission
results = get_user_site_links(self.user1)
self.assertEqual(len(results['main']['links']), 2)
|
Python
| 0
|
@@ -403,16 +403,155 @@
links%0A%0A%0A
+def _group_by_name(results, name):%0A groups = results%5B'groups'%5D%0A return (group for group in groups if group%5B'name'%5D == name).next()%0A%0A%0A
class Te
@@ -1507,24 +1507,106 @@
elf.user1)%0A%0A
+ main = _group_by_name(results, 'main')%0A main_links = main%5B'links'%5D%0A
self
@@ -1626,32 +1626,18 @@
len(
-results%5B'main'%5D%5B'
+main_
links
-'%5D
), 2
@@ -1667,32 +1667,48 @@
ual(len(
+_group_by_name(
results
-%5B
+,
'about'
-%5D
+)
%5B'links'
@@ -1735,32 +1735,18 @@
1 =
-results%5B'main'%5D%5B'
+main_
links
-'%5D
%5B0%5D%0A
@@ -1870,16 +1870,43 @@
ct List'
+,%0A 'sequence': 1
%0A
@@ -1933,31 +1933,20 @@
rtEqual(
-results%5B'
main
-'%5D
%5B'title'
@@ -2104,31 +2104,8 @@
er1)
-%0A print(results)
%0A%0A
@@ -2121,24 +2121,40 @@
k =
+_group_by_name(
results
-%5B
+,
'about'
-%5D
+)
%5B'li
@@ -2687,39 +2687,55 @@
rtEqual(len(
+_group_by_name(
results
-%5B
+,
'main'
-%5D
+)
%5B'links'%5D),
@@ -3128,39 +3128,55 @@
rtEqual(len(
+_group_by_name(
results
-%5B
+,
'main'
-%5D
+)
%5B'links'%5D),
@@ -3684,39 +3684,55 @@
rtEqual(len(
+_group_by_name(
results
-%5B
+,
'main'
-%5D
+)
%5B'links'%5D),
@@ -4140,23 +4140,39 @@
len(
+_group_by_name(
results
-%5B
+,
'main'
-%5D
+)
%5B'li
|
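The diff above introduces a ``_group_by_name`` helper that uses the Python 2 ``.next()`` generator method. An equivalent, runnable Python 3 sketch of the same lookup, with toy data:

def _group_by_name(results, name):
    groups = results['groups']
    return next(group for group in groups if group['name'] == name)

results = {'groups': [{'name': 'main', 'links': []},
                      {'name': 'about', 'links': [{'title': 'Search'}]}]}
assert _group_by_name(results, 'about')['links'][0]['title'] == 'Search'

Note that ``next()`` raises StopIteration when no group matches; the helper assumes the name exists.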
a9ac098ec492739f37005c9bd6278105df0261c5
|
Add fields to save question url and annexure links
|
parliamentsearch/items.py
|
parliamentsearch/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MemberofParliament(scrapy.Item):
"""
Data structure to define Member of Parliament information
"""
mp_id = scrapy.Field()
mp_name = scrapy.Field()
mp_constituency = scrapy.Field()
mp_party = scrapy.Field()
mp_photo = scrapy.Field()
class RajyaSabhaQuestion(scrapy.Item):
"""
Data structure to define a Rajya Sabha question
"""
q_no = scrapy.Field()
q_type = scrapy.Field()
q_date = scrapy.Field()
q_ministry = scrapy.Field()
q_member = scrapy.Field()
q_subject = scrapy.Field()
class LokSabhaQuestion(scrapy.Item):
"""
Data structure to define a Lok Sabha question
"""
q_no = scrapy.Field()
q_session = scrapy.Field()
q_type = scrapy.Field()
q_date = scrapy.Field()
q_ministry = scrapy.Field()
q_member = scrapy.Field()
q_subject = scrapy.Field()
|
Python
| 0
|
@@ -926,28 +926,78 @@
%09q_subject = scrapy.Field()%0A
+%09q_url = scrapy.Field()%0A%09q_annex = scrapy.Field()%0A
|
f1bf3492d8106a953b0e0666a5042e8cbfefa797
|
Fix comments
|
hs_core/management/commands/solr_recover.py
|
hs_core/management/commands/solr_recover.py
|
"""This lists all the large resources and their statuses.
This helps in checking that they download properly.
* By default, prints errors on stdout.
* Optional argument --log: logs output to system log.
"""
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import get_resource_by_shortkey
from haystack import connection_router, connections
from haystack.exceptions import NotHandled
import logging
def has_subfolders(resource):
for f in resource.files.all():
if '/' in f.short_path:
return True
return False
def repair_solr(short_id):
""" Print size and sharing status of a resource """
logger = logging.getLogger(__name__)
try:
res = BaseResource.objects.get(short_id=short_id)
except BaseResource.DoesNotExist:
print("{} does not exist".format(short_id))
# instance with proper type
instance = res.get_content_model()
assert instance, (res, res.content_model)
print("re-indexing {} in solr".format(short_id))
# instance of BaseResource matching real instance
baseinstance = BaseResource.objects.get(pk=instance.pk)
basesender = BaseResource
using_backends = connection_router.for_write(instance=baseinstance)
for using in using_backends:
# if object is public/discoverable or becoming public/discoverable, index it
if instance.raccess.public or instance.raccess.discoverable:
try:
index = connections[using].get_unified_index().get_index(basesender)
index.update_object(baseinstance, using=using)
except NotHandled:
logger.exception(
"Failure: changes to %s with short_id %s not added to Solr Index.",
str(type(instance)), baseinstance.short_id)
# if object is private or becoming private, delete from index
else:
try:
index = connections[using].get_unified_index().get_index(basesender)
index.remove_object(baseinstance, using=using)
except NotHandled:
logger.exception("Failure: delete of %s with short_id %s failed.",
str(type(instance)), baseinstance.short_id)
class Command(BaseCommand):
help = "Print size information."
def add_arguments(self, parser):
# a list of resource id's: none does nothing.
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--log',
action='store_true', # True for presence, False for absence
dest='log', # value is options['log']
help='log errors to system log',
)
parser.add_argument(
'--type',
dest='type',
help='limit to resources of a particular type'
)
parser.add_argument(
'--storage',
dest='storage',
help='limit to specific storage medium (local, user, federated)'
)
parser.add_argument(
'--access',
dest='access',
help='limit to specific access class (public, discoverable, private)'
)
parser.add_argument(
'--has_subfolders',
action='store_true', # True for presence, False for absence
dest='has_subfolders', # value is options['has_subfolders']
help='limit to resources with subfolders',
)
def repair_filtered_solr(self, resource, options):
if (options['type'] is None or resource.resource_type == options['type']) and \
(options['storage'] is None or resource.storage_type == options['storage']) and \
(options['access'] != 'public' or resource.raccess.public) and \
(options['access'] != 'discoverable' or resource.raccess.discoverable) and \
(options['access'] != 'private' or not resource.raccess.discoverable) and \
(not options['has_subfolders'] or has_subfolders(resource)):
storage = resource.get_irods_storage()
if storage.exists(resource.root_path):
repair_solr(resource.short_id)
else:
print("{} does not exist in iRODS".format(resource.short_id))
def handle(self, *args, **options):
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
resource = get_resource_by_shortkey(rid)
self.repair_filtered_solr(resource, options)
else:
for resource in BaseResource.objects.all():
self.repair_filtered_solr(resource, options)
|
Python
| 0
|
@@ -5,113 +5,72 @@
his
-lists all the large resources and their statuses.%0A This helps in checking that they download properly.%0A
+re-indexes resources in SOLR to fix problems during SOLR builds.
%0A* B
@@ -609,40 +609,37 @@
%22%22%22
-Print size and sharing status of
+Repair SOLR index content for
a r
@@ -2294,31 +2294,48 @@
= %22
-Print size information.
+Repair SOLR index for a set of resources
%22%0A%0A
|
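The option filtering in ``repair_filtered_solr`` above chains several "unset or equal" tests. The same predicate, reduced to plain dicts so it can be exercised standalone (the ``has_subfolders`` clause is dropped for brevity; field names are illustrative):

def matches(resource, options):
    return ((options['type'] is None or resource['resource_type'] == options['type']) and
            (options['storage'] is None or resource['storage_type'] == options['storage']) and
            (options['access'] != 'public' or resource['public']) and
            (options['access'] != 'discoverable' or resource['discoverable']) and
            (options['access'] != 'private' or not resource['discoverable']))

res = {'resource_type': 'Composite', 'storage_type': 'local',
       'public': True, 'discoverable': True}
assert matches(res, {'type': None, 'storage': 'local', 'access': 'public'})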
20e63fb5b5a02966acbe66f3cda19bc59ff89934
|
Set a default value for a default_roles
|
monasca_log_api/conf/role_middleware.py
|
monasca_log_api/conf/role_middleware.py
|
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
role_m_opts = [
cfg.ListOpt(name='path',
default='/',
help='List of paths where middleware applies to'),
cfg.ListOpt(name='default_roles',
default=None,
help='List of roles allowed to enter api'),
cfg.ListOpt(name='agent_roles',
default=None,
help=('List of roles, that if set, mean that request '
'comes from agent, thus is authorized in the same '
'time')),
cfg.ListOpt(name='delegate_roles',
default=['admin'],
help=('Roles that are allowed to POST logs on '
'behalf of another tenant (project)'))
]
role_m_group = cfg.OptGroup(name='roles_middleware', title='roles_middleware')
def register_opts(conf):
conf.register_group(role_m_group)
conf.register_opts(role_m_opts, role_m_group)
def list_opts():
return role_m_group, role_m_opts
|
Python
| 0.000915
|
@@ -798,36 +798,48 @@
default=
-None
+%5B'monasca-user'%5D
,%0A
|
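The diff above replaces a ``None`` default with ``['monasca-user']``. A minimal sketch of how such a ListOpt default is registered and read back with oslo.config (group and option names mirror the module above; oslo.config is assumed installed):

from oslo_config import cfg

conf = cfg.ConfigOpts()
group = cfg.OptGroup(name='roles_middleware', title='roles_middleware')
conf.register_group(group)
conf.register_opts([cfg.ListOpt(name='default_roles',
                                default=['monasca-user'],
                                help='List of roles allowed to enter api')],
                   group)
conf([])  # parse an empty command line; the default applies
assert conf.roles_middleware.default_roles == ['monasca-user']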
8a0d31d59ae33f768d8ae8821cec6d0e8897267c
|
Extend testing
|
bluebottle/offices/tests/test_admin.py
|
bluebottle/offices/tests/test_admin.py
|
from django.contrib.admin.sites import AdminSite
from django.urls import reverse
from bluebottle.geo.admin import LocationAdmin
from bluebottle.geo.models import Location
from bluebottle.initiatives.admin import InitiativeAdmin
from bluebottle.initiatives.models import Initiative, InitiativePlatformSettings
from bluebottle.initiatives.tests.factories import InitiativeFactory
from bluebottle.offices.admin import OfficeSubRegionAdmin, OfficeRegionAdmin
from bluebottle.offices.models import OfficeSubRegion, OfficeRegion
from bluebottle.offices.tests.factories import OfficeSubRegionFactory, OfficeRegionFactory
from bluebottle.test.utils import BluebottleTestCase
class MockRequest(object):
pass
class OfficeAdminTest(BluebottleTestCase):
"""
Test Offices in admin
"""
def setUp(self):
super(OfficeAdminTest, self).setUp()
self.africa = OfficeRegionFactory.create(name='Africa')
self.europe = OfficeRegionFactory.create(name='Europe')
self.bulgaria = OfficeSubRegionFactory.create(name='Bulgaria', region=self.europe)
OfficeSubRegionFactory.create_batch(6, region=self.europe)
self.ghana = OfficeSubRegionFactory.create(name='Ghana', region=self.africa)
OfficeSubRegionFactory.create_batch(3, region=self.africa)
self.location1 = Location.objects.create(
name='Lyutidol',
subregion=self.bulgaria
)
self.location2 = Location.objects.create(
name='Sofia',
subregion=self.bulgaria
)
self.location3 = Location.objects.create(
name='Lozenets',
subregion=self.bulgaria
)
self.location4 = Location.objects.create(
name='Batak',
subregion=self.bulgaria
)
self.location5 = Location.objects.create(
name='Accra',
subregion=self.ghana
)
self.site = AdminSite()
self.location_admin = LocationAdmin(Location, self.site)
self.subregion_admin = OfficeSubRegionAdmin(OfficeSubRegion, self.site)
self.region_admin = OfficeRegionAdmin(OfficeRegion, self.site)
self.initiative_admin = InitiativeAdmin(Initiative, self.site)
self.initiatives_url = reverse('admin:initiatives_initiative_changelist')
self.activities_url = reverse('admin:activities_activity_changelist')
InitiativeFactory.create(location=self.location1)
InitiativeFactory.create_batch(3, location=self.location2)
InitiativeFactory.create_batch(2, location=self.location3)
InitiativeFactory.create_batch(4, location=self.location4)
InitiativeFactory.create_batch(8, location=self.location5)
def test_initiatives_link(self):
initiatives_link = self.location_admin.initiatives(self.location1)
self.assertTrue('>1<' in initiatives_link)
self.assertTrue(
'/en/admin/initiatives/initiative/?location__id__exact={}'.format(
self.location1.id
) in initiatives_link
)
initiatives_link = self.location_admin.initiatives(self.location5)
self.assertTrue('>8<' in initiatives_link)
self.assertTrue(
'/en/admin/initiatives/initiative/?location__id__exact={}'.format(
self.location5.id
) in initiatives_link
)
def test_initiatives_link_regions_enabled(self):
initiative_settings = InitiativePlatformSettings.objects.get()
initiative_settings.enable_office_regions = True
initiative_settings.save()
initiatives_link = self.region_admin.initiatives(self.europe)
self.assertTrue('>10<' in initiatives_link)
self.assertTrue(
'/en/admin/initiatives/initiative/?location__subregion__region__id__exact={}'.format(
self.europe.id
) in initiatives_link
)
initiatives_link = self.region_admin.initiatives(self.africa)
self.assertTrue('>8<' in initiatives_link)
self.assertTrue(
'/en/admin/initiatives/initiative/?location__subregion__region__id__exact={}'.format(
self.africa.id
) in initiatives_link
)
def test_office_filters(self):
request = MockRequest()
filters = self.initiative_admin.get_list_filter(request)
self.assertTrue('location' in filters)
self.assertFalse('location__subregion_exact_id' in filters)
self.assertFalse('location__subregion__region_exact_id' in filters)
def test_office_filters_regions_enabled(self):
self.initiative_admin = InitiativeAdmin(Initiative, self.site)
request = MockRequest()
initiative_settings = InitiativePlatformSettings.objects.get()
initiative_settings.enable_office_regions = True
initiative_settings.save()
filters = self.initiative_admin.get_list_filter(request)
self.assertTrue('location__subregion' in filters)
self.assertTrue('location__subregion__region' in filters)
|
Python
| 0
|
@@ -644,32 +644,37 @@
mport Bluebottle
+Admin
TestCase%0A%0A%0Aclass
@@ -738,16 +738,21 @@
uebottle
+Admin
TestCase
@@ -5042,28 +5042,371 @@
region__region' in filters)%0A
+%0A def test_office_admin(self):%0A self.client.force_login(self.superuser)%0A url = reverse('admin:geo_location_changelist')%0A response = self.client.get(url)%0A self.assertEquals(response.status_code, 200)%0A self.assertContains(response, 'Office subregion')%0A self.assertContains(response, 'Office region')%0A
|
5cb2c4b91f42a7c961d129a402a11515bc1f8c55
|
add some missing files
|
hs_model_program/migrations/0001_initial.py
|
hs_model_program/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import hs_core.models
import datetime
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('pages', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('hs_core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ModelProgramMetaData',
fields=[
('coremetadata_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='hs_core.CoreMetaData')),
],
options={
},
bases=('hs_core.coremetadata',),
),
migrations.CreateModel(
name='ModelProgramResource',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
('comments_count', models.IntegerField(default=0, editable=False)),
('public', models.BooleanField(default=True, help_text=b'If this is true, the resource is viewable and downloadable by anyone')),
('frozen', models.BooleanField(default=False, help_text=b'If this is true, the resource should not be modified')),
('do_not_distribute', models.BooleanField(default=False, help_text=b'If this is true, the resource owner has to designate viewers')),
('discoverable', models.BooleanField(default=True, help_text=b'If this is true, it will turn up in searches.')),
('published_and_frozen', models.BooleanField(default=False, help_text=b'Once this is true, no changes can be made to the resource')),
('content', models.TextField()),
('short_id', models.CharField(default=hs_core.models.short_id, max_length=32, db_index=True)),
('doi', models.CharField(help_text=b"Permanent identifier. Never changes once it's been set.", max_length=1024, null=True, db_index=True, blank=True)),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
('creator', models.ForeignKey(related_name='creator_of_hs_model_program_modelprogramresource', to=settings.AUTH_USER_MODEL, help_text=b'This is the person who first uploaded the resource')),
('edit_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can edit the resource', related_name='group_editable_hs_model_program_modelprogramresource', null=True, to='auth.Group', blank=True)),
('edit_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can edit the resource', related_name='user_editable_hs_model_program_modelprogramresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('last_changed_by', models.ForeignKey(related_name='last_changed_hs_model_program_modelprogramresource', to=settings.AUTH_USER_MODEL, help_text=b'The person who last changed the resource', null=True)),
('owners', models.ManyToManyField(help_text=b'The person who uploaded the resource', related_name='owns_hs_model_program_modelprogramresource', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(related_name='modelprogramresources', verbose_name='Author', to=settings.AUTH_USER_MODEL)),
('view_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can view the resource', related_name='group_viewable_hs_model_program_modelprogramresource', null=True, to='auth.Group', blank=True)),
('view_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can view the resource', related_name='user_viewable_hs_model_program_modelprogramresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Model Program Resource',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='MpMetadata',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('software_version', models.CharField(default=b'1.0', max_length=255, blank=True, help_text=b'The software version of the model', null=True, verbose_name=b'Version ')),
('software_language', models.CharField(default=b'', max_length=100, blank=True, help_text=b'The programming language(s) that the model was written in', null=True, verbose_name=b'Language')),
('operating_sys', models.CharField(default=b'unknown', max_length=255, blank=True, help_text=b'Compatible operating systems', null=True, verbose_name=b'Operating System')),
('date_released', models.DateTimeField(default=datetime.datetime(2015, 3, 5, 18, 29, 54, 938656), help_text=b'The date of the software release (m/d/Y H:M)', null=True, verbose_name=b'Release Date', blank=True)),
('program_website', models.CharField(default=None, max_length=255, blank=True, help_text=b'A URL providing addition information about the software', null=True, verbose_name=b'Website')),
('software_repo', models.CharField(default=None, max_length=255, blank=True, help_text=b'A URL for the source code repository (e.g. git, mercurial, svn)', null=True, verbose_name=b'Software Repository')),
('release_notes', models.CharField(default=b'', choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Notes about the software release (e.g. bug fixes, new functionality)', null=True, verbose_name=b'Release Notes')),
('user_manual', models.CharField(default=None, choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'User manual for the model program (e.g. .doc, .md, .rtf, .pdf)', null=True, verbose_name=b'User Manual')),
('theoretical_manual', models.CharField(default=None, choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Theoretical manual for the model program (e.g. .doc, .md, .rtf, .pdf)', null=True, verbose_name=b'Theoretical Manual')),
('source_code', models.CharField(default=None, choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Archive of the source code for the model (e.g. .zip, .tar)', null=True, verbose_name=b'Source Code')),
('content_type', models.ForeignKey(related_name='hs_model_program_mpmetadata_related', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
Python
| 0.000004
|
@@ -5265,26 +5265,24 @@
5,
-18
+20
, 29,
-5
4,
-938656
+68151
), h
|
dd725349e0613461bdbe75a0c32115b323e9ccc3
|
change settings import in wsgi for Travis CI
|
reflow/wsgi.py
|
reflow/wsgi.py
|
"""
WSGI config for reflow project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# Set matplotlib configuration directory, else Django complains it is not writable
# We'll just use a tempfile
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
paths = [
'/srv/django-projects/ReFlow',
'/srv/django-projects/ReFlow/reflow'
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reflow.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
import settings
if settings.INTERACTIVE_DEBUG:
class Debugger:
def __init__(self, object):
self.__object = object
def __call__(self, *args, **kwargs):
import pdb
debugger = pdb.Pdb()
debugger.use_rawinput = 0
debugger.reset()
sys.settrace(debugger.trace_dispatch)
try:
return self.__object(*args, **kwargs)
finally:
debugger.quitting = 1
sys.settrace(None)
application = Debugger(get_wsgi_application())
else:
application = get_wsgi_application()
|
Python
| 0
|
@@ -1327,16 +1327,23 @@
settings
+_sample
%0A%0Aif set
|
6fb1b24a3cf1a4cdb3bd35c6f575d96cb2da9415
|
Add binding for DSA_size
|
cryptography/hazmat/bindings/openssl/dsa.py
|
cryptography/hazmat/bindings/openssl/dsa.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/dsa.h>
"""
TYPES = """
typedef struct dsa_st {
// prime number (public)
BIGNUM *p;
// 160-bit subprime, q | p-1 (public)
BIGNUM *q;
// generator of subgroup (public)
BIGNUM *g;
// private key x
BIGNUM *priv_key;
// public key y = g^x
BIGNUM *pub_key;
...;
} DSA;
typedef struct {
BIGNUM *r;
BIGNUM *s;
} DSA_SIG;
"""
FUNCTIONS = """
DSA *DSA_generate_parameters(int, unsigned char *, int, int *, unsigned long *,
void (*)(int, int, void *), void *);
int DSA_generate_key(DSA *);
DSA *DSA_new(void);
void DSA_free(DSA *);
DSA_SIG *DSA_SIG_new(void);
void DSA_SIG_free(DSA_SIG *);
int i2d_DSA_SIG(const DSA_SIG *, unsigned char **);
DSA_SIG *d2i_DSA_SIG(DSA_SIG **, const unsigned char **, long);
"""
MACROS = """
int DSA_generate_parameters_ex(DSA *, int, unsigned char *, int,
int *, unsigned long *, BN_GENCB *);
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
|
Python
| 0
|
@@ -1416,16 +1416,43 @@
long);%0A
+int DSA_size(const DSA *);%0A
%22%22%22%0A%0AMAC
|
9bff4e9ed59d2c15b2da681476385274f5ef9059
|
Fix create_sample_event
|
src/sentry/management/commands/create_sample_event.py
|
src/sentry/management/commands/create_sample_event.py
|
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.core.management.base import BaseCommand, CommandError, make_option
class Command(BaseCommand):
help = 'Creates a sample event in Sentry (if applicable)'
option_list = BaseCommand.option_list + (
make_option('--project', dest='project'),
make_option('--platform', dest='platform'),
)
def handle(self, **options):
from django.conf import settings
from sentry.constants import PLATFORM_LIST
from sentry.models import Project
from sentry.utils.samples import create_sample_event
if not options['project']:
project = Project.objects.get(settings.SENTRY_DEFAULT_PROJECT)
else:
if options['project'].isdigit():
project = Project.objects.get(id=options['project'])
elif '/' in options['project']:
t_slug, p_slug = options['project'].split('/', 1)
project = Project.objects.get(slug=p_slug, team__slug=t_slug)
else:
raise CommandError('Project must be specified as team-slug/project-slug or a project id')
if options['platform'] not in PLATFORM_LIST:
raise CommandError('Invalid platform. Must specify one of: %s' % ', '.join(PLATFORM_LIST))
platform = options['platform'] or project.platform
event = create_sample_event(project, platform)
if not event:
raise CommandError('Unable to create an event for platform %r' % (str(platform),))
self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
|
Python
| 0.000026
|
@@ -846,16 +846,19 @@
cts.get(
+id=
settings
@@ -869,16 +869,8 @@
TRY_
-DEFAULT_
PROJ
|
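The fix above adds ``id=``: ORM-style ``get`` lookups take field conditions as keyword arguments, and a bare positional value is not a primary-key lookup. A toy, non-Django sketch of that contract:

class Manager:
    def __init__(self, rows):
        self._rows = rows

    def get(self, **lookups):
        # keyword-only, like the fixed call Project.objects.get(id=...)
        hits = [r for r in self._rows
                if all(r.get(k) == v for k, v in lookups.items())]
        if len(hits) != 1:
            raise LookupError('expected exactly one match')
        return hits[0]

projects = Manager([{'id': 1, 'slug': 'internal'}])
assert projects.get(id=1)['slug'] == 'internal'
# projects.get(1) raises TypeError, much like the pre-fix positional call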
1abc26a5db9ed323d0f81fe11135627dfab1d22e
|
Bump pipeline version. Closes #104.
|
pipeline/pipeline.py
|
pipeline/pipeline.py
|
import atexit
from distutils.version import StrictVersion
from os import environ as env
import os
import subprocess
import sys
import seesaw
from seesaw.externalprocess import WgetDownload, RsyncUpload
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import LimitConcurrent
from seesaw.util import find_executable
# FIXME: This is a bit of a hack.
#
# Pipeline scripts are run with pwd set to their directory, which is why
# getcwd will (often) return the Right Thing. A more robust solution would be
# nice, though.
sys.path.append(os.getcwd())
from archivebot import control
from archivebot import shared_config
from archivebot.seesaw import extensions
from archivebot.seesaw import monitoring
from archivebot.seesaw.wpull import WpullArgs
from archivebot.seesaw.tasks import GetItemFromQueue, StartHeartbeat, \
SetFetchDepth, PreparePaths, WriteInfo, DownloadUrlFile, \
RelabelIfAborted, MoveFiles, SetWarcFileSizeInRedis, StopHeartbeat, \
MarkItemAsDone
VERSION = "20141219.02"
PHANTOMJS_VERSION = '1.9.8'
EXPIRE_TIME = 60 * 60 * 48 # 48 hours between archive requests
WPULL_EXE = find_executable('Wpull', None, [ './wpull' ])
PHANTOMJS = find_executable('PhantomJS', PHANTOMJS_VERSION,
['phantomjs', './phantomjs', '../phantomjs'], '-v')
version_integer = (sys.version_info.major * 10) + sys.version_info.minor
assert version_integer >= 33, \
"This pipeline requires Python >= 3.3. You are running %s." % \
sys.version
assert WPULL_EXE, 'No usable Wpull found.'
assert PHANTOMJS, 'PhantomJS %s was not found.' % PHANTOMJS_VERSION
assert 'RSYNC_URL' in env, 'RSYNC_URL not set.'
assert 'REDIS_URL' in env, 'REDIS_URL not set.'
assert 'FINISHED_WARCS_DIR' in env, 'FINISHED_WARCS_DIR not set.'
assert 'TMUX' in env or 'STY' in env or env.get('NO_SCREEN') == "1", \
"Refusing to start outside of screen or tmux, set NO_SCREEN=1 to override"
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.8b1"):
raise Exception(
"Needs seesaw@python3/development version 0.1.8b1 or higher. "
"You have version {0}".format(seesaw.__version__)
)
assert downloader not in ('ignorednick', 'YOURNICKHERE'), 'please use a real nickname'
RSYNC_URL = env['RSYNC_URL']
REDIS_URL = env['REDIS_URL']
LOG_CHANNEL = shared_config.log_channel()
PIPELINE_CHANNEL = shared_config.pipeline_channel()
# ------------------------------------------------------------------------------
# CONTROL CONNECTION
# ------------------------------------------------------------------------------
control = control.Control(REDIS_URL, LOG_CHANNEL, PIPELINE_CHANNEL)
# ------------------------------------------------------------------------------
# SEESAW EXTENSIONS
# ------------------------------------------------------------------------------
extensions.install_stdout_extension(control)
# ------------------------------------------------------------------------------
# PIPELINE
# ------------------------------------------------------------------------------
project = Project(
title = "ArchiveBot request handler"
)
def wpull_version():
output = subprocess.check_output([WPULL_EXE, '--version'],
stderr=subprocess.STDOUT)
return output.decode('utf-8').strip()
class AcceptAny:
def __contains__(self, item):
return True
DEFAULT_USER_AGENT = \
'ArchiveTeam ArchiveBot/%s (wpull %s) and not Mozilla/5.0 ' \
'(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/38.0.2125.101 Safari/537.36' % (VERSION, wpull_version())
_, _, _, pipeline_id = monitoring.pipeline_id()
pipeline = Pipeline(
GetItemFromQueue(control, pipeline_id, ao_only=env.get('AO_ONLY')),
StartHeartbeat(control),
SetFetchDepth(),
PreparePaths(),
WriteInfo(),
DownloadUrlFile(control),
WgetDownload(
WpullArgs(default_user_agent=DEFAULT_USER_AGENT, wpull_exe=WPULL_EXE,
phantomjs_exe=PHANTOMJS, finished_warcs_dir=os.environ["FINISHED_WARCS_DIR"]),
accept_on_exit_code=AcceptAny(),
env={
'ITEM_IDENT': ItemInterpolation('%(ident)s'),
'LOG_KEY': ItemInterpolation('%(log_key)s'),
'REDIS_URL': REDIS_URL,
'PATH': os.environ['PATH']
}
),
RelabelIfAborted(control),
WriteInfo(),
MoveFiles(),
SetWarcFileSizeInRedis(control),
LimitConcurrent(2,
RsyncUpload(
target = RSYNC_URL,
target_source_path = ItemInterpolation("%(data_dir)s"),
files=ItemValue("all_target_files"),
extra_args = [
'--partial',
'--partial-dir', '.rsync-tmp'
]
)
),
StopHeartbeat(),
MarkItemAsDone(control, EXPIRE_TIME)
)
def stop_control():
control.unregister_pipeline(pipeline_id)
pipeline.on_cleanup += stop_control
# Activate system monitoring.
monitoring.start(pipeline, control, VERSION, downloader)
print('*' * 60)
print('Pipeline ID: %s' % pipeline_id)
if env.get('AO_ONLY'):
print('!ao-only mode enabled')
print('*' * 60)
print()
# vim:ts=4:sw=4:et:tw=78
|
Python
| 0
|
@@ -1083,13 +1083,13 @@
1412
-19.02
+23.01
%22%0APH
|
893e05540c640c4598477a39688a773556bebad9
|
Update HDF5 to 1.8.16
|
var/spack/packages/hdf5/package.py
|
var/spack/packages/hdf5/package.py
|
from spack import *
class Hdf5(Package):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "http://www.hdfgroup.org/HDF5/"
url = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
version('1.8.13', 'c03426e9e77d7766944654280b467289')
depends_on("mpi")
depends_on("zlib")
# TODO: currently hard-coded to use OpenMPI
def install(self, spec, prefix):
configure(
"--prefix=%s" % prefix,
"--with-zlib=%s" % spec['zlib'].prefix,
"--enable-parallel",
"--enable-shared",
"CC=%s" % spec['mpich'].prefix.bin + "/mpicc",
"CXX=%s" % spec['mpich'].prefix.bin + "/mpic++")
make()
make("install")
def url_for_version(self, version):
v = str(version)
if version == Version("1.2.2"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + ".tar.gz"
elif version < Version("1.7"):
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + version.up_to(2) + "/hdf5-" + v + ".tar.gz"
else:
return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + "/src/hdf5-" + v + ".tar.gz"
|
Python
| 0
|
@@ -499,16 +499,74 @@
3%0A %0A
+ version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')%0A
vers
|
d703a2047c9ab6f1c4e8154782f5d503d9701550
|
Allow empty strings in body
|
oauthlib/signature.py
|
oauthlib/signature.py
|
from __future__ import absolute_import
"""
3.4. Signature
This module represents a direct implementation of section 3.4 of the spec.
http://tools.ietf.org/html/rfc5849#section-3.4
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
"""
import binascii
import hashlib
import hmac
import urlparse
from . import utils
def construct_base_string(http_method, base_string_uri,
normalized_encoded_request_parameters):
"""Construct the final base string to use for signing.
Per `section 3.4.1.1`_ of the spec.
.. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
return '&'.join((
utils.escape(http_method.upper()),
utils.escape(base_string_uri),
utils.escape(normalized_encoded_request_parameters),
))
def normalize_base_string_uri(uri):
"""Normalize a uri for use in constructing a base string.
Per `section 3.4.1.2`_ of the spec.
.. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
"""
if not isinstance(uri, unicode):
raise ValueError('uri must be a unicode object.')
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
# scheme and netloc must be lowercase (3.4.1.2 #1)
scheme = scheme.lower()
netloc = netloc.lower()
# strip default port from host if explicitly present (3.4.1.2 #3)
default_ports = (
(u'http', u'80'),
(u'https', u'443'),
)
if u':' in netloc:
host, port = netloc.split(u':', 1)
if (scheme, port) in default_ports:
netloc = host
return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def collect_parameters(uri_query='', body='', headers=None,
exclude_oauth_signature=True):
"""Collect parameters from the uri query, authorization header, and request
body.
String parameters will be decoded into unicode using utf-8.
Parameters starting with `oauth_` will be unescaped.
Per `section 3.4.1.3.1`_ of the spec.
.. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
"""
headers = headers or {}
params = []
if uri_query:
params.extend(urlparse.parse_qsl(uri_query, True))
if headers:
# look for an authorization header (case-insensitive)
headers_lower = dict((k.lower(), v) for k,v in headers.items())
authorization_header = headers_lower.get('authorization')
if authorization_header is not None:
params.extend(utils.parse_authorization_header(
authorization_header))
if body:
params.extend(urlparse.parse_qsl(body))
# ensure all parameters are unicode and not escaped
unicode_params = []
for k, v in params:
if isinstance(k, str):
k = k.decode('utf-8')
if isinstance(v, str):
if v.startswith('oauth_'):
v = utils.unescape(v)
else:
v = v.decode('utf-8')
unicode_params.append((k, v))
# exclude particular parameters according to the spec
exclude_params = [
u'realm',
]
if exclude_oauth_signature:
exclude_params.append(u'oauth_signature')
return filter(lambda i: i[0] not in exclude_params, unicode_params)
def normalize_parameters(params):
"""Normalize querystring parameters for use in constructing a base string.
Per `section 3.4.1.3.2`_ of the spec.
.. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
"""
# Escape key values before sorting
key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string and return.
return u'&'.join([u'{0}={1}'.format(k, v) for k, v in key_values])
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
"""Sign a request using HMAC-SHA1.
Per `section 3.4.2`_ of the spec.
.. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
"""
key = '&'.join((utils.escape(client_secret),
utils.escape(resource_owner_secret)))
signature = hmac.new(key, base_string, hashlib.sha1)
return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
def sign_rsa_sha1(base_string, rsa_private_key):
"""Sign a request using RSASSA-PKCS #1 v1.5.
Per `section 3.4.3`_ of the spec.
Note this method requires the PyCrypto library.
.. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
"""
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
key = RSA.importKey(rsa_private_key)
h = SHA.new(base_string)
p = PKCS1_v1_5.new(key)
return binascii.b2a_base64(p.sign(h))[:-1].decode('utf-8')
def sign_plaintext(client_secret, resource_owner_secret):
"""Sign a request using plaintext.
Per `section 3.4.4`_ of the spec.
.. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
"""
return u'&'.join((utils.escape(client_secret),
utils.escape(resource_owner_secret)))
|
Python
| 0.999999
|
@@ -3047,16 +3047,22 @@
qsl(body
+, True
))%0A%0A
|
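The one-character change above passes ``True`` as ``parse_qsl``'s ``keep_blank_values`` argument, so empty body values still enter the signature base string. Python 3 equivalent of the behavior (the module itself is Python 2 ``urlparse``):

from urllib.parse import parse_qsl

body = 'a=&b=1'
assert parse_qsl(body) == [('b', '1')]                    # blank 'a' dropped
assert parse_qsl(body, True) == [('a', ''), ('b', '1')]   # blank 'a' kept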
f28daad980dd95584dabe83a102ecdd0e1cac517
|
remove reference to unused summaries file
|
music_spectrogram_diffusion/__init__.py
|
music_spectrogram_diffusion/__init__.py
|
# Copyright 2022 The Music Spectrogram Diffusion Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base module for Music Spectrogram Diffusion."""
from music_spectrogram_diffusion import audio_codecs
from music_spectrogram_diffusion import datasets
from music_spectrogram_diffusion import event_codec
from music_spectrogram_diffusion import inference
from music_spectrogram_diffusion import layers
from music_spectrogram_diffusion import metrics
from music_spectrogram_diffusion import note_sequences
from music_spectrogram_diffusion import preprocessors
from music_spectrogram_diffusion import run_length_encoding
from music_spectrogram_diffusion import summaries
from music_spectrogram_diffusion import tasks
from music_spectrogram_diffusion import vocabularies
|
Python
| 0
|
@@ -1122,58 +1122,8 @@
ing%0A
-from music_spectrogram_diffusion import summaries%0A
from
|
5eb85dcc98adde698001405c60c44732964ec04a
|
Fix user messages not being sent
|
txircd/modules/cmd_privmsg_notice.py
|
txircd/modules/cmd_privmsg_notice.py
|
from twisted.words.protocols import irc
from txircd.modbase import Command
class MessageCommand(object):
def __init__(self, ircd):
self.ircd = ircd
def onUse(self, cmd, user, data):
if ("targetchan" not in data or not data["targetchan"]) and ("targetuser" not in data or not data["targetuser"]):
return
if "message" not in data or not data["message"]:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return
targetChans = data["targetchan"]
targetUsers = data["targetuser"]
channelModifiers = data["chanmod"]
message = data["message"]
for index, channel in enumerate(data["targetchan"]):
if channelModifiers[index]:
prefixLevel = self.prefixes[self.prefix_symbols[channelModifiers[index]]][0]
for u in channel.users:
if u != user and u.channels[channel.name]["status"] and self.prefixes[u.channels[channel.name]["status"][0]][0] >= prefixLevel:
u.sendMessage(cmd, ":{}".format(message), to="{}{}".format(channelModifiers[index], channel.name), prefix=user.prefix())
else:
for u in channel.users:
if u != user:
u.sendMessage(cmd, ":{}".format(message), to=channel.name, prefix=user.prefix())
def processParams(self, cmd, user, params):
if user.registered > 0:
user.sendMessage(irc.ERR_NOTREGISTERED, cmd, ":You have not registered")
return {}
if not params:
user.sendMessage(irc.ERR_NEEDMOREPARAMS, cmd, ":Not enough parameters")
return {}
if len(params) < 2:
user.sendMessage(irc.ERR_NOTEXTTOSEND, ":No text to send")
return {}
targetChans = []
targetUsers = []
targetChanModifiers = []
for target in params[0].split(","):
if target in self.ircd.users:
targetUsers.append(self.ircd.users[target])
elif target in self.ircd.channels:
targetChans.append(self.ircd.channels[target])
targetChanModifiers.append("")
elif target[0] in self.ircd.prefix_symbols and target[1:] in self.ircd.channels:
targetChans.append(self.ircd.channels[target[1:]])
targetChanModifiers.append(target[0])
else:
user.sendMessage(irc.ERR_NOSUCHNICK, target, ":No such nick/channel")
return {
"user": user,
"targetchan": targetChans,
"chanmod": targetChanModifiers,
"targetuser": targetUsers,
"message": params[1]
}
class PrivMsgCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("PRIVMSG", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("PRIVMSG", user, params)
class NoticeCommand(Command):
def __init__(self, msgHandler):
self.msg_handler = msgHandler
def onUse(self, user, data):
self.msg_handler.onUse("NOTICE", user, data)
def processParams(self, user, params):
return self.msg_handler.processParams("NOTICE", user, params)
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
messageHandler = MessageCommand(self.ircd)
return {
"commands": {
"PRIVMSG": PrivMsgCommand(messageHandler),
"NOTICE": NoticeCommand(messageHandler)
}
}
def cleanup(self):
del self.ircd.commands["PRIVMSG"]
del self.ircd.commands["NOTICE"]
|
Python
| 0.000188
|
@@ -435,78 +435,8 @@
urn%0A
-%09%09targetChans = data%5B%22targetchan%22%5D%0A%09%09targetUsers = data%5B%22targetuser%22%5D%0A
%09%09ch
@@ -1095,16 +1095,122 @@
efix())%0A
+%09%09for udata in data%5B%22targetuser%22%5D:%0A%09%09%09udata.sendMessage(cmd, %22:%7B%7D%22.format(message), prefix=user.prefix())%0A
%09%0A%09def p
|
376b8aa5b77066e06c17f41d65fe32a3c2bdef1f
|
Add a default value to the header limit
|
geo.py
|
geo.py
|
#! /usr/bin/python3
# -*- coding-utf-8 -*-
"""
This script transform a md into a plain html in the context of a
documentation for Kit&Pack.
"""
import mmap
import yaml
print("---------------------------- geo --")
print("-- by antoine.delhomme@espci.org --")
print("-----------------------------------")
doc_in = "./001-v2-doc.md"
class geoReader():
def __init__(self, doc_in):
self.doc_in = doc_in
self.header = None
def __enter__(self):
"""Open the file.
"""
self.f = open(self.doc_in, 'r')
return self
def __exit__(self, type, value, traceback):
"""Close the file.
"""
self.f.close()
def parseHeader(self):
"""Parse the header of the file.
"""
s = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
self.header_limit = s.find(b'---')
if self.header_limit != -1:
self.header = yaml.load(s[0:self.header_limit])
print(self.header['name'])
else:
print("Cannot load the header")
# Read the document
with geoReader(doc_in) as g:
g.parseHeader()
|
Python
| 0.000001
|
@@ -435,16 +435,47 @@
r = None
+%0A self.header_limit = -1
%0A%0A de
|
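The diff above initializes ``header_limit`` to -1 because ``mmap.find``, like ``bytes.find``, returns -1 on a miss; -1 is therefore the natural "not found / not parsed yet" sentinel that ``parseHeader`` already checks against. A bytes-level sketch without a file:

data = b'name: doc\n---\nbody'
assert data.find(b'---') == 10                  # offset of the header delimiter
assert b'no delimiter here'.find(b'---') == -1  # the sentinel value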
8f3ff0cfd70bfe4eaa9e017323971bad453c93f5
|
set edit as bot
|
trunk/toolserver/pui.py
|
trunk/toolserver/pui.py
|
#!usr/bin/python
# -*- coding: utf-8 -*
#
# (C) Legoktm 2008-2009, MIT License
#
import re, sys, os
sys.path.append(os.environ['HOME'] + '/pythonwikibot')
#sys.path.append('/Users/kman/projects/pywikibot')
import wiki
wiki.setUser('Legobot')
page = wiki.Page('Wikipedia:Possibly unfree images')
try:
wikitext = state0 = page.get()
except wiki.IsRedirectPage:
page = wiki.Page('Wikipedia:Possibly unfree files')
wikitext = state0 = page.get()
wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
wiki.showDiff(state0, wikitext)
page.put(wikitext,EditMsg)
|
Python
| 0.000002
|
@@ -680,10 +680,20 @@
,EditMsg
+, bot=True
)%0A
|
050cc7a74e68b0515ceac1f53cbc20aa6e6cd498
|
Create a Session object.
|
octohat/connection.py
|
octohat/connection.py
|
# Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import requests
from .response import parse_response
class Pager(object):
    def __init__(self, conn, uri, params, max_pages=0):
        """Iterator object handling pagination of Connection.send (method: GET)

        conn (octohub.Connection): Connection object
        uri (str): Request URI (e.g., /user/issues)
        params (dict): Parameters to include in request
        max_pages (int): Maximum amount of pages to get (0 for all)
        """
        self.conn = conn
        self.uri = uri
        self.params = params
        self.max_pages = max_pages
        self.count = 0

    def __iter__(self):
        while True:
            self.count += 1
            response = self.conn.send('GET', self.uri, self.params)
            yield response

            if self.count == self.max_pages:
                break

            if not 'next' in list(response.parsed_link.keys()):
                break

            self.uri = response.parsed_link.next.uri
            self.params = response.parsed_link.next.params


class Connection(object):
    def __init__(self, token=None):
        """OctoHub connection

        token (str): GitHub Token (anonymous if not provided)
        """
        self.endpoint = 'https://api.github.com'
        self.headers = {'User-Agent': 'octohub'}

        if token:
            self.headers['Authorization'] = 'token %s' % token

    def send(self, method, uri, params={}, data=None):
        """Prepare and send request

        method (str): Request HTTP method (e.g., GET, POST, DELETE, ...)
        uri (str): Request URI (e.g., /user/issues)
        params (dict): Parameters to include in request
        data (str | file type object): data to include in request
        returns: requests.Response object, including:
            response.parsed (AttrDict): parsed response when applicable
            response.parsed_link (AttrDict): parsed header link when applicable
            http://docs.python-requests.org/en/latest/api/#requests.Response
        """
        url = self.endpoint + uri
        kwargs = {'headers': self.headers, 'params': params, 'data': data}
        response = requests.request(method, url, **kwargs)

        return parse_response(response)
|
Python
| 0
|
@@ -1638,16 +1638,58 @@
ctohub'%7D
+%0A self.session = requests.Session()
%0A%0A
@@ -2558,24 +2558,28 @@
ponse =
-requests
+self.session
.request
|
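Decoded, the commit creates a single requests.Session in the constructor and routes every request through it; a sketch of the two touched spots:

        self.headers = {'User-Agent': 'octohub'}
        self.session = requests.Session()

        # later, in send():
        response = self.session.request(method, url, **kwargs)

A Session reuses the underlying TCP connection across calls (connection pooling), which pays off for paginated GETs driven by Pager.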
d65aaf7984bc8b6247105c6acce78bd2d50e7b93
|
add exception error messages
|
pycoal/environment.py
|
pycoal/environment.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import call
import spectral
import numpy
from os.path import abspath, dirname, basename, splitext
import pycoal
class EnvironmentalCorrelation:

    def __init__(self):
        """
        Construct a new ``EnvironmentalCorrelation`` object.
        """
        pass

    def intersectProximity(self, miningFilename, vectorFilename, proximity, correlatedFilename):
        """
        Generate an environmental correlation image containing pixels from the
        mining classified image detected within a given distance of features
        within a vector layer.

        Args:
            miningImage (str): filename of the mining classified image
            vectorLayer (str): filename of vector layer
            proximity (float): distance in meters
            correlatedImage (str): filename of the correlated image
        """
        # get path and file names
        outputDirectory = dirname(abspath(correlatedFilename))
        miningName = splitext(basename(abspath(miningFilename)))[0]
        vectorName = splitext(basename(abspath(vectorFilename)))[0]

        # rasterize the vector features to the same dimensions as the mining image
        featureHeaderName = outputDirectory + '/' + miningName + '_' + vectorName + '.hdr'
        self.createEmptyCopy(miningFilename, featureHeaderName)
        featureImageName = featureHeaderName[:-4] + '.img'
        self.rasterize(vectorFilename, featureImageName)

        # generate a proximity map from the features
        proximityHeaderName = outputDirectory + '/' + miningName + '_' + vectorName + '_proximity.hdr'
        proximityImageName = proximityHeaderName[:-4] + '.img'
        self.proximity(featureImageName, proximityImageName)

        # load mining and proximity images and initialize environmental correlation array
        miningImage = spectral.open_image(miningFilename)
        proximityImage = spectral.open_image(proximityHeaderName)
        correlatedImage = numpy.zeros(shape=miningImage.shape, dtype=numpy.uint16)

        # get average pixel size
        if miningImage.metadata.get('map info')[10][-6:].lower() == 'meters':
            xPixelSize = float(miningImage.metadata.get('map info')[5])
            yPixelSize = float(miningImage.metadata.get('map info')[6])
            pixelSize = (xPixelSize + yPixelSize) / 2
        else:
            raise ValueError('Mining image units not in meters.')

        # intersect features within proximity
        for x in range(miningImage.shape[0]):
            for y in range(miningImage.shape[1]):
                if miningImage[x,y,0]==1 and proximityImage[x,y,0]*pixelSize<=proximity:
                    correlatedImage[x,y,0] = miningImage[x,y,0]

        # save the environmental correlation image
        spectral.io.envi.save_classification(
            correlatedFilename,
            correlatedImage,
            class_names=['No data','Data'],
            metadata={
                'data ignore value': 0,
                'description': 'PyCOAL '+pycoal.version+' environmental correlation image.',
                'map info': miningImage.metadata.get('map info')
            })

    def createEmptyCopy(self, sourceFilename, destinationFilename):
        """
        Create an empty copy of a PyCOAL classified image with the same size.

        Args:
            sourceFilename (str): filename of the source image
            destinationFilename (str): filename of the destination image
        """
        # open the source image
        source = spectral.open_image(sourceFilename)

        # create an empty array of the same dimensions
        destination = numpy.zeros(shape=source.shape, dtype=numpy.uint16)

        # save it with source metadata
        spectral.io.envi.save_classification(
            destinationFilename,
            destination,
            class_names=['No data','Data'],
            metadata={
                'data ignore value': 0,
                'map info': source.metadata.get('map info')
            })

    def rasterize(self, vectorFilename, featureFilename):
        """
        Burn features from a vector image onto a raster image.

        Args:
            vectorFilename (str): filename of the vector image
            featureFilename (str): filename of the raster image
        """
        # assume the layer has the same name as the image
        layerName = splitext(basename(vectorFilename))[0]

        # convert vector features into nonzero pixels of the output file
        returncode = call(['gdal_rasterize',
                           '-burn', '1',
                           '-l', layerName,
                           vectorFilename,
                           featureFilename])

        # detect errors
        if returncode != 0:
            raise RuntimeError

    def proximity(self, featureFilename, proximityFilename):
        """
        Generate a proximity map from the features.

        Args:
            featureFilename (str): filename of the feature image
            proximityFilename (str): filename of the proximity image
        """
        # generate an ENVI proximity map with georeferenced units
        returncode = call(['gdal_proximity.py',
                           featureFilename,
                           proximityFilename,
                           '-of', 'envi'])

        # detect errors
        if returncode != 0:
            raise RuntimeError
|
Python
| 0.000001
|
@@ -5347,16 +5347,47 @@
imeError
+('Could not rasterize vector.')
%0A%0A de
@@ -5999,9 +5999,46 @@
imeError
+('Could not generate proximity map.')
%0A
|
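Decoded, both bare raise statements gain messages:

        if returncode != 0:
            raise RuntimeError('Could not rasterize vector.')

and, in proximity():

        if returncode != 0:
            raise RuntimeError('Could not generate proximity map.')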
20cf699df5d81eec071254cdaac13ad7ad49909a
|
fix reducer dereference if replace_reducer is used
|
pydux/create_store.py
|
pydux/create_store.py
|
"""
python + redux == pydux
Redux: http://redux.js.org
A somewhat literal translation of Redux.
Closures in Python are over references, as opposed to
names in JavaScript, so they are read-only. Single-
element arrays are used to create read/write closures.
"""
class ActionTypes(object):
    INIT = '@@redux/INIT'


class StoreDict(dict):
    def get_state(self):
        return self['get_state']()

    def subscribe(self, listener):
        return self['subscribe'](listener)

    def dispatch(self, action):
        return self['dispatch'](action)

    def replace_reducer(self, next_reducer):
        return self['replace_reducer'](next_reducer)


def create_store(reducer, initial_state=None, enhancer=None):
    """
    redux in a nutshell.

    observable has been omitted.

    Args:
        reducer: root reducer function for the state tree
        initial_state: optional initial state data
        enhancer: optional enhancer function for middleware etc.

    Returns:
        a Pydux store
    """
    if enhancer is not None:
        if not hasattr(enhancer, '__call__'):
            raise TypeError('Expected the enhancer to be a function.')
        return enhancer(create_store)(reducer)

    if not hasattr(reducer, '__call__'):
        raise TypeError('Expected the reducer to be a function.')

    # single-element arrays for r/w closure
    current_reducer = [reducer]
    current_state = [initial_state]
    current_listeners = [[]]
    next_listeners = [current_listeners[0]]
    is_dispatching = [False]

    def ensure_can_mutate_next_listeners():
        if next_listeners[0] == current_listeners[0]:
            next_listeners[0] = current_listeners[0][:]

    def get_state():
        return current_state[0]

    def subscribe(listener):
        if not hasattr(listener, '__call__'):
            raise TypeError('Expected listener to be a function.')

        is_subscribed = [True]  # r/w closure
        ensure_can_mutate_next_listeners()
        next_listeners[0].append(listener)

        def unsubcribe():
            if not is_subscribed[0]:
                return
            is_subscribed[0] = False
            ensure_can_mutate_next_listeners()
            index = next_listeners[0].index(listener)
            next_listeners[0].pop(index)

        return unsubcribe

    def dispatch(action):
        if not isinstance(action, dict):
            raise TypeError('Actions must be a dict. '
                            'Use custom middleware for async actions.')
        if 'type' not in action:
            raise ValueError('Actions must have a "type" property. '
                             'Have you misspelled a constant?')
        if is_dispatching[0]:
            raise Exception('Reducers may not dispatch actions.')

        try:
            is_dispatching[0] = True
            current_state[0] = reducer(current_state[0], action)
        finally:
            is_dispatching[0] = False

        listeners = current_listeners[0] = next_listeners[0]
        for listener in listeners:
            listener()

        return action

    def replace_reducer(next_reducer):
        if not hasattr(next_reducer, '__call__'):
            raise TypeError('Expected the next_reducer to be a function')
        current_reducer[0] = next_reducer
        dispatch({'type': ActionTypes.INIT})

    dispatch({'type': ActionTypes.INIT})

    return StoreDict(
        dispatch=dispatch,
        subscribe=subscribe,
        get_state=get_state,
        replace_reducer=replace_reducer,
    )
|
Python
| 0
|
@@ -2844,23 +2844,34 @@
te%5B0%5D =
+current_
reducer
+%5B0%5D
(current
|
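The bug: dispatch closed over the reducer argument captured at store creation, so replace_reducer swapped current_reducer[0] without ever affecting dispatch. Decoded, the fix dereferences the read/write closure array instead:

            current_state[0] = current_reducer[0](current_state[0], action)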
fb51b056ce909028ee4b0cc3e790ae202d8711af
|
Update P06_factorialLog disable logging
|
books/AutomateTheBoringStuffWithPython/Chapter10/P06_factorialLog.py
|
books/AutomateTheBoringStuffWithPython/Chapter10/P06_factorialLog.py
|
# This program calculates factorial and logs debug messages
import logging
logging.basicConfig(level=logging.DEBUG, format=" %(asctime)s - %(levelname)s - %(message)s")
logging.debug("Start of program")
def factorial(n):
    logging.debug("Start of factorial(%s%%)" % n)
    total = 1
    for i in range(1, n + 1):
        total *= i
        logging.debug("i is " + str(i) + ", total is " + str(total))
    logging.debug("End of factorial(%s%%)" % n)
    return total
print(factorial(5))
logging.debug("End of program")
|
Python
| 0
|
@@ -163,16 +163,88 @@
age)s%22)%0A
+logging.disable(logging.CRITICAL) # Stop logging, comment out to debug%0A
logging.
|
acee3e41b45198af4b8a11f5a75bcd62e49864e2
|
fix path
|
tumblr/spiders/index.py
|
tumblr/spiders/index.py
|
# encoding:utf-8
import json
import requests
import scrapy
from lxml import etree
from scrapy.http.request import Request
stream_cursor = "eyJGb2xsb3dlZFNlYXJjaFBvc3QiOltdLCJiZWZvcmVfaWQiOiIxNjI2ODY4NDM3NDMifQ%3D%3D"
with open("../../config.json", "r") as f:
    configData = json.loads(f.read(-1))

default_cookie = configData["cookies"]
maxPage = configData["maxPage"]
cookieObj = {}
cookieList = default_cookie.split(";")
for pair in cookieList:
    cookieObj[pair.split("=")[0]] = pair.split("=")[1]

video_url_list = set()
start_url_list = []


def fetch_stream(url, file_name):
    r = requests.get(url)
    with open("../../download" + file_name, "wb") as code:
        code.write(r.content)


class Index(scrapy.spiders.Spider):
    name = "index"
    allowed_domains = ["tumblr.com", "taobao.com", "tmall.com"]
    start_urls = [
        "https://www.tumblr.com/dashboard"
    ]

    def start_requests(self):
        for url in self.start_urls:
            yield Request(url, cookies=cookieObj)

    def parse(self, response):
        if len(response.url.split("svc")) == 1:
            body = response.body
            html = etree.HTML(body)
            video_list = html.xpath("//source")
            for video in video_list:
                video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
                video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
                video_url_list.add(video_url)
            next_index = "2"
            next_timestamp = response.body.split("/dashboard/2")[1].split("\"")[0][1:]
            url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
                  "?nextAdPos=8&stream_cursor=" + stream_cursor
            yield Request(url, callback=self.parse, cookies=cookieObj)
        else:
            body = json.loads(response.body)['response']['DashboardPosts']['body']
            html = etree.HTML(body)
            video_list = html.xpath("//source")
            for video in video_list:
                video_name = video.xpath("@src")[0].split("tumblr_")[1].split("/")[0]
                video_url = "https://vtt.tumblr.com/tumblr_" + video_name + ".mp4"
                video_url_list.add(video_url)
            with open("../../data.json", 'wb') as f:
                try:
                    f.write(json.dumps(list(video_url_list)))
                except Exception, e:
                    print("error in result", e)
            try:
                next_index = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[3]
                if int(next_index) > int(maxPage):
                    if autoDownload:
                        yield self.final()
                    return
                next_timestamp = json.loads(response.body)['meta']['tumblr_next_page'].split('/')[4]
                url = "https://www.tumblr.com/svc/dashboard/" + next_index + "/" + next_timestamp + \
                      "?nextAdPos=8&stream_cursor=" + stream_cursor
                yield Request(url, callback=self.parse, cookies=cookieObj)
            except Exception, e:
                print("error in result", e)
|
Python
| 0.000017
|
@@ -27,24 +27,8 @@
on%0A%0A
-import requests%0A
impo
@@ -37,16 +37,16 @@
scrapy%0A
+
from lxm
@@ -208,22 +208,16 @@
h open(%22
-../../
config.j
@@ -510,16 +510,16 @@
= set()%0A
+
start_ur
@@ -536,159 +536,8 @@
%5D%0A%0A%0A
-def fetch_stream(url, file_name):%0A r = requests.get(url)%0A with open(%22../../download%22 + file_name, %22wb%22) as code:%0A code.write(r.content)%0A%0A%0A
clas
@@ -2094,14 +2094,8 @@
en(%22
-../../
data
@@ -2451,88 +2451,8 @@
e):%0A
- if autoDownload:%0A yield self.final()%0A
|
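Decoded, the fix drops the unused requests import and the fetch_stream helper, removes the branch that referenced the undefined autoDownload name, and makes both file paths relative to the working directory, e.g.:

with open("config.json", "r") as f:
    configData = json.loads(f.read(-1))

and later open("data.json", 'wb') in place of open("../../data.json", 'wb').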
b202b5faa2f378d3c2b771914c043255e8e66a61
|
Update venmo meta
|
modules/sfp_venmo.py
|
modules/sfp_venmo.py
|
#-------------------------------------------------------------------------------
# Name: sfp_venmo
# Purpose: Gather user information from Venmo API.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-07-16
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_venmo(SpiderFootPlugin):
    """Venmo:Footprint,Investigate,Passive:Social Media::Gather user information from Venmo API."""

    meta = {
        'name': "Venmo",
        'summary': "Gather user information from Venmo API.",
        'flags': [ "" ],
        'useCases': [ "Footprint", "Investigate", "Passive" ],
        'categories': [ "Social Media" ]
    }

    # Default options
    opts = {
    }

    # Option descriptions
    optdescs = {
    }

    results = None

    def setup(self, sfc, userOpts=dict()):
        self.sf = sfc
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return [ 'USERNAME' ]

    # What events this module produces
    def producedEvents(self):
        return [ 'RAW_RIR_DATA' ]

    # Query Venmo API
    def query(self, qry):
        res = self.sf.fetchUrl('https://api.venmo.com/v1/users/' + qry,
                               timeout=self.opts['_fetchtimeout'],
                               useragent=self.opts['_useragent'])

        time.sleep(1)

        if res['content'] is None:
            self.sf.debug('No response from api.venmo.com')
            return None

        try:
            data = json.loads(res['content'])
        except BaseException as e:
            self.sf.debug('Error processing JSON response: ' + str(e))
            return None

        json_data = data.get('data')

        if not json_data:
            self.sf.debug(qry + " is not a valid Venmo username")
            return None

        return json_data

    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if eventData in self.results:
            return None

        self.results[eventData] = True

        self.sf.debug("Received event, %s, from %s" % (eventName, srcModuleName))

        data = self.query(eventData)

        if not data:
            return None

        e = SpiderFootEvent('RAW_RIR_DATA', str(data), self.__name__, event)
        self.notifyListeners(e)

        display_name = data.get('display_name')

        if display_name:
            evt = SpiderFootEvent('RAW_RIR_DATA',
                                  'Possible full name: ' + display_name,
                                  self.__name__, event)
            self.notifyListeners(evt)
# End of sfp_venmo class
|
Python
| 0.000001
|
@@ -588,18 +588,24 @@
eta = %7B%0A
-%09%09
+
'name':
@@ -613,18 +613,24 @@
Venmo%22,%0A
-%09%09
+
'summary
@@ -675,18 +675,24 @@
API.%22,%0A
-%09%09
+
'flags':
@@ -700,18 +700,24 @@
%5B %22%22 %5D,%0A
-%09%09
+
'useCase
@@ -767,10 +767,16 @@
%5D,%0A
-%09%09
+
'cat
@@ -807,10 +807,569 @@
a%22 %5D
-%0A%09
+,%0A 'dataSource': %7B%0A 'website': %22https://venmo.com/%22,%0A 'model': %22FREE_NOAUTH_UNLIMITED%22,%0A 'references': %5B%5D,%0A 'favIcon': %22https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png%22,%0A 'logo': %22https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png%22,%0A 'description': %22Venmo is a digital wallet that allows you to send money and make purchases at approved merchants%5Cn%22,%0A %7D%0A
%7D%0A%0A
|
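Decoded, besides converting the meta block's tab indentation to spaces, the hunk appends a dataSource entry:

        'dataSource': {
            'website': "https://venmo.com/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [],
            'favIcon': "https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png",
            'logo': "https://d1v6x81qdeozhc.cloudfront.net/static/images/logo/apple-touch-icon-1a10ee4b947b728d54265ac8c5084f78.png",
            'description': "Venmo is a digital wallet that allows you to send money and make purchases at approved merchants\n",
        }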
78647441c861eb59d1d25bd6284a2814903a7783
|
Read real tab_size from settings instead of using constant value
|
plugin/formatting.py
|
plugin/formatting.py
|
import sublime_plugin
from .core.protocol import Request, Range
from .core.url import filename_to_uri
from .core.clients import client_for_view
from .core.configurations import is_supported_view
class LspFormatDocumentCommand(sublime_plugin.TextCommand):
    def is_enabled(self):
        if is_supported_view(self.view):
            client = client_for_view(self.view)
            if client and client.has_capability('documentFormattingProvider'):
                return True
        return False

    def run(self, edit):
        client = client_for_view(self.view)
        if client:
            pos = self.view.sel()[0].begin()
            params = {
                "textDocument": {
                    "uri": filename_to_uri(self.view.file_name())
                },
                "options": {
                    "tabSize": 4, # TODO: Fetch these from the project settings / global settings
                    "insertSpaces": True
                }
            }
            request = Request.formatting(params)
            client.send_request(
                request, lambda response: self.handle_response(response, pos))

    def handle_response(self, response, pos):
        self.view.run_command('lsp_apply_document_edit',
                              {'changes': response})


class LspFormatDocumentRangeCommand(sublime_plugin.TextCommand):
    def is_enabled(self):
        if is_supported_view(self.view):
            client = client_for_view(self.view)
            if client and client.has_capability('documentRangeFormattingProvider'):
                if len(self.view.sel()) == 1:
                    region = self.view.sel()[0]
                    if region.begin() != region.end():
                        return True
        return False

    def run(self, _):
        client = client_for_view(self.view)
        if client:
            region = self.view.sel()[0]
            params = {
                "textDocument": {
                    "uri": filename_to_uri(self.view.file_name())
                },
                "range": Range.from_region(self.view, region).to_lsp(),
                "options": {
                    "tabSize": 4, # TODO: Fetch these from the project settings / global settings
                    "insertSpaces": True
                }
            }
            client.send_request(Request.rangeFormatting(params),
                                lambda response: self.view.run_command('lsp_apply_document_edit',
                                                                       {'changes': response}))
|
Python
| 0
|
@@ -834,75 +834,48 @@
e%22:
-4, # TODO: Fetch these from the project settings / global settings
+self.view.settings().get(%22tab_size%22, 4),
%0A
@@ -2138,75 +2138,48 @@
e%22:
-4, # TODO: Fetch these from the project settings / global settings
+self.view.settings().get(%22tab_size%22, 4),
%0A
|
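Decoded, both TODO constants become a live read of the view's settings; Sublime Text's settings().get takes a default, so 4 still applies when tab_size is unset:

                "options": {
                    "tabSize": self.view.settings().get("tab_size", 4),
                    "insertSpaces": True
                }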
3b9417b3d3d0d1a9d7c16d99c7f8441040d359bc
|
Add support for NETBLOCKV6_OWNER (#1411)
|
modules/sfp_whois.py
|
modules/sfp_whois.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_whois
# Purpose: SpiderFoot plug-in for searching Whois servers for domain names
# and netblocks identified.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 06/04/2015
# Copyright: (c) Steve Micallef 2012
# Licence: GPL
# -------------------------------------------------------------------------------
import ipwhois
from netaddr import IPAddress
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
import whois
class sfp_whois(SpiderFootPlugin):

    meta = {
        'name': "Whois",
        'summary': "Perform a WHOIS look-up on domain names and owned netblocks.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Public Registries"]
    }

    # Default options
    opts = {
    }

    # Option descriptions
    optdescs = {
    }

    results = None

    def setup(self, sfc, userOpts=dict()):
        self.sf = sfc
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["DOMAIN_NAME", "DOMAIN_NAME_PARENT", "NETBLOCK_OWNER",
                "CO_HOSTED_SITE_DOMAIN", "AFFILIATE_DOMAIN_NAME", "SIMILARDOMAIN"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["DOMAIN_WHOIS", "NETBLOCK_WHOIS", "DOMAIN_REGISTRAR",
                "CO_HOSTED_SITE_DOMAIN_WHOIS", "AFFILIATE_DOMAIN_WHOIS",
                "SIMILARDOMAIN_WHOIS"]

    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if eventData in self.results:
            return

        self.results[eventData] = True

        self.sf.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventName.startswith("DOMAIN_NAME"):
            typ = "DOMAIN_WHOIS"
        elif eventName.startswith("NETBLOCK"):
            typ = "NETBLOCK_WHOIS"
        elif eventName.startswith("AFFILIATE_DOMAIN_NAME"):
            typ = "AFFILIATE_DOMAIN_WHOIS"
        elif eventName.startswith("CO_HOSTED_SITE_DOMAIN"):
            typ = "CO_HOSTED_SITE_DOMAIN_WHOIS"
        elif eventName == "SIMILARDOMAIN":
            typ = "SIMILARDOMAIN_WHOIS"
        else:
            self.sf.error(f"Invalid event type: {eventName}")
            return

        data = None

        if eventName == "NETBLOCK_OWNER":
            qry = eventData.split("/")[0]
            ip = IPAddress(qry) + 1
            self.sf.debug(f"Sending RDAP query for IP address: {ip}")
            try:
                # TODO: this should use the configured proxy
                r = ipwhois.IPWhois(qry)
                data = r.lookup_rdap(depth=1)
            except Exception as e:
                self.sf.error(f"Unable to perform WHOIS query on {qry}: {e}")
        else:
            self.sf.debug(f"Sending WHOIS query for domain: {eventData}")
            try:
                whoisdata = whois.whois(eventData)
                data = str(whoisdata.text)
            except Exception as e:
                self.sf.error(f"Unable to perform WHOIS query on {eventData}: {e}")

        if not data:
            self.sf.error(f"No WHOIS record for {eventData}")
            return

        # This is likely to be an error about being throttled rather than real data
        if len(data) < 250:
            self.sf.error(f"WHOIS data ({len(data)} bytes) is smaller than 250 bytes. Throttling from WHOIS server is probably happening. Ignoring response.")
            return

        rawevt = SpiderFootEvent(typ, data, self.__name__, event)
        self.notifyListeners(rawevt)

        if eventName.startswith("DOMAIN_NAME"):
            if whoisdata:
                registrar = whoisdata.get('registrar')
                if registrar:
                    evt = SpiderFootEvent("DOMAIN_REGISTRAR", registrar, self.__name__, event)
                    self.notifyListeners(evt)
# End of sfp_whois class
|
Python
| 0
|
@@ -490,21 +490,22 @@
ois%0A
-%0Afrom
+import
netaddr
imp
@@ -504,24 +504,20 @@
addr
-
+%0A
import
-IPAddres
+whoi
s%0A%0Af
@@ -573,30 +573,16 @@
Plugin%0A%0A
-import whois%0A%0A
%0Aclass s
@@ -1326,16 +1326,36 @@
_OWNER%22,
+ %22NETBLOCKV6_OWNER%22,
%0A
@@ -2723,18 +2723,37 @@
entName
-==
+in %5B%22NETBLOCK_OWNER%22,
%22NETBLO
@@ -2754,23 +2754,26 @@
NETBLOCK
+V6
_OWNER%22
+%5D
:%0A
@@ -2782,73 +2782,224 @@
-qry = eventData.split(%22/%22)%5B0%5D%0A ip = IPAddress(qry) + 1
+try:%0A netblock = netaddr.IPNetwork(eventData)%0A except Exception as e:%0A self.sf.error(f%22Invalid netblock %7BeventData%7D: %7Be%7D%22)%0A return%0A%0A ip = netblock%5B0%5D
%0A
@@ -3061,24 +3061,25 @@
ess: %7Bip%7D%22)%0A
+%0A
@@ -3180,19 +3180,18 @@
IPWhois(
-qry
+ip
)%0A
@@ -3207,16 +3207,20 @@
data =
+str(
r.lookup
@@ -3233,16 +3233,17 @@
depth=1)
+)
%0A
@@ -3340,11 +3340,10 @@
on %7B
-qry
+ip
%7D: %7B
@@ -3868,21 +3868,26 @@
if len(
+str(
data)
+)
%3C 250:%0A
|
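Decoded, netaddr and whois are now imported as whole modules, watchedEvents() gains "NETBLOCKV6_OWNER", and the netblock branch parses the CIDR with netaddr and queries RDAP for the network's first address; roughly:

        if eventName in ["NETBLOCK_OWNER", "NETBLOCKV6_OWNER"]:
            try:
                netblock = netaddr.IPNetwork(eventData)
            except Exception as e:
                self.sf.error(f"Invalid netblock {eventData}: {e}")
                return

            ip = netblock[0]
            self.sf.debug(f"Sending RDAP query for IP address: {ip}")

            try:
                # TODO: this should use the configured proxy
                r = ipwhois.IPWhois(ip)
                data = str(r.lookup_rdap(depth=1))
            except Exception as e:
                self.sf.error(f"Unable to perform WHOIS query on {ip}: {e}")

Stringifying the RDAP dict also lets the later len(str(data)) throttling check work for both branches.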
aad2232c2dadf309d83ad38978d26f80c2bb5782
|
Return more informative message when either path or filename is not found
|
pysteps/io/archive.py
|
pysteps/io/archive.py
|
"""Utilities for finding archived files that match the given criteria."""
from datetime import datetime, timedelta
import fnmatch
import os
def find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext, timestep,
                 num_prev_files=0, num_next_files=0):
    """List input files whose timestamp matches the given date.

    Parameters
    ----------
    date : datetime.datetime
        The given date.
    root_path : str
        The root path to search the input files.
    path_fmt : str
        Path format. It may consist of directory names separated by '/',
        date/time specifiers beginning with '%' (e.g. %Y/%m/%d) and wildcards
        (?) that match any single character.
    fn_pattern : str
        The name pattern of the input files without extension. The pattern can
        contain time specifiers (e.g. %H, %M and %S).
    fn_ext : str
        Extension of the input files.
    timestep : float
        Time step between consecutive input files (minutes).
    num_prev_files : int
        Optional, number of previous files to find before the given timestamp.
    num_next_files : int
        Optional, number of future files to find after the given timestamp.

    Returns
    -------
    out : tuple
        If num_prev_files=0 and num_next_files=0, return a pair containing the
        found file name and the corresponding timestamp as a datetime.datetime
        object. Otherwise, return a tuple of two lists, the first one for the
        file names and the second one for the corresponding timestamps. The lists
        are sorted in ascending order with respect to timestamp. A None value is
        assigned if a file name corresponding to a given timestamp is not found.
    """
    filenames = []
    timestamps = []

    for i in range(num_prev_files+num_next_files+1):
        curdate = date + timedelta(minutes=num_next_files*timestep) - timedelta(minutes=i*timestep)
        fn = _find_matching_filename(curdate, root_path, path_fmt, fn_pattern, fn_ext)
        filenames.append(fn)
        timestamps.append(curdate)

    if all(filename is None for filename in filenames):
        raise IOError("no input data found in %s" % root_path)

    if (num_prev_files+num_next_files) > 0:
        return (filenames[::-1], timestamps[::-1])
    else:
        return (filenames, timestamps)


def _find_matching_filename(date, root_path, path_fmt, fn_pattern, fn_ext):
    path = _generate_path(date, root_path, path_fmt)

    fn = None
    if os.path.exists(path):
        fn = datetime.strftime(date, fn_pattern) + '.' + fn_ext

        # test for wildcards
        if '?' in fn:
            filenames = os.listdir(path)
            if len(filenames) > 0:
                for filename in filenames:
                    if fnmatch.fnmatch(filename, fn):
                        fn = filename
                        break

        fn = os.path.join(path, fn)
        fn = fn if os.path.exists(fn) else None

    return fn


def _generate_path(date, root_path, path_fmt):
    f = lambda t: datetime.strftime(date, t) if t[0] == '%' else t
    if path_fmt != "":
        tokens = [f(t) for t in path_fmt.split('/')]
        subpath = os.path.join(*tokens)
        return os.path.join(root_path, subpath)
    else:
        return root_path
|
Python
| 0.000005
|
@@ -2911,15 +2911,17 @@
-fn = fn
+ %0A
if
@@ -2942,19 +2942,198 @@
(fn)
- else None%0A
+:%0A fn = fn%0A else:%0A print('filename for date %25s not found in %25s' %25 (date,path))%0A fn = None%0A else:%0A print('path', path, 'not found.')%0A
%0A
|
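Decoded, the silent one-liner becomes an if/else that reports which part was missing:

        fn = os.path.join(path, fn)
        if os.path.exists(fn):
            fn = fn
        else:
            print('filename for date %s not found in %s' % (date,path))
            fn = None
    else:
        print('path', path, 'not found.')

where the trailing else pairs with the enclosing if os.path.exists(path): check.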
a42b779bcbf7977d59337064047ca3d366db3ca9
|
Add clear command for board
|
python/games/board.py
|
python/games/board.py
|
import itertools
import threading
import datetime
import graphics
import random
import driver
import flask
from flask import request
import game
import time
import sys
from wsgiref import simple_server
class Message:
    def __init__(self, text, priority=5, expiration=None, effects=[]):
        self.text = text
        self.label = graphics.TextSprite(text, width=5, height=7, y=4)
        self.label.x = (112 - self.label.size()) // 2
        self.priority = priority
        self.expiration = expiration or 2147483647
        self.effects = []
        for effect_type in effects:
            effect = None
            if effect_type == "scroll":
                self.label.x = 112
                effect = graphics.Animator(self.label, attr="x", max=112,
                                           min=-self.label.size(),
                                           loop=True, delay=.04, step=-1)
            elif effect_type == "scroll_y":
                effect = graphics.Animator(self.label, attr="y", max=15,
                                           min=-self.label.height,
                                           loop=True, delay=.4)
            elif effect_type == "bounce_x":
                self.label.x = 112
                effect = graphics.Animator(
                    self.label, attr="x",
                    min=(112 - self.label.size() if self.label.size() > 112 else 0),
                    max=(0 if self.label.size() > 112 else 112 - self.label.size()),
                    delay=.04,
                    reverse=True)
            elif effect_type == "bounce_y":
                effect = graphics.Animator(self.label, attr="y",
                                           max=15-self.label.height, min=0,
                                           reverse=True, delay=.4)
            elif effect_type == "blink":
                effect = graphics.Animator(self.label, attr="visible", max=1, min=0,
                                           reverse=True, delay=1.5)
            elif effect_type == "blink_fast":
                effect = graphics.Animator(self.label, attr="visible", max=1, min=0,
                                           reverse=True, delay=.25)
            elif effect_type == "shake":
                effect = graphics.Animator(self.label, attr="y", max=6, min=2,
                                           delay=.01, reverse=True)
            if effect:
                self.effects.append(effect)


class MessageBoard(game.Game):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.messages = {}
        modes = [
            self.display_messages
        ]
        self.frame_lock = threading.Lock()
        self.api = flask.Flask(__name__)
        self.api.debug = True
        self.api.add_url_rule('/add_message', 'add_message', self.add_message, methods=['POST'])
        self.api.add_url_rule('/remove_message/<id>', 'remove_message', self.remove_message, methods=['GET', 'POST'])
        self.server = simple_server.make_server('', 8800, self.api)
        self.server_thread = threading.Thread(target=self.server.serve_forever, daemon=True)
        self.server_thread.start()
        self.ids = 0
        self.cycle = itertools.chain.from_iterable(
            mode() for mode in itertools.chain.from_iterable(
                random.shuffle(x) or x for x in itertools.repeat(modes)))

    def stop(self):
        super().stop()
        self.server.shutdown()
        self.server.server_close()

    def add_message(self):
        text = request.form.get("text", "?")
        priority = int(request.form.get("priority", 5))
        expiration = int(request.form.get("expiration", 0))
        effects = filter(bool, request.form.get("effects", "").split(","))
        name = request.form.get("name", None)
        if name is None:
            name = str(self.ids)
            self.ids += 1
        with self.frame_lock:
            self.messages[name] = Message(text, priority, expiration, effects)
        return name

    def remove_message(self, id):
        with self.frame_lock:
            del self.messages[id]
        return ''

    def loop(self):
        super().loop()
        next(self.cycle)

    def display_messages(self):
        messages = None
        with self.frame_lock:
            messages = list(self.messages.values())
        for message in messages:
            self.sprites.add(message.label)
            for effect in message.effects:
                self.sprites.add(effect)
            run_until = time.time() + message.priority
            while time.time() < run_until:
                yield
            for effect in message.effects:
                self.sprites.remove(effect)
            self.sprites.remove(message.label)
            self.graphics.clear()
        with self.frame_lock:
            self.messages = {k: m for k, m in self.messages.items() if m.expiration > time.time()}
GAME = MessageBoard
|
Python
| 0.000002
|
@@ -2983,16 +2983,95 @@
'POST'%5D)
+%0A self.api.add_url_rule('/clear', 'clear', self.clear, methods=%5B'POST'%5D)
%0A%0A
@@ -4226,16 +4226,99 @@
urn ''%0A%0A
+ def clear(self):%0A with self.frame_lock:%0A self.messages = %7B%7D%0A%0A
def
|
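Decoded, a new endpoint and handler empty the message dict under the frame lock:

        self.api.add_url_rule('/clear', 'clear', self.clear, methods=['POST'])

    def clear(self):
        with self.frame_lock:
            self.messages = {}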
6facb0f33a8cf53041d9fa1562376e43e6d6194f
|
add init for smiles2graph
|
ogb/utils/__init__.py
|
ogb/utils/__init__.py
|
Python
| 0.000003
|
@@ -0,0 +1,67 @@
+try:%0A from .mol import smiles2graph%0Aexcept ImportError:%0A pass
|
|
fdae17a50223c2f9b8ba4a665fc24726e2c2ce14
|
Add auth header to the fixture loader
|
tests/lib/es_tools.py
|
tests/lib/es_tools.py
|
""" Commands for interacting with Elastic Search """
# pylint: disable=broad-except
from os.path import join
import requests
from lib.tools import TEST_FOLDER
def es_is_available():
    """ Test if Elastic Search is running """
    try:
        return (
            requests.get("http://localhost:9200").json()["tagline"]
            == "You Know, for Search"
        )
    except Exception:
        return False


def load_json_file(filename):
    """ Load JSON file into Elastic Search """
    url = "http://localhost:9200/_bulk"
    path = join(TEST_FOLDER, "data", filename)
    headers = {"Content-Type": "application/x-ndjson"}
    with open(path, "r") as handle:
        body = handle.read().encode(encoding="utf-8")
    return requests.post(url, headers=headers, data=body)
|
Python
| 0
|
@@ -305,26 +305,86 @@
200%22
-).json()%5B%22tagline%22
+, auth=(%22elastic%22, %22changeme%22)).json()%5B%0A %22tagline%22%0A
%5D%0A
@@ -814,16 +814,29 @@
ts.post(
+%0A
url, hea
@@ -858,10 +858,49 @@
ata=body
+, auth=(%22elastic%22, %22changeme%22)%0A
)%0A
|
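Decoded, both calls gain HTTP basic auth using the Elastic stack's default credentials:

            requests.get("http://localhost:9200", auth=("elastic", "changeme")).json()[
                "tagline"
            ]

    return requests.post(
        url, headers=headers, data=body, auth=("elastic", "changeme")
    )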
13f802e959013cf31148399321dd84cc4070bf28
|
Make input image update on change
|
qtgui/panels/panel.py
|
qtgui/panels/panel.py
|
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QComboBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGroupBox, QSplitter
from qtgui.widgets import QActivationView
from qtgui.widgets import QInputSelector, QInputInfoBox, QImageView
from qtgui.widgets import QNetworkView, QNetworkInfoBox
class Panel(QWidget):
    '''Base class for different visualisation panels. In the future, the type of
    visualisation should be a component in the panel, not a separate panel
    class.
    '''

    def __init__(self, parent=None):
        '''Initialization of the Panel.

        Parameters
        ----------
        parent : QWidget
            The parent argument is sent to the QWidget constructor.
        '''
        super().__init__(parent)

    def initUI(self):
        '''Initialise all UI elements. These are

        * The ``QImageView`` showing the current input image
        * A ``QInputSelector`` to show input controls
        * A ``QNetworkView``, a widget to select a layer in a network
        * A ``QInputInfoBox`` to display information about the input
        '''

        ########################################################################
        #                              User input                              #
        ########################################################################
        self._input_view = QImageView(self)
        # FIXME[layout]
        # keep image view square (TODO: does this make sense for every input?)
        self._input_view.heightForWidth = lambda w: w
        self._input_view.hasHeightForWidth = lambda: True

        # QNetworkInfoBox: a widget to select the input to the network
        # (data array, image directory, webcam, ...)
        # the 'next' button: used to load the next image
        self._input_selector = QInputSelector()

        self._input_info = QInputInfoBox()
        # FIXME[layout]
        self._input_info.setMinimumWidth(300)

        input_layout = QVBoxLayout()
        # FIXME[layout]
        input_layout.setSpacing(0)
        input_layout.setContentsMargins(0, 0, 0, 0)
        input_layout.addWidget(self._input_view)
        input_layout.addWidget(self._input_info)
        input_layout.addWidget(self._input_selector)

        input_box = QGroupBox('Input')
        input_box.setLayout(input_layout)
        self._input_box = input_box

        ########################################################################
        #                               Network                                #
        ########################################################################
        # networkview: a widget to select a network
        self._network_view = QNetworkView()

        self._network_selector = QComboBox()

        self._network_layout = QVBoxLayout()
        self._network_layout.addWidget(self._network_selector)
        self._network_layout.addWidget(self._network_view)

        self._network_box = QGroupBox('Network')
        self._network_box.setLayout(self._network_layout)
|
Python
| 0.000004
|
@@ -3077,16 +3077,233 @@
network_layout)%0A
+%0A def updateInput(self, data):%0A self._input_view.setImage(data)%0A%0A def modelChanged(self, model):%0A current_input = model.get_input(model._current_index)%0A self.updateInput(current_input.data)%0A
|
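Decoded, the hunk appends two methods so the panel refreshes its QImageView whenever the model changes; get_input and _current_index belong to the model's API as used by the commit, not to this file:

    def updateInput(self, data):
        self._input_view.setImage(data)

    def modelChanged(self, model):
        current_input = model.get_input(model._current_index)
        self.updateInput(current_input.data)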
19e347716b5efcbaaf857a2805bd5f7ed5d5ec04
|
Patch waagent unit to kill process instead of entire control group
|
VMEncryption/main/oscrypto/encryptstates/PrereqState.py
|
VMEncryption/main/oscrypto/encryptstates/PrereqState.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
from OSEncryptionState import *
from pprint import pprint
class PrereqState(OSEncryptionState):
    def __init__(self, context):
        super(PrereqState, self).__init__('PrereqState', context)

    def should_enter(self):
        self.context.logger.log("Verifying if machine should enter prereq state")

        if not super(PrereqState, self).should_enter():
            return False

        self.context.logger.log("Performing enter checks for prereq state")

        return True

    def enter(self):
        if not self.should_enter():
            return

        self.context.logger.log("Entering prereq state")

        distro_info = self.context.distro_patcher.distro_info
        self.context.logger.log("Distro info: {0}, {1}".format(distro_info[0], distro_info[1]))

        if distro_info[0] == 'redhat' and distro_info[1] == '7.2':
            self.context.logger.log("Enabling OS volume encryption on RHEL 7.2")
        else:
            raise Exception("OS volume encryption is not supported for distro {0} {1}".format(distro_info[0],
                                                                                              distro_info[1]))

        self.context.distro_patcher.install_extras()

    def should_exit(self):
        self.context.logger.log("Verifying if machine should exit prereq state")

        return super(PrereqState, self).should_exit()
|
Python
| 0
|
@@ -1882,16 +1882,118 @@
tras()%0A%0A
+ self._patch_waagent()%0A self.command_executor.Execute('systemctl daemon-reload', True)%0A%0A
def
@@ -2143,12 +2143,481 @@
ould_exit()%0A
+%0A def _patch_waagent(self):%0A self.context.logger.log(%22Patching waagent%22)%0A%0A contents = None%0A%0A with open('/usr/lib/systemd/system/waagent.service', 'r') as f:%0A contents = f.read()%0A%0A contents = re.sub(r'%5C%5BService%5C%5D%5Cn', '%5BService%5D%5CnKillMode=process%5Cn', contents)%0A%0A with open('/usr/lib/systemd/system/waagent.service', 'w') as f:%0A f.write(contents)%0A%0A self.context.logger.log(%22waagent patched successfully%22)%0A
|
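Decoded, enter() now ends by patching the unit file and reloading systemd, and a new helper rewrites the [Service] section so systemd kills only the main process (KillMode=process) rather than the whole control group; condensed:

        self._patch_waagent()
        self.command_executor.Execute('systemctl daemon-reload', True)

    def _patch_waagent(self):
        self.context.logger.log("Patching waagent")
        with open('/usr/lib/systemd/system/waagent.service', 'r') as f:
            contents = f.read()
        contents = re.sub(r'\[Service\]\n', '[Service]\nKillMode=process\n', contents)
        with open('/usr/lib/systemd/system/waagent.service', 'w') as f:
            f.write(contents)
        self.context.logger.log("waagent patched successfully")

Note the helper relies on re, presumably pulled in by the star import from OSEncryptionState.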
455874cae74a34e610650e5b5618b64fe808ea1c
|
fix docstring syntax error
|
ncharts/ncharts/templatetags/filters.py
|
ncharts/ncharts/templatetags/filters.py
|
from django import template
from ncharts import models as nc_models
register = template.Library()
@register.filter
def get_long_name(vs, v):
    """Get 'long_name' value of vs[v] """
    try:
        return vs[v]['long_name']
    except:
        return ''


@register.filter
def get_item(dictionary, key):
    return dictionary.get(key)


@register.filter
def get_key_values(var_name, variables):
    for var in variables:
        if var.choice_label == var_name:
            return var


@register.filter
def make_tabs(variables, dset):
    return dset.make_tabs(variables)


@register.filter
"""Get the dictionary of years and projects from models.py"""
def make_project_tabs(projects):
    return nc_models.Project.make_tabs(projects)
|
Python
| 0.000001
|
@@ -585,16 +585,49 @@
.filter%0A
+def make_project_tabs(projects):%0A
%22%22%22G
@@ -688,41 +688,8 @@
%22%22%22%0A
-def make_project_tabs(projects):%0A
|
c912f7d25906ba00325c36b93ba6fcc1384d7b9a
|
Version 1.3.2
|
jupytext/version.py
|
jupytext/version.py
|
"""Jupytext's version number"""
__version__ = '1.3.1'
|
Python
| 0
|
@@ -45,11 +45,11 @@
= '1.3.
-1
+2
'%0A
|
f7d4be60dd246193fe269dc1caaf8208bd4dba22
|
improve output of compare_dfa.py.
|
src/trusted/validator_ragel/unreviewed/compare_dfa.py
|
src/trusted/validator_ragel/unreviewed/compare_dfa.py
|
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import dfa_parser
visited_pairs = set()
def Traverse(state1, state2, path):
  if (state1, state2) in visited_pairs:
    return

  if state1.is_accepting != state2.is_accepting:
    print map(hex, path)
    print state1.is_accepting
    print state2.is_accepting
    sys.exit(1)

  visited_pairs.add((state1, state2))

  for byte in range(256):
    new_path = path + [byte]

    t1 = state1.forward_transitions.get(byte)
    t2 = state2.forward_transitions.get(byte)

    if (t1 is None) != (t2 is None):
      print map(hex, new_path)
      print t1 is not None
      print t2 is not None
      sys.exit(1)

    if t1 is None:
      continue

    Traverse(t1.to_state, t2.to_state, new_path)


def main():
  filename1, filename2 = sys.argv[1:]

  _, start_state1 = dfa_parser.ParseXml(filename1)
  _, start_state2 = dfa_parser.ParseXml(filename2)

  Traverse(start_state1, start_state2, [])

  print 'automata are equivalent'


if __name__ == '__main__':
  main()
|
Python
| 0.403608
|
@@ -472,26 +472,17 @@
x, path)
-%0A print
+,
state1.
@@ -503,16 +503,32 @@
print
+ map(hex, path),
state2.
@@ -791,44 +791,239 @@
-print map(hex, new_path)%0A print
+t = t1 or t2%0A s = t.to_state%0A path_to_accepting = new_path%0A while not s.is_accepting:%0A b = min(s.forward_transitions)%0A path_to_accepting.append(b)%0A s = s.forward_transitions%5Bb%5D.to_state%0A%0A if
t1
@@ -1037,34 +1037,202 @@
None
+:
%0A
+
print
-t2 is not Non
+map(hex, path_to_accepting), True%0A print map(hex, path), '...', False%0A else:%0A print map(hex, path), '...', False%0A print map(hex, path_to_accepting), Tru
e%0A
|
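Decoded, instead of printing bare booleans the mismatch report now walks from the divergent state to the nearest accepting state (always following the smallest byte) and prints the accepted path next to the rejected one:

      t = t1 or t2
      s = t.to_state
      path_to_accepting = new_path
      while not s.is_accepting:
        b = min(s.forward_transitions)
        path_to_accepting.append(b)
        s = s.forward_transitions[b].to_state

      if t1 is not None:
        print map(hex, path_to_accepting), True
        print map(hex, path), '...', False
      else:
        print map(hex, path), '...', False
        print map(hex, path_to_accepting), True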
a76915d31937f31e5d5fd7ed090198e311cffaa1
|
fix csvrecorder
|
pikos/recorders/csv_recorder.py
|
pikos/recorders/csv_recorder.py
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: recorders/csv_recorder.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import csv
from pikos.recorders.abstract_recorder import AbstractRecorder, RecorderError
class CSVRecorder(AbstractRecorder):
    """ The CSV Recorder is a simple text based recorder that records the
    tuple of values using a csv writer.

    Private
    -------
    _filter : callable
        Used to check if the set `record` should be `recorded`. The function
        accepts a tuple of the `record` values and return True is the input
        sould be recored.

    _writer : csv.writer
        The `writer` object is owned by the CSVRecorder and exports the record
        values according to the configured dialect.

    _ready : bool
        Singify that the Recorder is ready to accept data.

    """

    def __init__(self, stream, filter_=None, **csv_kwargs):
        """ Class initialization.

        Parameters
        ----------
        stream : file
            A *file*-like object to use for output.

        filter_ : callable
            A callable function that accepts a data tuple and returns True
            if the input should be recorded.

        **csv_kwargs :
            Key word arguments to be passed to the *csv.writer*.

        """
        self._filter = (lambda x: True) if filter_ is None else filter_
        self._writer = csv.writer(stream, **csv_kwargs)
        self._ready = False

    def prepare(self, fields):
        """ Write the header in the csv file the first time it is called. """
        if not self._ready:
            self._writer.writerow(fields)
            self._ready = True

    def finalize(self):
        """ Finalize the recorder.

        A do nothing method.

        Raises
        ------
        RecorderError :
            Raised if the method is called without the recorder been ready to
            accept data.

        """
        if not self._ready:
            msg = 'Method called while recorder has not been prepared'
            raise RecorderError(msg)

    def record(self, data):
        """ Record the data entry when the filter function returns True.

        Parameters
        ----------
        values : NamedTuple
            The record entry.

        Raises
        ------
        RecorderError :
            Raised if the method is called without the recorder been ready to
            accept data.

        """
        if self._ready:
            if self._filter(data):
                self._writer.writerow(data)
        else:
            msg = 'Method called while recorder is not ready to record'
            raise RecorderError(msg)
|
Python
| 0.000013
|
@@ -780,16 +780,17 @@
s
+h
ould be
@@ -794,16 +794,17 @@
be recor
+d
ed.%0A%0A
@@ -985,18 +985,18 @@
Si
-n
g
+n
ify that
@@ -1834,16 +1834,24 @@
w(fields
+._fields
)%0A
|
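Besides the docstring typo fixes, the functional change is in prepare(): the header row is now the NamedTuple's field names rather than the tuple object itself:

    def prepare(self, fields):
        """ Write the header in the csv file the first time it is called. """
        if not self._ready:
            self._writer.writerow(fields._fields)
            self._ready = True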
1a75b6e516020635d885546da79ab73fe881893e
|
Change host and port of HostAlias.TEST2
|
scripts/deployment/fab/config/values/host.py
|
scripts/deployment/fab/config/values/host.py
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
class HostAlias(object):
    CI = 'ci'
    DATA = 'data'
    LIVE = 'live'
    MEDIA = 'media'
    TEST = 'test'
    TEST2 = 'test2'
    UAT = 'uat'


class SSHConnection(object):
    connection_map = { HostAlias.CI: 'ci.akvo.org:2275',
                       HostAlias.DATA: 'www.akvo.org:22',
                       HostAlias.LIVE: 'www.akvo.org:22',
                       HostAlias.MEDIA: '89.233.254.43:2268',
                       HostAlias.TEST: 'test.akvo.org:2270',
                       HostAlias.TEST2: 'test2.akvo.org:2273',
                       HostAlias.UAT: 'uat.akvo.org:2279' }

    @staticmethod
    def for_host(host_alias):
        if host_alias not in SSHConnection.connection_map:
            raise LookupError('No SSH connection details for: %s' % host_alias)
        return SSHConnection.connection_map[host_alias]


class HostPathValues(object):
    DEFAULT = { 'config_home': '/usr/local/etc/akvo',
                'repo_checkout_home': '/var/git',
                'virtualenvs_home': '/var/virtualenvs',
                'static_media_home': '/var/www',
                'logging_home': '/var/log/akvo',
                'deployment_processing_home': '/var/tmp/rsr' }

    LIVE = { 'config_home': DEFAULT['config_home'],
             'repo_checkout_home': '/var/lib/django',
             'virtualenvs_home': DEFAULT['virtualenvs_home'],
             'static_media_home': DEFAULT['static_media_home'],
             'logging_home': DEFAULT['logging_home'],
             'deployment_processing_home': DEFAULT['deployment_processing_home'] }

    TEST2 = { 'config_home': '/usr/local/etc/akvo/test2',
              'repo_checkout_home': '/var/dev/test2',
              'virtualenvs_home': '/var/dev/virtualenvs/test2',
              'static_media_home': '/var/www/test2',
              'logging_home': '/var/log/akvo',
              'deployment_processing_home': '/var/tmp/rsr/test2' }


class DataHostPaths(object):
    def __init__(self, host_paths=HostPathValues.LIVE):
        self.config_home = host_paths['config_home']
        self.django_apps_home = host_paths['repo_checkout_home']
        self.virtualenvs_home = host_paths['virtualenvs_home']
        self.logging_home = host_paths['logging_home']
        self.deployment_processing_home = host_paths['deployment_processing_home']

    def __eq__(self, host_paths):
        return (self.config_home == host_paths.config_home and
                self.django_apps_home == host_paths.django_apps_home and
                self.virtualenvs_home == host_paths.virtualenvs_home and
                self.logging_home == host_paths.logging_home and
                self.deployment_processing_home == host_paths.deployment_processing_home)

    def __ne__(self, host_paths):
        return not self.__eq__(host_paths)


class DeploymentHostPaths(object):
    host_paths_map = { HostAlias.CI: HostPathValues.DEFAULT,
                       HostAlias.LIVE: HostPathValues.LIVE,
                       HostAlias.TEST: HostPathValues.DEFAULT,
                       HostAlias.TEST2: HostPathValues.TEST2,
                       HostAlias.UAT: HostPathValues.DEFAULT }

    def __init__(self, host_paths):
        self.config_home = host_paths['config_home']
        self.repo_checkout_home = host_paths['repo_checkout_home']
        self.virtualenvs_home = host_paths['virtualenvs_home']
        self.static_media_home = host_paths['static_media_home']
        self.logging_home = host_paths['logging_home']
        self.deployment_processing_home = host_paths['deployment_processing_home']  # temp directory for handling deployment activity

    @staticmethod
    def default():
        return DeploymentHostPaths(HostPathValues.DEFAULT)

    @staticmethod
    def for_host(host_alias):
        if host_alias not in DeploymentHostPaths.host_paths_map:
            raise LookupError('No host path configuration for: %s' % host_alias)
        return DeploymentHostPaths(DeploymentHostPaths.host_paths_map[host_alias])

    def __eq__(self, host_paths):
        return (self.config_home == host_paths.config_home and
                self.repo_checkout_home == host_paths.repo_checkout_home and
                self.virtualenvs_home == host_paths.virtualenvs_home and
                self.static_media_home == host_paths.static_media_home and
                self.logging_home == host_paths.logging_home and
                self.deployment_processing_home == host_paths.deployment_processing_home)

    def __ne__(self, host_paths):
        return not self.__eq__(host_paths)

    def __repr__(self):
        return repr([self.config_home, self.repo_checkout_home, self.virtualenvs_home,
                     self.static_media_home, self.logging_home, self.deployment_processing_home])
|
Python
| 0
|
@@ -851,17 +851,16 @@
'test
-2
.akvo.or
@@ -864,17 +864,17 @@
.org:227
-3
+0
',%0A
@@ -2112,32 +2112,26 @@
'/
-usr/local/etc/akvo/test2
+var/test2_akvo/etc
',%0A
@@ -2187,17 +2187,23 @@
var/
-dev/
test2
+_akvo/repo
',%0A
@@ -2259,11 +2259,18 @@
var/
-dev
+test2_akvo
/vir
@@ -2277,22 +2277,16 @@
tualenvs
-/test2
',%0A
@@ -2334,16 +2334,27 @@
'/var/
+test2_akvo/
www/test
@@ -2407,32 +2407,38 @@
'/var/
-log/akvo
+test2_akvo/log
',%0A
@@ -2489,22 +2489,16 @@
/tmp/rsr
-/test2
' %7D%0A%0A%0Acl
|
7331fa69c6cd2f09b1711272278a9684af5cf9c1
|
fix attachfilename
|
intelmq/bots/collectors/mail/mail-attach.py
|
intelmq/bots/collectors/mail/mail-attach.py
|
import re
import imbox
import zipfile
from intelmq.lib.bot import Bot, sys
from intelmq.bots.collectors.mail.lib import Mail
class MailAttachCollectorBot(Bot):

    def process(self):
        mailbox = imbox.Imbox(self.parameters.mail_host, self.parameters.mail_user, self.parameters.mail_password, self.parameters.mail_ssl)
        emails = mailbox.messages(folder=self.parameters.folder, unread=True)

        if emails:
            for uid, message in emails:

                if self.parameters.subject_regex and not re.search(self.parameters.subject_regex, message.subject):
                    continue

                self.logger.info("Reading email report")
                for attach in message.attachments:
                    if not attach:
                        continue

                    if re.search(self.parameters.attach_regex, attach['filename']):

                        if self.parameters.attach_unzip:
                            zipped = zipfile.ZipFile(attach['content'])
                            report = zipped.read(zipped.namelist()[0])
                        else:
                            report = attach['content']

                        self.send_message(report)

                mailbox.mark_seen(uid)
                self.logger.info("Email report read")


if __name__ == "__main__":
    bot = MailAttachCollectorBot(sys.argv[1])
    bot.start()
|
Python
| 0.000003
|
@@ -810,32 +810,170 @@
%0A
+ attach_name = attach%5B'filename'%5D%5B1:len(attach%5B'filename'%5D)-1%5D # remove quote marks from filename%0A %0A
@@ -1025,28 +1025,21 @@
, attach
-%5B'file
+_
name
-'%5D
):%0A%0A
|
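Decoded, the filename is stripped of its surrounding quote marks before the regex match (imbox evidently returns it quoted in this setup):

                    attach_name = attach['filename'][1:len(attach['filename'])-1]  # remove quote marks from filename

                    if re.search(self.parameters.attach_regex, attach_name):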
b481426e52661b702fa014a86c68b015f46feb1f
|
remove deprecated test suite declarations
|
account_invoice_constraint_chronology/tests/__init__.py
|
account_invoice_constraint_chronology/tests/__init__.py
|
# -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import test_account_constraint_chronology
fast_suite = [
    test_account_constraint_chronology,
]

checks = [
    test_account_constraint_chronology,
]
|
Python
| 0
|
@@ -1276,116 +1276,4 @@
ogy%0A
-%0Afast_suite = %5B%0A test_account_constraint_chronology,%0A%5D%0A%0Achecks = %5B%0A test_account_constraint_chronology,%0A%5D%0A
|
7fef3fbd8f4a68a2cf584721930c276eb49b16ee
|
Fix issue in infer_android_package_name with tests in //javatests
|
build_extensions/infer_android_package_name.bzl
|
build_extensions/infer_android_package_name.bzl
|
"""A rule for inferring an android package name."""
def infer_android_package_name():
    """Infer an android package name based on current path below 'javatests'"""
    path = native.package_name()
    javatests_index = path.rindex("/javatests/") + len("/javatests/")
    return path[javatests_index:].replace("/", ".")
|
Python
| 0.000021
|
@@ -228,17 +228,16 @@
rindex(%22
-/
javatest
@@ -248,17 +248,16 @@
+ len(%22
-/
javatest
|
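Decoded, rindex() loses its leading slash, so a test directly under a repository-root javatests/ directory (whose package path merely starts with, rather than contains, /javatests/) no longer raises ValueError:

    javatests_index = path.rindex("javatests/") + len("javatests/")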
c2f668b6b403bde09485595c2ac2852220739c93
|
Fix docstring.
|
morepath/toposort.py
|
morepath/toposort.py
|
from .error import TopologicalSortError
def topological_sort(l, get_depends):
    result = []
    marked = set()
    temporary_marked = set()

    def visit(n):
        if n in marked:
            return
        if n in temporary_marked:
            raise TopologicalSortError("Not a DAG")
        temporary_marked.add(n)
        for m in get_depends(n):
            visit(m)
        marked.add(n)
        result.append(n)

    for n in l:
        visit(n)
    return result


def toposorted(infos):
    """Sort infos topologically.

    Info object must have a key attribute, and before and after
    methods that returns a list of keys.
    """
    key_to_info = {}
    depends = {}
    for info in infos:
        key_to_info[info.key] = info
        depends[info.key] = []
    for info in infos:
        for after in info.after:
            after_info = key_to_info[after]
            depends[info.key].append(after_info)
        for before in info.before:
            before_info = key_to_info[before]
            depends[before_info.key].append(info)
    return topological_sort(
        infos, lambda info: depends[info.key])
|
Python
| 0.000001
|
@@ -600,14 +600,17 @@
-method
+attribute
s th
|
24bbdd828253e5a1df92dac31f271dde6739ef8d
|
Refactor and fix some things
|
vega/polestar.py
|
vega/polestar.py
|
import os
import json
import cgi
import codecs
from IPython import display
JS = ['polestar/scripts/vendor-13742e93f0.js', 'polestar/scripts/app-512c772610.js']
CSS = ['polestar/styles/app-a696a065c6.css', 'polestar/scripts/vendor-e4b58aff85.css']
TEAMPLATE = 'index.html'
IFRAME_STYLE = 'border: none; width: 100%; min-height: 580px;'
def publish(dataframe):
    """Create and immediately display even if it is not the last line."""
    display.display(create(dataframe))


def create(dataframe):
    """Creates polestar from a dataframe"""
    return Polestar(dataframe.columns, dataframe.values)


class Polestar(display.DisplayObject):
    """Defines Polestar widget"""

    def __init__(self, columns, data):
        """Constructor

        Args:
            columns: a list of column names
            data: list of rows"""
        self.data = data
        self.columns = columns

    def __get_content(self, path):
        path = os.path.join('static', path)
        abs_path = os.path.abspath(path)
        with codecs.open(abs_path, encoding='utf-8') as f:
            return path, f.read()

    def __styles(self, paths):
        out = []
        for p in paths:
            path, body = self.__get_content(p)
            out.append(u'<style>/*# sourceURL={path} */\n{body}</style>'.format(
                path=path, body=body))
        return u'\n'.join(out)

    def __scripts(self, paths):
        out = []
        for p in paths:
            path, body = self.__get_content(p)
            out.append((u'<script type="text/javascript">//@ sourceURL={path}'
                        '\n{body}</script>').format(path=path, body=body))
        return u'\n'.join(out)

    def __data(self):
        return self.data.tolist()

    def __escape(self, body):
        return cgi.escape(body, quote=True)

    def _repr_html_(self):
        """Used by the frontend to show html for polestar."""
        _, template = self.__get_content(TEAMPLATE)
        body = template.format(
            styles=self.__styles(CSS),
            scripts=self.__scripts(JS),
            data=json.dumps(self.__data()))
        output = u'<iframe srcdoc="{srcdoc}" style="{style}"></iframe>'.format(
            srcdoc=self.__escape(body), style=IFRAME_STYLE)
        return output
|
Python
| 0.000023
|
@@ -74,24 +74,31 @@
ay%0A%0A%0AJS = %5B'
+static/
polestar/scr
@@ -113,25 +113,38 @@
dor-
-13742e93f0.js', '
+6292494709.js',%0A 'static/
pole
@@ -164,18 +164,18 @@
app-
-512c772610
+ddc64cf3e9
.js'
@@ -184,16 +184,23 @@
CSS = %5B'
+static/
polestar
@@ -205,70 +205,84 @@
ar/s
-tyles/app-a696a065c6.css', 'polestar/scripts/vendor-e4b58aff85
+cripts/vendor-5779b264ab.css',%0A 'static/polestar/styles/app-767140e98a
.css
@@ -297,16 +297,23 @@
LATE = '
+static/
index.ht
@@ -974,52 +974,8 @@
h):%0A
- path = os.path.join('static', path)%0A
@@ -1088,22 +1088,16 @@
return
- path,
f.read(
@@ -1152,32 +1152,35 @@
%5B%5D%0A for p
+ath
in paths:%0A
@@ -1178,55 +1178,8 @@
hs:%0A
- path, body = self.__get_content(p)%0A
@@ -1279,36 +1279,56 @@
path=path, body=
-body
+self.__get_content(path)
))%0A retur
@@ -1408,16 +1408,19 @@
for p
+ath
in path
@@ -1426,55 +1426,8 @@
hs:%0A
- path, body = self.__get_content(p)%0A
@@ -1556,28 +1556,72 @@
mat(
-path=path, body=body
+%0A path=path, body=self.__get_content(path)
))%0A
@@ -1687,31 +1687,138 @@
re
-turn self.data.tolist()
+s = %5B%5D%0A for row in self.data.tolist():%0A res.append(%7Bk: v for k, v in zip(self.columns, row)%7D)%0A return res
%0A%0A
@@ -1990,11 +1990,8 @@
- _,
tem
|
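Decoded, the asset bundles move under static/ with new content hashes, __get_content() returns just the body, and __data() now emits one dict per row keyed by column name instead of raw lists:

    def __data(self):
        res = []
        for row in self.data.tolist():
            res.append({k: v for k, v in zip(self.columns, row)})
        return res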
03ce4490d3bb7a0bc29ba807dd7de0694b7b244e
|
Fix (plugin) job description
|
bulletin/tools/plugins/views/plugin.py
|
bulletin/tools/plugins/views/plugin.py
|
import braces.views
import django.core.urlresolvers
import django.shortcuts
import django.views.generic
from bulletin.views import SidebarView
from bulletin.tools.plugins.forms import plugin as forms
from bulletin.tools.plugins.utils import get_active_plugins
class PluginSubmitView(braces.views.LoginRequiredMixin,
                       braces.views.SetHeadlineMixin,
                       django.views.generic.FormView):
    """Allow user to select one of the installed plugin types,
    then redirect to the submission form for that type of Post.
    """
    template_name = 'plugins/choose_post_type.html'
    headline = 'Which type of Post?'
    form_class = forms.ChoosePostTypeForm

    def form_valid(self, form):
        selected_type = form.cleaned_data['post_types']
        type_url = django.core.urlresolvers.reverse(
            'bulletin:plugins:{selected_type}-submit'.format(
                selected_type=selected_type))
        return django.shortcuts.redirect(type_url)


class PluginUpdateView(braces.views.LoginRequiredMixin,
                       braces.views.SetHeadlineMixin,
                       django.views.generic.RedirectView):
    """An UpdateView that dispatches to an update view
    specific to the type of Post specified.
    """
    query_string = True

    def get_redirect_url(self, *args, **kwargs):
        post_type = kwargs['post_type']
        url = django.core.urlresolvers.reverse(
            'bulletin:plugins:{post_type}-update'.format(
                post_type=post_type),
            kwargs={'pk': kwargs['pk']})
        next_page = self.request.GET.get('next')
        if next_page:
            url += '?next={next}'.format(next=next_page)
        return url


class PluginListView(SidebarView,
                     braces.views.SetHeadlineMixin,
                     django.views.generic.TemplateView):
    """List the installed plugins.
    """
    template_name = 'plugins/plugin_list.html'
    headline = 'Submit an Item'

    def get_context_data(self, *args, **kwargs):
        context = super(PluginListView, self).get_context_data(*args, **kwargs)
        context['post_types'] = [
            {'name': 'News',
             'description': (
                 """
                 News submitted should be directly relevant to higher education
                 sustainability. The publication date of the news release or
                 article cannot be older than two months from date of
                 submission.
                 """),
             'button_caption': 'Submit a News Story',
             'submit_url': django.core.urlresolvers.reverse(
                 'bulletin:plugins:story-submit')},
            {'name': 'Opportunities',
             'description': (
                 """
                 Opportunities for the campus sustainability
                 community. We welcome submissions on a national,
                 regional and local level. Surveys directly related to
                 research on campus sustainability - both announcements
                 of such surveys as well as results available online -
                 will be considered for publication. Jobs, as well as
                 internships and fellowships, should not be submitted
                 as Opportunities; they should be submitted as Jobs.
                 """),
             'button_caption': 'Submit an Opportunity',
             'submit_url': django.core.urlresolvers.reverse(
                 'bulletin:plugins:opportunity-submit')},
            {'name': 'New Resources',
             'description': (
                 """
                 New resources must be accessible to the higher education
                 sustainability community at large. Examples include
                 campus sustainability reports, white papers, case
                 studies, new magazines or journals, original AASHE
                 content, websites, newsletters and videos.
                 """),
             'button_caption': 'Submit a New Resource',
             'submit_url': django.core.urlresolvers.reverse(
                 'bulletin:plugins:newresource-submit')},
            {'name': 'Events',
             'description': (
                 """
                 Events should involve significant participation beyond the
                 city or state/province, and should be focused on higher
                 education sustainability or have a major track clearly
                 dedicated to higher education.
                 """),
             'button_caption': 'Submit an Event',
             'submit_url': django.core.urlresolvers.reverse(
                 'bulletin:plugins:event-submit')},
            {'name': 'Jobs',
             'description': (
                 """
                 Job postings are free for member organizations.
                 Jobs should be directly related to furthering campus
                 sustainability (faculty positions in a
                 sustainability-related field can be included) and have
                 sustainability concepts as part of their job descriptions.

                 Jobs are included on the Bulletin website and in one
                 issue of the newsletter.

                 Internships and fellowships should be submitted as Jobs,
                 not as Opportunities.

                 We do not include job opportunities available to
                 students/faculty/staff at only one school, or programs that
                 are not new programs but are just seeking graduates. That's
what our ads are for. (<a
href="http://www.aashe.org/about/advertising-opportunities#advertising">
http://www.aashe.org/about/advertising-opportunities#advertising</a>)
"""),
'button_caption': 'Submit a Job',
'submit_url': django.core.urlresolvers.reverse(
'bulletin:plugins:job-submit')}
]
return context
class ModerationView(braces.views.SetHeadlineMixin,
django.views.generic.TemplateView):
headline = 'Unmoderated Posts'
template_name = 'plugins/moderation.html'
def get_context_data(self, **kwargs):
context = super(ModerationView, self).get_context_data(**kwargs)
context['unmoderated_posts'] = []
for plugin in get_active_plugins():
for unmoderated_post in plugin.model_class().objects.filter(
approved=None).order_by('-date_submitted'):
context['unmoderated_posts'].append(unmoderated_post)
context['next'] = self.request.get_full_path()
return context
|
Python
| 0.000004
|
@@ -5532,14 +5532,8 @@
es.
-That's
%0A
@@ -5550,50 +5550,10 @@
-what our ads are for. (%3Ca%0A
+%3Ca
hre
@@ -5643,77 +5643,40 @@
-http://www.aashe.org/about/advertising-opportunities#advertising
+That's what our ads are for
%3C/a%3E
-)
+.
%0A
|
619d2d7c1f0a02d4a24283acc859d295aeb73205
|
Improve coverage support documentation.
|
hunittest/coveragelib.py
|
hunittest/coveragelib.py
|
# -*- encoding: utf-8 -*-
"""Integration of the coverage package.
"""
import os
import re
import tempfile
import sys
import textwrap
from glob import glob
from warnings import warn
import argparse
from hunittest.collectlib import get_test_spec_last_pkg
from hunittest.utils import silent_stderr
try:
import coverage
except ImportError:
COVERAGE_ENABLED = False
else:
COVERAGE_ENABLED = True
def get_user_test_files(test_names, top_level_dir):
s = set()
for test_spec in test_names:
pkgname = get_test_spec_last_pkg(test_spec)
if pkgname is None:
# This is a top level module
mod = test_spec.partition(".")[0]
path = os.path.join(top_level_dir, mod + ".py")
if os.path.isfile(path):
s.add(path)
else:
path = re.subn(r"\.", "/", pkgname)[0]
path = os.path.join(path, "*")
s.add(path)
return s
def get_my_test_files():
# My own files
return [os.path.join(os.path.dirname(__file__), "*")]
def get_test_files_to_omit(test_names, top_level_dir):
l = get_my_test_files()
l.append(os.path.join(tempfile.gettempdir(), "*"))
l.append("/tmp/*")
if test_names is not None:
l.extend(get_user_test_files(test_names, top_level_dir))
return l
def write_sitecustomize(path):
with open(path, "w") as stream:
stream.write(textwrap.dedent("""\
import coverage
coverage.process_startup()
"""))
class CoverageInstrument(object):
"""Integration of the 'coverage' package.
This class is in charge of setting up the environment variable and site
customization script as well as clearing old data and generating the
coverage report.
    We create the site customization script in the user's project directory
    (generally the top level directory) because this directory is available
    on the path, and it avoids cluttering the site-packages directory with a
    script that would be loaded by many projects that do not need it.
    Most of the configuration of the coverage package does not happen here.
    The user must create a suitable configuration file at the root of their
    project (generally named .coveragerc). The run:parallel option must be set
    to true regardless of whether the tests will be run in parallel.
"""
def __init__(self,
top_level_dir=os.getcwd(),
config_file='.coveragerc',
reporters=None,
top_level_test_specs=None):
if not COVERAGE_ENABLED or reporters is None:
self.cov = None
return
self.top_level_dir = top_level_dir
self.SITECUSTOMIZE = os.path.join(top_level_dir, "sitecustomize.py")
if not os.path.isabs(config_file):
self.config_file = os.path.join(top_level_dir, config_file)
self.top_level_test_specs = top_level_test_specs
self.omit = get_test_files_to_omit(self.top_level_test_specs,
self.top_level_dir)
self.cov = coverage.Coverage(config_file=self.config_file,
omit=self.omit)
assert self.cov.get_option("run:parallel") == True, \
"The run:parallel option must be set to True in your coverage "\
"configuration file regardless you are "\
"running your test in parallel or not"
self.test_names = None
self.write_sitecustomize()
self.reporters = reporters
def __del__(self):
if self.cov is None:
return
try:
os.remove(self.SITECUSTOMIZE)
except FileNotFoundError:
pass
def write_sitecustomize(self):
if not os.path.exists(self.SITECUSTOMIZE):
write_sitecustomize(self.SITECUSTOMIZE)
def set_env(self):
os.environ["COVERAGE_PROCESS_START"] = self.config_file
def unset_env(self):
del os.environ["COVERAGE_PROCESS_START"]
def erase(self):
if self.cov is None:
return
self.cov.erase()
def start(self):
if self.cov is None:
return
self.set_env()
self.write_sitecustomize()
self.cov.start()
def stop(self):
if self.cov is None:
return
self.cov.stop()
self.unset_env()
# Coverage is confused because it collect no data when we load
# the test module whereas the code at module level is important. We
# silent its warning.
with silent_stderr():
self.cov.save()
def _drop_combined_data_suffix(self):
"""Get rid of the combined data file suffix.
        In parallel mode, coverage always adds a suffix to the saved file.
        Surprisingly, it also does so for the combined data file, which by
        definition should not be suffixed. Anyway, the coveralls package
        needs a combined data file named .coverage to successfully make its
        report. This function does the file renaming.
"""
filename = self.cov.data_files.filename
pattern = filename+".*"
filenames = glob(pattern)
nfilenames = len(filenames)
if nfilenames == 1:
os.rename(filenames[0], filename)
else:
warn("found exactly {} coverage file(s) matching '{}' from '{}'; "\
"combination may have failed".format(nfilenames,
pattern,
os.getcwd()))
def combine(self):
if self.cov is None:
return
self.cov.combine()
self.cov.save()
self._drop_combined_data_suffix()
self.combined = True
def report(self):
if self.cov is None:
return
if "term" in self.reporters:
self.cov.report(show_missing=False)
if "term-missing" in self.reporters:
self.cov.report(show_missing=True)
if "annotate" in self.reporters:
self.cov.annotate()
if "html" in self.reporters:
self.cov.html_report()
if "xml" in self.reporters:
self.cov.xml_report()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
return False # Tell to re-raise the exception if there was one.
def add_coverage_cmdline_arguments(parser):
DEFAULT_REPORTER = "term"
VALID_REPORTERS = set((DEFAULT_REPORTER, "term-missing",
"annotate", "html", "xml"))
if COVERAGE_ENABLED:
help_msg = "Comma separated list of report types to generate "\
"(any combination of: {})"\
.format(", ".join(VALID_REPORTERS))
def coverage_param(param_str):
reporters = set(param_str.split(","))
invalid_reporters = reporters - VALID_REPORTERS
if invalid_reporters:
raise argparse.ArgumentTypeError(
"invalid coverage reporters: {}"
.format(", ".join(invalid_reporters)))
return reporters
else:
help_msg = "install 'coverage' package to enable this option"
def coverage_param(param_str):
raise argparse.ArgumentTypeError("'coverage' package is installed")
parser.add_argument(
"--coverage",
type=coverage_param,
action='store',
metavar="REPORTERS",
nargs='?',
const=DEFAULT_REPORTER,
default=None,
help=help_msg)
|
Python
| 0
|
@@ -2340,16 +2340,260 @@
arallel.
+%0A%0A    Although we automatically compute a quite accurate list of files to omit,%0A    we encourage users to set the 'source' option in their configuration files%0A    so that the coverage package can warn them when a file is not covered at%0A    all.
%0A %22%22%22
|
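The added docstring text asks users to keep run:parallel enabled and to set the source option themselves. A minimal .coveragerc along those lines (the package name is a placeholder, not from the source):

# .coveragerc -- hypothetical minimal setup for use with CoverageInstrument
[run]
parallel = true       # required even when tests are not actually run in parallel
source = mypackage    # lets coverage warn about files that are never imported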
679549fd5bd51d853658b8d7bcd9c55227b4f9c8
|
fix typo
|
jira_integration_example/utilities/alert.py
|
jira_integration_example/utilities/alert.py
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from google.cloud import monitoring_v3
from google.protobuf.duration_pb2 import Duration
PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
TRIGGER_VALUE = 3.0
class TestCustomMetricClient():
"""Client that can create and modify custom metrics
used as monitoring data for testing purposes.
Attributes:
_project_id: The id of the project to store metrics
_client: A monitoring_v3.MetricServiceClient instance to modify metric data
"""
def __init__(self, project_id):
self._project_id = project_id
self._client = monitoring_v3.MetricServiceClient()
def create_custom_metric(self, metric_name):
# [START monitoring_create_metric]
project_name = self._client.project_path(self._project_id)
descriptor = monitoring_v3.types.MetricDescriptor()
descriptor.type = 'custom.googleapis.com/' + metric_name
descriptor.metric_kind = (
monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
descriptor.value_type = (
monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
descriptor.description = 'A custom metric meant for testing purposes'
descriptor = self._client.create_metric_descriptor(project_name, descriptor)
print('Created {}.'.format(descriptor.name))
# [END monitoring_create_metric]
def append_to_time_series(self, metric_name, point_value):
# [START monitoring_write_timeseries]
project_name = self._client.project_path(self._project_id)
series = monitoring_v3.types.TimeSeries()
series.metric.type = 'custom.googleapis.com/' + metric_name
series.resource.type = 'gce_instance'
series.resource.labels['instance_id'] = '1234567890123456789'
series.resource.labels['zone'] = 'us-central1-f'
point = series.points.add()
point.value.double_value = point_value
now = time.time()
point.interval.end_time.seconds = int(now)
point.interval.end_time.nanos = int(
(now - point.interval.end_time.seconds) * 10**9)
self._client.create_time_series(project_name, [series])
# [END monitoring_write_timeseries]
class TestPolicyClient():
"""Client that can create and modify alerting policies and
trigger/resolve incidents for testing purposes.
Attributes:
_project_id: The id of the project to add or modify policies to
_policy_client: A monitoring_v3.AlertPolicyServiceClient instance to call the alertPolicies API
_metric_client: A TestCustomMetricClient() instance to modify metric data
to trigger/resolve incidents
_threshold_value: Value above which a policy triggers an incident
"""
def __init__(self, project_id):
self._project_id = project_id
self._policy_client = monitoring_v3.AlertPolicyServiceClient()
self._metric_client = TestCustomMetricClient(self._project_id)
self._threshold_value = 3.0
def create_policy(self, display_name, metric_name):
"""Creates an alert policy with the given display name.
By default, a policy is made with a single condition that triggers
if a custom metric with the given metric name is above the threshold value.
Args:
display_name: the name to identify the policy by
metric_name: metric to attach the policy to
"""
name = self._policy_client.project_path(self._project_id)
# TODO: fill in
condition_threshold = monitoring_v3.types.AlertPolicy.Condition.MetricThreshold(
filter=f'metric.type = "custom.googleapis.com/${metric_name}" AND resource.type = "gce_instance"',
comparison=monitoring_v3.enums.ComparisonType.COMPARISON_GT,
threshold_value=self._threshold_value,
duration=Duration(seconds=0)
)
condition = monitoring_v3.types.AlertPolicy.Condition(
display_name='test condition'
condition_threshold=condition_threshold
)
alert_policy = monitoring_v3.types.AlertPolicy(
display_name=display_name,
conditions=[condition]
)
self._policy_client.create_alert_policy(name, alert_policy)
def trigger_incident(self, metric_name):
        self._metric_client.append_to_time_series(metric_name, TRIGGER_VALUE + 1)
def resolve_incident(self, metric_name):
        self._metric_client.append_to_time_series(metric_name, TRIGGER_VALUE - 1)
def main():
client = TestPolicyClient('alertmanager-cloudmon-test')
client.create_policy('test', 'custom_metric')
if __name__ == '__main__':
main()
|
Python
| 0.999991
|
@@ -4621,16 +4621,17 @@
ndition'
+,
%0A
|
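The one-character diff above adds the comma that was missing between two keyword arguments; without it the module cannot even be imported. A reduced, runnable illustration (the Condition class here is a stand-in, not the real monitoring API):

class Condition(object):
    def __init__(self, display_name=None, condition_threshold=None):
        self.display_name = display_name
        self.condition_threshold = condition_threshold

# Broken:  Condition(display_name='test condition'  condition_threshold=1)
# fails with SyntaxError before anything runs -- kwargs need a separating comma.
c = Condition(
    display_name='test condition',   # the comma added by the commit
    condition_threshold=1,
)
print(c.display_name, c.condition_threshold)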
87244598ed08e790835818656ecba0178bb7ca89
|
Upgrade to a better version
|
fsplit/__init__.py
|
fsplit/__init__.py
|
#!/usr/bin/env python2
##
# fsplit
# https://github.com/leosartaj/fsplit.git
#
# Copyright (c) 2014 Sartaj Singh
# Licensed under the MIT license.
##
from info import __version__ # define __version__ variable
from info import __desc__ # define __desc__ variable for description
|
Python
| 0
|
@@ -146,24 +146,25 @@
e.%0A##%0A%0Afrom
+.
info import
@@ -210,16 +210,17 @@
le%0Afrom
+.
info imp
|
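The diff converts two implicit relative imports (Python 2 only) into explicit package-relative imports that work on both major versions. Sketch of the idiom in a hypothetical package layout:

# mypkg/info.py (hypothetical):
#     __version__ = '0.1.0'
#     __desc__ = 'demo package'

# mypkg/__init__.py -- the explicit form works on Python 2.6+ and 3.x alike;
# the bare `import info` form is a Python-2-only implicit relative import.
from .info import __version__
from .info import __desc__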
1492c853f2efab4b0a3f2097e09fd2d1fc875748
|
Add Python 3.x compatibility.
|
pipeline_browserify/compiler.py
|
pipeline_browserify/compiler.py
|
from pipeline.compilers import SubProcessCompiler
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from pipeline.exceptions import CompilerError
from warnings import warn
class BrowserifyCompiler(SubProcessCompiler):
output_extension = 'browserified.js'
def match_file(self, path):
if self.verbose:
print('matching file:', path)
return path.endswith('.browserify.js')
    # similar to the old helper removed in https://github.com/jazzband/django-pipeline/commit/1f6b48ae74026a12f955f2f15f9f08823d744515
def simple_execute_command(self, cmd, **kwargs):
import subprocess
try:
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
except OSError as e:
raise CompilerError("Compiler failed to execute. (%s)" % e, command=cmd, error_output="Executing the compiler resulted in an %s from the system.\n\nThis is most likely due to the `browserify` executable not being found in your PATH (if it is installed globally), or a misconfigured BROWSERIFY_BINARY setting (if you are using a non-global install)." % repr(e))
stdout, stderr = pipe.communicate()
if self.verbose:
print stdout
print stderr
if pipe.returncode != 0:
raise CompilerError("Compiler returned non-zero exit status %i" % pipe.returncode, command=cmd, error_output=stderr)
return stdout
def _get_cmd_parts(self):
pipeline_settings = getattr(settings, 'PIPELINE', {})
tool = pipeline_settings.get('BROWSERIFY_BINARY', "browserify")
old_args = pipeline_settings.get('BROWSERIFY_ARGUMENTS', '')
if old_args:
warn("You are using the string-based BROWSERIFY_ARGUMENTS setting. Please migrate to providing a list of arguments via BROWSERIFY_ARGS instead.", DeprecationWarning)
args = old_args.split()
else:
args = pipeline_settings.get('BROWSERIFY_ARGS', [])
old_env = pipeline_settings.get('BROWSERIFY_VARS', '')
if old_env:
warn("You are using the string-based BROWSERIFY_VARS setting. Please migrate to providing a dict of environment variables via BROWSERIFY_ENV instead.", DeprecationWarning)
env = dict(map(lambda s: s.split('='), old_env.split()))
else:
env = pipeline_settings.get('BROWSERIFY_ENV', None)
if env:
# when there's custom variables, we need to explicitly pass along the original environment
import os
_env = {}
_env.update(os.environ)
_env.update(env)
env = _env
return tool, args, env
def compile_file(self, infile, outfile, outdated=False, force=False):
if not force and not outdated:
return
tool, args, env = self._get_cmd_parts()
args.extend([infile, '--outfile', outfile])
cmd = [tool] + args
if self.verbose:
print "compile_file command:", cmd, env
self.simple_execute_command(cmd, env=env)
def is_outdated(self, infile, outfile):
"""Check if the input file is outdated.
The difficulty with the default implementation is that any file that is
`require`d from the entry-point file will not trigger a recompile if it
is modified. This overloaded version of the method corrects this by generating
a list of all required files that are also a part of the storage manifest
and checking if they've been modified since the last compile.
The command used to generate the list of dependencies is the same as the compile
command but uses the `--list` option instead of `--outfile`.
WARNING: It seems to me that just generating the dependencies may take just
as long as actually compiling, which would mean we would be better off just
forcing a compile every time.
"""
# Preliminary check for simply missing file or modified entry-point file.
if super(BrowserifyCompiler, self).is_outdated(infile, outfile):
return True
# Otherwise we need to see what dependencies there are now, and if they're modified.
tool, args, env = self._get_cmd_parts()
args.extend(['--list', infile])
cmd = [tool] + args
if self.verbose:
print "is_outdated command:", cmd, env
dep_list = self.simple_execute_command(cmd, env=env)
if self.verbose:
print "dep_list is:", dep_list
for dep_file in dep_list.strip().split('\n'):
if super(BrowserifyCompiler, self).is_outdated(dep_file, outfile):
if self.verbose:
print "Found dep_file \"%s\" updated." % dep_file
return True
return False
|
Python
| 0
|
@@ -1277,23 +1277,24 @@
print
-
+(
stdout
+)
%0A
@@ -1303,23 +1303,24 @@
print
-
+(
stderr
+)
%0A
@@ -3095,17 +3095,17 @@
print
-
+(
%22compile
@@ -3121,32 +3121,33 @@
mand:%22, cmd, env
+)
%0A self.si
@@ -4528,17 +4528,17 @@
print
-
+(
%22is_outd
@@ -4557,24 +4557,25 @@
:%22, cmd, env
+)
%0A dep
@@ -4666,17 +4666,17 @@
print
-
+(
%22dep_lis
@@ -4691,16 +4691,17 @@
dep_list
+)
%0A
@@ -4884,17 +4884,17 @@
print
-
+(
%22Found d
@@ -4928,16 +4928,17 @@
dep_file
+)
%0A
|
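The diff mechanically parenthesizes every print statement. An equivalent approach that keeps a single code path valid under both interpreters is the __future__ import; a small runnable sketch:

from __future__ import print_function  # no-op on Python 3, enables print() on 2.x

stdout, stderr = b'build ok', b''
print(stdout)                                   # same behaviour on 2.7 and 3.x
print("is_outdated command:", ["browserify", "--list"])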
d879440cef6bc1985ab4e8bf8f81163b661beb1b
|
change locale and admin settings
|
censusreporter/config/base/settings.py
|
censusreporter/config/base/settings.py
|
# Django settings for censusreporter project.
import os
dirname = os.path.dirname
PROJECT_ROOT = os.path.abspath(os.path.join(dirname(__file__),"..",".."))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# should be set by each settings file
# ROOT_URLCONF = 'config.dev.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '%s/census_app_db' % PROJECT_ROOT,
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'census',
)
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
USE_TZ = True
SECRET_KEY = '!%j-u4&(q8qu4@dq=ukth27+q!v-!h^jck14bf=spqht847$4q'
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = PROJECT_ROOT + '/static/'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'censusreporter.wsgi.application'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
ADMINS = (
('Ian Dees', 'ian.dees@gmail.com'),
('Joe Germuska', 'joegermuska@gmail.com'),
('Ryan Pitts', 'ryan.a.pitts@gmail.com'),
)
MANAGERS = ADMINS
API_URL = 'http://api.censusreporter.org'
|
Python
| 0
|
@@ -719,22 +719,26 @@
= 'A
-me
+f
rica/
-Chicago
+Johannesburg
'%0ALA
@@ -756,17 +756,17 @@
= 'en-u
-s
+k
'%0ASITE_I
@@ -2911,121 +2911,75 @@
('
-Ian Dees', 'ian.dees@gmail.com'),%0A ('Joe Germuska', 'joegermuska@gmail.com'),%0A ('Ryan Pitts', 'ryan.a.
+Greg Kempe', 'greg@kempe.net'),%0A ('Rizmari Versfeld', 'rizzie
pit
-ts
@gma
|
85124382cd3f90b439b27c8dd5a82f47925ddab7
|
fix settings print
|
mppsolar/__init__.py
|
mppsolar/__init__.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import logging
from argparse import ArgumentParser
# import mppcommands
import mpputils
logger = logging.getLogger()
# if __name__ == '__main__':
def main():
parser = ArgumentParser(description='MPP Solar Command Utility')
parser.add_argument('-c', '--command', help='Command to run', default='QID')
parser.add_argument('-ll', '--loglevel',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level')
parser.add_argument('-d', '--device', type=str, help='Serial device to communicate with', default='/dev/ttyUSB0')
parser.add_argument('-b', '--baud', type=int, help='Baud rate for serial communications', default=2400)
parser.add_argument('-l', '--listknown', action='store_true', help='List known commands')
parser.add_argument('-s', '--getStatus', action='store_true', help='Get Inverter Status')
parser.add_argument('-t', '--getSettings', action='store_true', help='Get Inverter Settings')
parser.add_argument('-H', '--makepretty', action='store_true', help='Display result with descriptions etc if possible')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
logging.debug('command %s', args.command)
logging.debug('Serial device used: %s, baud rate: %d', args.device, args.baud)
# mp = mppcommands.mppCommands(args.device, args.baud)
mp = mpputils.mppUtils(args.device, args.baud)
if(args.listknown):
for line in mp.getKnownCommands():
print line
elif(args.getStatus):
fullStatus = mp.getFullStatus()
for key in fullStatus:
print key, fullStatus[key]['value'], fullStatus[key]['unit']
elif(args.getSettings):
for line in mp.getSettings():
print line, line['value'], line['unit']
else:
# TODO: check if command is valid
# maybe check if query or setter and ...
if(args.makepretty):
for line in mp.getResponsePretty(args.command):
print line
else:
print mp.getResponse(args.command)
|
Python
| 0.000001
|
@@ -1724,16 +1724,35 @@
print
+%22%7B%7D%5Ct%7B%7D %7B%7D%22.format(
key, ful
@@ -1793,24 +1793,25 @@
key%5D%5B'unit'%5D
+)
%0A elif(ar
@@ -1831,35 +1831,34 @@
s):%0A
-for line in
+settings =
mp.getSetti
@@ -1866,61 +1866,237 @@
gs()
-:%0A print line, line%5B'value'%5D, line
+%0A for key in settings:%0A print %22%7B%7D%5Ct%7B%7D%5Ct%7B%7D %7B%7D%22.format(key, settings%5Bkey%5D%5B'default'%5D,%0A settings%5Bkey%5D%5B'value'%5D,%0A settings%5Bkey%5D
%5B'unit'%5D
%0A
@@ -2091,16 +2091,17 @@
%5B'unit'%5D
+)
%0A els
|
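The diff replaces the comma-separated print statements with str.format calls so each settings row prints as an aligned key/default/value/unit line. Standalone sketch with hypothetical inverter settings:

settings = {
    'Battery_Bulk_Charge_Voltage': {'default': 56.4, 'value': 57.4, 'unit': 'V'},
    'AC_Output_Frequency':         {'default': 50.0, 'value': 50.0, 'unit': 'Hz'},
}
for key in settings:
    # key <tab> default <tab> value unit -- mirrors the patched output format
    print("{}\t{}\t{} {}".format(key,
                                 settings[key]['default'],
                                 settings[key]['value'],
                                 settings[key]['unit']))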
bafdbd28e35d80d28bfb82c23532533cb2915066
|
Add docs for MissingInputFiles 'message' arg.
|
fuel/exceptions.py
|
fuel/exceptions.py
|
class AxisLabelsMismatchError(ValueError):
"""Raised when a pair of axis labels tuples do not match."""
class ConfigurationError(Exception):
"""Error raised when a configuration value is requested but not set."""
class MissingInputFiles(Exception):
"""Exception raised by a converter when input files are not found.
Parameters
----------
filenames : list
A list of filenames that were not found.
"""
def __init__(self, message, filenames):
self.filenames = filenames
super(MissingInputFiles, self).__init__(message, filenames)
class NeedURLPrefix(Exception):
"""Raised when a URL is not provided for a file."""
|
Python
| 0
|
@@ -356,16 +356,98 @@
-------%0A
+ message : str%0A The error message to be associated with this exception.%0A
file
|
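The diff only adds documentation for the message argument that __init__ already accepted. A usage sketch of the documented signature:

class MissingInputFiles(Exception):
    """Raised by a converter when input files are not found."""
    def __init__(self, message, filenames):
        self.filenames = filenames
        super(MissingInputFiles, self).__init__(message, filenames)

try:
    raise MissingInputFiles('input files not found', ['train.hdf5', 'test.hdf5'])
except MissingInputFiles as e:
    print(e.filenames)   # ['train.hdf5', 'test.hdf5']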
d2eda42f9f5769d0d42ab38d6bfd912ccff53327
|
remove Q1 from status data (as doesnt work on all inverters and is undocumented)
|
mppsolar/mpputils.py
|
mppsolar/mpputils.py
|
"""
MPP Solar Inverter Command Library
library of utility and helpers for MPP Solar PIP-4048MS inverters
mpputils.py
"""
import logging
from .mppinverter import mppInverter
from .mppinverter import NoDeviceError
log = logging.getLogger('MPP-Solar')
def getVal(_dict, key, ind=None):
if key not in _dict:
return ""
if ind is None:
return _dict[key]
else:
return _dict[key][ind]
class mppUtils:
"""
MPP Solar Inverter Utility Library
"""
def __init__(self, serial_device=None, baud_rate=2400, inverter_model='standard'):
if (serial_device is None):
raise NoDeviceError("A serial device must be supplied, e.g. /dev/ttyUSB0")
self.inverter = mppInverter(serial_device, baud_rate, inverter_model)
def getKnownCommands(self):
return self.inverter.getAllCommands()
def getResponseDict(self, cmd):
return self.inverter.getResponseDict(cmd)
def getResponse(self, cmd):
return self.inverter.getResponse(cmd)
def getSerialNumber(self):
return self.inverter.getSerialNumber()
def getFullStatus(self):
"""
Helper function that returns all the status data
"""
status = {}
# serial_number = self.getSerialNumber()
data = self.getResponseDict("Q1")
data.update(self.getResponseDict("QPIGS"))
        # Need to get 'Parallel' info, but don't know what the parallel number for the correct inverter is...
# parallel_data = self.mp.getResponseDict("QPGS0")
# This 'hack' only works for 2 inverters in parallel.
# if parallel_data['serial_number'][0] != self.getSerialNumber():
# parallel_data = self.mp.getResponseDict("QPGS1")
# status_data.update(parallel_data)
for item in data.keys():
key = '{}'.format(item).replace(" ", "_")
status[key] = {"value": data[key][0], "unit": data[key][1]}
# Still have 'Device Status' from QPIGS
# Still have QPGSn
return status
def getSettings(self):
"""
Query inverter for all current settings
"""
# serial_number = self.getSerialNumber()
default_settings = self.getResponseDict("QDI")
current_settings = self.getResponseDict("QPIRI")
flag_settings = self.getResponseDict("QFLAG")
# current_settings.update(flag_settings) # Combine current and flag settings dicts
settings = {}
# {"Battery Bulk Charge Voltage": {"unit": "V", "default": 56.4, "value": 57.4}}
for item in current_settings.keys():
key = '{}'.format(item).replace(" ", "_")
settings[key] = {"value": getVal(current_settings, key, 0),
"unit": getVal(current_settings, key, 1),
"default": getVal(default_settings, key, 0)}
for key in flag_settings:
_key = '{}'.format(key).replace(" ", "_")
if _key in settings:
settings[_key]['value'] = getVal(flag_settings, key, 0)
else:
settings[_key] = {'value': getVal(flag_settings, key, 0), "unit": "", "default": ""}
return settings
|
Python
| 0
|
@@ -1314,17 +1314,20 @@
eDict(%22Q
-1
+PIGS
%22)%0A
@@ -1324,24 +1324,26 @@
GS%22)%0A
+ #
data.update
@@ -1366,20 +1366,17 @@
eDict(%22Q
-PIGS
+1
%22))%0A%0A
|
0da95bdfc184614edca41b41ac3409295352fff6
|
Update days_between.py
|
checkio/python/oreilly/days_between.py
|
checkio/python/oreilly/days_between.py
|
Python
| 0.000003
|
@@ -0,0 +1,440 @@
+import datetime%0D%0A%0D%0Adef days_diff(date1, date2):%0D%0A d1 = datetime.datetime(*date1)%0D%0A d2 = datetime.datetime(*date2)%0D%0A return abs((d2-d1).days)%0D%0A%0D%0Aif __name__ == '__main__':%0D%0A #These %22asserts%22 using only for self-checking and not necessary for auto-testing%0D%0A assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3%0D%0A assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238%0D%0A assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238%0D%0A
|
|
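The diff for this new file is URL-encoded; decoded, it adds a days_diff helper whose abs() call makes the result independent of argument order. Shown here decoded and runnable:

import datetime

def days_diff(date1, date2):
    # abs() makes the difference order-independent
    d1 = datetime.datetime(*date1)
    d2 = datetime.datetime(*date2)
    return abs((d2 - d1).days)

assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238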
e35649188f10e99381926318192c856e85245ef9
|
update apk version to support toast
|
uiautomator2/version.py
|
uiautomator2/version.py
|
# coding: utf-8
#
__apk_version__ = '1.0.11'
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.2.1'
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap buildin
|
Python
| 0
|
@@ -36,18 +36,45 @@
= '1.0.1
-1'
+2'%0A# 1.0.12 add toast support
%0A# 1.0.1
|
2dd60e236929c7b1f3e922f4ab3b04fac586fdfa
|
use --reuse-port
|
pifpaf/drivers/gnocchi.py
|
pifpaf/drivers/gnocchi.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils import spawn
import os
import shutil
import uuid
import six.moves.urllib.parse as urlparse
from pifpaf import drivers
from pifpaf.drivers import postgresql
from pifpaf.drivers import redis
class GnocchiDriver(drivers.Driver):
DEFAULT_PORT = 8041
DEFAULT_PORT_INDEXER = 9541
DEFAULT_PORT_COORDINATOR = 9542
def __init__(self, port=DEFAULT_PORT, indexer_port=DEFAULT_PORT_INDEXER,
statsd_port=None,
indexer_url=None,
storage_url=None,
coordination_driver="default",
coordination_port=DEFAULT_PORT_COORDINATOR,
**kwargs):
super(GnocchiDriver, self).__init__(**kwargs)
self.port = port
self.indexer_port = indexer_port
self.indexer_url = indexer_url
self.storage_url = storage_url
self.statsd_port = statsd_port
self.coordination_driver = coordination_driver
self.coordination_port = coordination_port
@classmethod
def get_parser(cls, parser):
parser.add_argument("--port",
type=int,
default=cls.DEFAULT_PORT,
help="port to use for Gnocchi HTTP API")
parser.add_argument("--statsd-port",
type=int,
help="port to use for gnocchi-statsd")
parser.add_argument("--indexer-port",
type=int,
default=cls.DEFAULT_PORT_INDEXER,
help="port to use for Gnocchi indexer")
parser.add_argument("--coordination-port",
type=int,
default=cls.DEFAULT_PORT_COORDINATOR,
help="port to use for Gnocchi coordination")
parser.add_argument("--coordination-driver",
default="default",
choices=["default", "redis"],
nargs="?",
help="Select a coordination driver")
parser.add_argument("--indexer-url", help="indexer URL to use")
parser.add_argument("--storage-url", help="storage URL to use")
return parser
def _setUp(self):
super(GnocchiDriver, self)._setUp()
try:
shutil.copy(self.find_config_file("gnocchi/api-paste.ini"),
self.tempdir)
except RuntimeError:
pass
try:
shutil.copy(self.find_config_file("gnocchi/policy.json"),
self.tempdir)
except RuntimeError:
pass
if self.indexer_url is None:
pg = self.useFixture(
postgresql.PostgreSQLDriver(port=self.indexer_port))
self.indexer_url = pg.url
if self.storage_url is None:
self.storage_url = "file://%s" % self.tempdir
conffile = os.path.join(self.tempdir, "gnocchi.conf")
storage_parsed = urlparse.urlparse(self.storage_url)
storage_driver = storage_parsed.scheme
if storage_driver == "s3":
storage_config = {
"s3_access_key_id": (urlparse.unquote(storage_parsed.username
or "gnocchi")),
"s3_secret_access_key": (
urlparse.unquote(storage_parsed.password
or "whatever")),
"s3_endpoint_url": "http://%s:%s/%s" % (
storage_parsed.hostname,
storage_parsed.port,
storage_parsed.path,
)
}
elif storage_driver == "swift":
storage_config = {
"swift_auth_url": "http://%s:%s/%s" % (
storage_parsed.hostname,
storage_parsed.port,
storage_parsed.path,
),
"swift_user": (urlparse.unquote(storage_parsed.username
or "admin:admin")),
"swift_key": (urlparse.unquote(storage_parsed.password
or "admin")),
}
elif storage_driver == "ceph":
storage_config = {
"ceph_conffile": storage_parsed.path,
}
elif storage_driver == "redis":
storage_config = {
"redis_url": self.storage_url,
}
elif storage_driver == "file":
storage_config = {
"file_basepath": (storage_parsed.path
or self.tempdir),
}
else:
raise RuntimeError("Storage driver %s is not supported" %
storage_driver)
if self.coordination_driver == "redis":
r = self.useFixture(redis.RedisDriver(port=self.coordination_port))
storage_config["coordination_url"] = r.url
storage_config_string = "\n".join(
"%s = %s" % (k, v)
for k, v in storage_config.items()
)
statsd_resource_id = str(uuid.uuid4())
with open(conffile, "w") as f:
f.write("""[DEFAULT]
debug = %s
verbose = True
[storage]
driver = %s
%s
[metricd]
metric_processing_delay = 1
metric_cleanup_delay = 1
workers = 1
[statsd]
resource_id = %s
creator = admin
user_id = admin
project_id = admin
[indexer]
url = %s""" % (self.debug,
storage_driver,
storage_config_string,
statsd_resource_id,
self.indexer_url))
self._exec(["gnocchi-upgrade", "--config-file=%s" % conffile])
c, _ = self._exec(["gnocchi-metricd", "--config-file=%s" % conffile],
wait_for_line="metrics wait to be processed")
self.addCleanup(self._kill, c.pid)
c, _ = self._exec(["gnocchi-statsd", "--config-file=%s" % conffile],
wait_for_line=("(Resource .* already exists"
"|Created resource )"))
self.addCleanup(self._kill, c.pid)
c, _ = self._exec([
"uwsgi",
"--http", "localhost:%d" % self.port,
"--wsgi-file", spawn.find_executable("gnocchi-api"),
"--master",
"--die-on-term",
"--lazy-apps",
"--no-orphans",
"--enable-threads",
"--chdir", self.tempdir,
"--add-header", "Connection: close",
"--pyargv", "--config-file=%s" % conffile,
], wait_for_line="WSGI app 0 \(mountpoint=''\) ready")
self.addCleanup(self._kill, c.pid)
self.http_url = "http://localhost:%d" % self.port
self.putenv("GNOCCHI_PORT", str(self.port))
self.putenv("URL", "gnocchi://localhost:%d" % self.port)
self.putenv("GNOCCHI_HTTP_URL", self.http_url)
self.putenv("GNOCCHI_ENDPOINT", self.http_url, True)
self.putenv("OS_AUTH_TYPE", "gnocchi-basic", True)
self.putenv("GNOCCHI_STATSD_RESOURCE_ID", statsd_resource_id, True)
self.putenv("GNOCCHI_USER", "admin", True)
|
Python
| 0.000001
|
@@ -6845,24 +6845,52 @@
cchi-api%22),%0A
+ %22--reuse-port%22,%0A
|
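The three-line diff inserts uwsgi's --reuse-port flag into the command list, which (via SO_REUSEPORT) lets a restarted server bind while an old socket is still lingering. Reduced sketch of how the argument list is assembled:

from distutils import spawn

port = 8041
cmd = [
    "uwsgi",
    "--http", "localhost:%d" % port,
    "--wsgi-file", spawn.find_executable("gnocchi-api"),
    "--reuse-port",    # the flag added by this commit
    "--master",
    "--die-on-term",
]
print(cmd)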
f7034b2988d9ebcb3836b3e12b609f6098269f2c
|
remove unused shebang
|
potty_oh/waveform.py
|
potty_oh/waveform.py
|
#!/usr/bin/env python
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Waveform or Signal Generator Library for creating audio waveforms."""
import math
import numpy
class Generator(object):
def __init__(self, length=1.0, framerate=44100, verbose=False):
self.length = length
self.framerate = framerate
self.verbose = verbose
def _init(self, length=None, framerate=None, verbose=None, **kwargs):
if length:
self.length = length
if framerate:
self.framerate = framerate
if verbose:
self.verbose = verbose
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
self.dprint('framecount = %s' % self.framecount)
self.dprint('rectified length = %s' % self.length)
self.wavedata = numpy.zeros((self.framecount, 1))
def dprint(self, msg):
"""Conditionally print a debugging message."""
if self.verbose:
print(msg)
def whitenoise(self, *args, **kwargs):
"""Random Gaussian White Noise."""
self._init(*args, **kwargs)
self.wavedata = numpy.random.randn(self.framecount, 1)
return self.wavedata
def _sinusoid_amplitude(self, frame, frequency):
"""Calculate the amplitude of a sinusoid wave at a given frequency."""
# avoid divide by zero
frame = 0.001 if frame is 0 else frame
return math.sin(frame /
((self.framerate / frequency) / math.pi))
def sin_constant(self, frequency, *args, **kwargs):
"""Sinusoid wave of constant frequency."""
self._init(*args, **kwargs)
frequency = float(frequency)
for frame in range(self.framecount):
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
def sin_linear(self, start_freq, end_freq, *args, **kwargs):
"""Sinusoid wave of linearly changing frequency."""
self._init(*args, **kwargs)
for frame in range(self.framecount):
# freq = start_freq + frame * freq_rate
# freq_rate = total_freq_change / framecount
frequency = start_freq + frame * (
float(end_freq - start_freq) / self.framecount)
amplitude = self._sinusoid_amplitude(frame, frequency)
self.wavedata[frame, 0] = amplitude
return self.wavedata
|
Python
| 0.000002
|
@@ -1,26 +1,4 @@
-#!/usr/bin/env python%0A
# Co
|
c3ff6aa43fd058153887996c02fcc60d7262e23b
|
Update comments and whitespace
|
update_aimpoint_data.py
|
update_aimpoint_data.py
|
#!/usr/bin/env python
import os
import re
import shelve
import argparse
import tables
import numpy as np
from Chandra.Time import DateTime
from astropy.table import Table, Column, vstack
from astropy.time import Time
from mica.archive import asp_l1
from Ska.DBI import DBI
from mica.common import MICA_ARCHIVE
import pyyaks.logger
def get_opt():
parser = argparse.ArgumentParser(description='Get aimpoint drift data '
'from aspect solution files')
parser.add_argument("--data-root",
default=".",
help="Root directory for asol and index files")
parser.add_argument("--start",
help="Start time for processing (default=stop - 30 days)")
parser.add_argument("--stop",
help="Processing stop date (default=NOW)")
parser.add_argument("--dt",
default=1.0,
help="Sample delta time (ksec, default=1.0)")
return parser.parse_args()
def get_asol(obsid, asol_files, dt):
logger.info('Reading...\n{}'.format('\n'.join(asol_files)))
asols = [Table.read(asol_file) for asol_file in asol_files]
# Check to see if they all have raw columns
has_raws = ['ady' in asol.colnames for asol in asols]
if np.any(has_raws) and not np.all(has_raws):
raise ValueError("Some asol files have raw cols and some do not")
# Get just the useful columns
if np.any(has_raws):
cols = ('time', 'ady', 'adz', 'adtheta')
else:
cols = ('time', 'dy', 'dz', 'dtheta')
asols = [asol[cols] for asol in asols]
asol = vstack(asols)
# And rename any raw columns to use the old names
if np.any(has_raws):
asol.rename_column('ady', 'dy')
asol.rename_column('adz', 'dz')
asol.rename_column('adtheta', 'dtheta')
t0, t1 = asol['time'][[10, -10]]
n_times = 2 + int((t1 - t0) // (dt * 1000))
times = np.linspace(t0, t1, n_times)
idx = np.searchsorted(asol['time'], times)
asol = asol[idx]
asol = Table([col.astype(col.dtype.str[1:]) for col in asol.columns.values()])
asol.add_column(Column([obsid] * len(asol), name='obsid'), index=0)
return asol
def add_asol_to_h5(filename, asol):
asol = asol.as_array()
with tables.openFile(filename, mode='a',
filters=tables.Filters(complevel=5, complib='zlib')) as h5:
try:
logger.info('Appending {} records to {}'.format(len(asol), filename))
h5.root.data.append(asol)
except tables.NoSuchNodeError:
logger.info('Creating {}'.format(filename))
h5.createTable(h5.root, 'data', asol, "Aimpoint drift", expectedrows=1e6)
h5.root.data.flush()
# Set up logging
loglevel = pyyaks.logger.INFO
logger = pyyaks.logger.get_logger(name='update_aimpoint_data', level=loglevel,
format="%(asctime)s %(message)s")
# Get options
opt = get_opt()
stop = DateTime(opt.stop)
start = stop - 10 if (opt.start is None) else DateTime(opt.start)
logger.info('Processing from {} to {}'.format(start.date, stop.date))
# Define file names
h5_file = os.path.join(opt.data_root, 'aimpoint_asol_values.h5')
# When we go to PY3, just remove '.shelve' to make this work.
obsid_file = os.path.join(opt.data_root, 'aimpoint_obsid_index.shelve')
# Get obsids in date range
mica_obspar_db = os.path.join(MICA_ARCHIVE, 'obspar', 'archfiles.db3')
with DBI(dbi='sqlite', server=mica_obspar_db) as db:
obs = db.fetchall('select obsid, tstart from archfiles where tstart > {}'
' and tstart < {}'
.format(start.secs, stop.secs))
# Get unique obsids and then sort by tstart
idx = np.unique(obs['obsid'], return_index=True)[1]
obs = Table(obs[idx])
obs.sort('tstart')
obs['datestart'] = Time(obs['tstart'], format='cxcsec').yday
obs.pprint(max_lines=-1)
obsid_index = shelve.open(obsid_file)
# Go through obsids and either process or skip
for obsid in obs['obsid']:
if str(obsid) in obsid_index:
logger.info('Skipping obsid {} - already in archive'.format(obsid))
continue
logger.info('Processing obsid {}'.format(obsid))
asol_files = sorted(asp_l1.get_files(obsid=obsid, content='ASPSOL'))
if not asol_files:
logger.info('Skipping obsid {} - no asol files'.format(obsid))
continue
asol = get_asol(obsid, asol_files, opt.dt)
add_asol_to_h5(h5_file, asol)
obsid_index[str(obsid)] = asol_files
obsid_index.close()
logger.info('File {} updated'.format(h5_file))
logger.info('File {} updated'.format(obsid_file))
# Write out to FITS
fits_file = re.sub(r'\.h5$', '.fits', h5_file)
dat = Table.read(h5_file, path='data')
dat.meta.clear()
dat.write(fits_file, overwrite=True)
logger.info('File {} updated'.format(fits_file))
|
Python
| 0
|
@@ -1190,16 +1190,17 @@
_files%5D%0A
+%0A
# Ch
@@ -1220,13 +1220,19 @@
the
-y
a
-ll
+sol files
hav
@@ -1240,24 +1240,40 @@
raw columns
+ ( %3E= DS 10.8.3)
%0A has_raw
@@ -1447,17 +1447,24 @@
t%22)%0A
+%0A
#
-Get
+Reduce to
jus
@@ -1473,22 +1473,34 @@
the
-useful
columns
+ needed by the tool
%0A
@@ -1665,24 +1665,24 @@
l in asols%5D%0A
-
asol = v
@@ -1694,16 +1694,17 @@
(asols)%0A
+%0A
# An
|
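get_asol in this record resamples the high-rate aspect solution to a ~1 ksec cadence using linspace plus searchsorted rather than interpolation. The core of that trick, isolated with synthetic timestamps:

import numpy as np

times_in = np.arange(0.0, 100000.0, 0.256)    # synthetic high-rate times (sec)
dt = 1.0                                      # desired cadence in ksec

t0, t1 = times_in[10], times_in[-10]
n_times = 2 + int((t1 - t0) // (dt * 1000))
sample_times = np.linspace(t0, t1, n_times)

# Index of the first input time >= each sample time: nearest-after selection,
# so real recorded rows are kept instead of interpolated values.
idx = np.searchsorted(times_in, sample_times)
print(len(times_in), '->', len(idx))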
927d762daf41a32be37bd056c889e170d6efbb93
|
Update foreign key references for django 2.0. This change was made in Django core 3 years ago.
|
watson/models.py
|
watson/models.py
|
"""Models used by django-watson."""
from __future__ import unicode_literals
import uuid
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible, force_text
from django.utils.functional import cached_property
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
def has_int_pk(model):
"""Tests whether the given model has an integer primary key."""
pk = model._meta.pk
return (
(
isinstance(pk, (models.IntegerField, models.AutoField)) and
not isinstance(pk, models.BigIntegerField)
) or (
isinstance(pk, models.ForeignKey) and has_int_pk(pk.rel.to)
)
)
def get_str_pk(obj, connection):
return obj.pk.hex if isinstance(obj.pk, uuid.UUID) and connection.vendor != "postgresql" else force_text(obj.pk)
META_CACHE_KEY = "_meta_cache"
@python_2_unicode_compatible
class SearchEntry(models.Model):
"""An entry in the search index."""
engine_slug = models.CharField(
max_length=200,
db_index=True,
default="default",
)
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
)
object_id = models.TextField()
object_id_int = models.IntegerField(
blank=True,
null=True,
db_index=True,
)
object = GenericForeignKey()
title = models.CharField(
max_length=1000,
)
description = models.TextField(
blank=True,
)
content = models.TextField(
blank=True,
)
url = models.CharField(
max_length=1000,
blank=True,
)
meta_encoded = models.TextField()
def _deserialize_meta(self):
from watson.search import SearchEngine
engine = SearchEngine._created_engines[self.engine_slug]
model = ContentType.objects.get_for_id(self.content_type_id).model_class()
adapter = engine.get_adapter(model)
return adapter.deserialize_meta(self.meta_encoded)
@cached_property
def meta(self):
"""Returns the meta information stored with the search entry."""
# Attempt to use the cached value.
if hasattr(self, META_CACHE_KEY):
return getattr(self, META_CACHE_KEY)
# Decode the meta.
meta_value = self._deserialize_meta()
setattr(self, META_CACHE_KEY, meta_value)
return meta_value
def get_absolute_url(self):
"""Returns the URL of the referenced object."""
return self.url
def __str__(self):
"""Returns a string representation."""
return self.title
class Meta:
verbose_name_plural = "search entries"
app_label = 'watson'
|
Python
| 0
|
@@ -815,12 +815,24 @@
k.re
-l.to
+mote_field.model
)%0A
|
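The diff swaps pk.rel.to for pk.remote_field.model, the spelling Django has used since 1.8 and the only one left in 2.0. The patched helper in isolation (needs a configured Django project to actually run):

from django.db import models

def has_int_pk(model):
    """Tests whether the given model has an integer primary key."""
    pk = model._meta.pk
    return (
        (
            isinstance(pk, (models.IntegerField, models.AutoField)) and
            not isinstance(pk, models.BigIntegerField)
        ) or (
            # pk.rel.to was removed in Django 2.0
            isinstance(pk, models.ForeignKey) and has_int_pk(pk.remote_field.model)
        )
    )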
0c6c9de85161a6c15f84b19d473d992c90a31ea5
|
add per transaction coverage increase metrics
|
mythril/laser/ethereum/plugins/implementations/coverage.py
|
mythril/laser/ethereum/plugins/implementations/coverage.py
|
from mythril.laser.ethereum.svm import LaserEVM
from mythril.laser.ethereum.plugins.plugin import LaserPlugin
from mythril.laser.ethereum.state.global_state import GlobalState
from typing import Dict, Tuple, List
import logging
log = logging.getLogger(__name__)
class InstructionCoveragePlugin(LaserPlugin):
"""InstructionCoveragePlugin
This plugin measures the instruction coverage of mythril.
The instruction coverage is the ratio between the instructions that have been executed
and the total amount of instructions.
Note that with lazy constraint solving enabled that this metric will be "unsound" as
reachability will not be considered for the calculation of instruction coverage.
"""
def initialize(self, symbolic_vm: LaserEVM):
"""Initializes the instruction coverage plugin
Introduces hooks for each instruction
:param symbolic_vm:
:return:
"""
coverage = {} # type: Dict[str, Tuple[int, List[bool]]]
@symbolic_vm.laser_hook("stop_sym_exec")
def stop_sym_exec_hook():
# Print results
for code, code_cov in coverage.items():
cov_percentage = sum(code_cov[1]) / float(code_cov[0]) * 100
log.info(
"Achieved {:.2f}% coverage for code: {}".format(
cov_percentage, code
)
)
@symbolic_vm.laser_hook("execute_state")
def execute_state_hook(global_state: GlobalState):
# Record coverage
code = global_state.environment.code.bytecode
if code not in coverage.keys():
number_of_instructions = len(
global_state.environment.code.instruction_list
)
coverage[code] = (
number_of_instructions,
[False] * number_of_instructions,
)
coverage[code][1][global_state.mstate.pc] = True
|
Python
| 0
|
@@ -720,16 +720,168 @@
%22%22%22%0A%0A
+ def __init__(self):%0A self.coverage = %7B%7D # type: Dict%5Bstr, Tuple%5Bint, List%5Bbool%5D%5D%5D%0A self.initial_coverage = 0%0A self.tx_id = 0%0A%0A
def
@@ -921,16 +921,16 @@
erEVM):%0A
-
@@ -1080,32 +1080,37 @@
%22%22%22%0A
+self.
coverage = %7B%7D #
@@ -1110,51 +1110,65 @@
= %7B%7D
- # type: Dict%5Bstr, Tuple%5Bint, List%5Bbool%5D%5D%5D
+%0A self.initial_coverage = 0%0A self.tx_id = 0
%0A%0A
@@ -1306,24 +1306,29 @@
code_cov in
+self.
coverage.ite
@@ -1816,16 +1816,21 @@
not in
+self.
coverage
@@ -1977,32 +1977,37 @@
+self.
coverage%5Bcode%5D =
@@ -2138,16 +2138,21 @@
+self.
coverage
@@ -2192,8 +2192,876 @@
= True%0A
+%0A @symbolic_vm.laser_hook(%22start_sym_trans%22)%0A def execute_start_sym_trans_hook():%0A self.initial_coverage = self._get_covered_instructions()%0A%0A @symbolic_vm.laser_hook(%22stop_sym_trans%22)%0A def execute_stop_sym_trans_hook():%0A end_coverage = self._get_covered_instructions()%0A log.info(%0A %22Number of new instructions covered in tx %25d: %25d%22%0A %25 (self.tx_id, end_coverage - self.initial_coverage)%0A )%0A self.tx_id += 1%0A%0A def _get_covered_instructions(self) -%3E int:%0A %22%22%22Gets the total number of covered instructions for all accounts in%0A the svm.%0A :return:%0A %22%22%22%0A total_covered_instructions = 0%0A for _, cv in self.coverage.items():%0A total_covered_instructions += sum(cv%5B1%5D)%0A return total_covered_instructions%0A
|
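The diff promotes the local coverage dict to instance state and logs, per transaction, how many new instructions were covered. The counting helper reduced to plain data so the arithmetic is visible (the code identifiers are hypothetical):

coverage = {
    # code identifier -> (instruction count, covered flags)
    'contract_a': (5, [True, False, True, True, False]),
    'contract_b': (3, [True, True, False]),
}

def covered_instructions(cov):
    # sum(flags) counts the True entries for each piece of code
    return sum(sum(flags) for _, flags in cov.values())

before = covered_instructions(coverage)          # 5
coverage['contract_a'][1][1] = True              # one more instruction executed
print('newly covered:', covered_instructions(coverage) - before)   # 1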
3494ed60343760d3ee520b37b793afa49f225f35
|
Add function to check if loop is dummy or not
|
pitch/lib/logic/control_flow.py
|
pitch/lib/logic/control_flow.py
|
from __future__ import unicode_literals
import itertools
import yaml
from ..common.utils import to_iterable
from ..templating.structures import PitchTemplate, JinjaExpressionResolver
class ControlFlowStatement(object):
def __init__(self, statement_type):
self.__statement_type = statement_type
@property
def type(self):
return self.__statement_type
class Conditional(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__conditional_default = PitchTemplate('true')
self.__expression = None
self.__value = None
super(Conditional, self).__init__('conditional')
def __reinitialize(self):
self.__expression = None
self.__value = None
@property
def value(self):
return self.__value
@property
def expression(self):
return self.__expression
def evaluate(self):
self.__reinitialize()
context = self.__step_context_proxy.get_context()
default = self.__conditional_default
step_conditional = context.step.get('when', default)
if isinstance(step_conditional, bool):
evaluated_value = step_conditional
self.__expression = str(step_conditional)
else:
resolver = JinjaExpressionResolver(step_context=context)
resolved_value = resolver(step_conditional)
evaluated_value = yaml.safe_load(context.renderer(resolved_value))
self.__expression = step_conditional.as_string()
self.__value = evaluated_value
return self.__value
class Loop(ControlFlowStatement):
def __init__(self, step_context_proxy):
self.__step_context_proxy = step_context_proxy
self.__items = None
self.__command_iterable = None
self.__command = None
self.__command_details = None
super(Loop, self).__init__('loop')
def __reinitialize(self):
self.__items = None
@property
def items(self):
return self.__command_iterable
@property
def command(self):
return self.__command
def set_loop_variable(self, item):
active_context = self.__step_context_proxy.get_context()
active_context.template_context['item'] = item
return item
def evaluate(self):
self.__reinitialize()
step_context = self.__step_context_proxy.get_context()
step = step_context.step
loop_command_key, loop_command_details = step.get_any_item_by_key(
'with_items',
'with_indexed_items',
'with_nested'
)
loop_command_details = to_iterable(loop_command_details)
if loop_command_key is None:
with_items_iterable = [(None,)]
else:
loop_command_details = map(
step_context.renderer,
loop_command_details
)
with_items_iterable = filter(
None,
list(
itertools.product(
*filter(
None,
map(
step_context.template_context.nested_get,
loop_command_details
)
)
)
)
)
if loop_command_key == 'with_indexed_items':
with_items_iterable = enumerate(with_items_iterable)
self.__command_iterable = map(
lambda item: item if len(item) > 1 else item[0],
with_items_iterable
)
self.__command = loop_command_key
self.__command_details = loop_command_details
|
Python
| 0.000005
|
@@ -2000,32 +2000,103 @@
__items = None%0A%0A
+ def is_effective(self):%0A return self.__command is not None%0A%0A
@property%0A
|
28c6e6747a6d0bd924f838ed4f846b01c247012b
|
Remove unused tempproject and tempscene
|
previz/testsutils.py
|
previz/testsutils.py
|
import functools
from . import PrevizProject
class Decorators(object):
def __init__(self, api_token, api_root, new_project_prefix = 'cf-'):
self.api_root = api_root
self.api_token = api_token
self.new_project_prefix = new_project_prefix
def project(self, project_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
p = PrevizProject(self.api_root, self.api_token, project_id)
project = p.project(include=['scenes'])
func(project=project, *args, **kwargs)
return wrapper
return decorator
def tempproject(self):
'''Returning an existing project while the API v2 is being worked on'''
return self.project('8d9e684f-0763-4756-844b-d0219a4f3f9a')
def scene(self, scene_id):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
project_id = kwargs['project']['id']
p = PrevizProject(self.api_root, self.api_token, project_id)
scene = p.scene(scene_id, include=[])
func(scene=scene, *args, **kwargs)
#p = PrevizProject(self.api_root, self.api_token, project_id)
#func(project=p.project(include=['scenes']), *args, **kwargs)
return wrapper
return decorator
def tempscene(self):
'''Returning an existing scene while the API v2 is being worked on'''
return self.scene('5a56a895-46ef-4f0f-862c-38ce14f6275b')
def tempproject(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
api_root = os.environ[PREVIZ_API_ROOT_ENVVAR]
api_token = os.environ[PREVIZ_API_TOKEN_ENVVAR]
project_name = 'cf-' + func.__qualname__
p = PrevizProject(api_root, api_token)
#p.project_id = p.new_project(project_name)['id']
p.project_id = 'a5ff9cef-4904-4dc3-8a3c-821a219c891e' # p.project_id
func(project_id=p.project_id, *args, **kwargs)
#p.delete_project()
return wrapper
def tempscene(func):
pass
|
Python
| 0
|
@@ -1573,567 +1573,4 @@
b')%0A
-%0A%0Adef tempproject(func):%0A @functools.wraps(func)%0A def wrapper(*args, **kwargs):%0A api_root = os.environ%5BPREVIZ_API_ROOT_ENVVAR%5D%0A api_token = os.environ%5BPREVIZ_API_TOKEN_ENVVAR%5D%0A project_name = 'cf-' + func.__qualname__%0A%0A p = PrevizProject(api_root, api_token)%0A #p.project_id = p.new_project(project_name)%5B'id'%5D%0A p.project_id = 'a5ff9cef-4904-4dc3-8a3c-821a219c891e' # p.project_id%0A%0A func(project_id=p.project_id, *args, **kwargs)%0A%0A #p.delete_project()%0A return wrapper%0A%0Adef tempscene(func):%0A pass%0A
|
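The removed tempproject/tempscene helpers wrapped the same decorator-factory pattern the Decorators class above still uses. A hedged, self-contained sketch of that pattern (with_resource and the dict payload are fabricated for illustration and are not part of the previz API):

import functools

def with_resource(resource_id):
    # Outer function takes configuration; 'decorator' receives the test
    # function; 'wrapper' fetches the resource and injects it as a kwarg.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            resource = {'id': resource_id}  # stand-in for an API call
            return func(*args, resource=resource, **kwargs)
        return wrapper
    return decorator

@with_resource('8d9e684f')
def check(resource):
    assert resource['id'] == '8d9e684f'

check()  # passes: the decorator supplied the resource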
2528f4ad76dad2915be1d51483cea7693ea453ce
|
fix test
|
tests/server4_test.py
|
tests/server4_test.py
|
# -*- coding: utf-8 -*-
u"""Test background processes
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
import pytest
def test_elegant(fc):
_r(
fc,
'Compact Storage Ring',
'twissReport',
)
def test_synergia(fc):
_r(
fc,
'Simple FODO',
'bunchReport1',
shared_model='bunchReport2',
)
def test_warppba(fc):
_r(
fc,
'Electron Beam',
'beamPreviewReport',
)
_r(
fc,
'Laser Pulse',
'laserPreviewReport',
)
def _r(fc, sim_name, analysis_model, shared_model):
from pykern.pkdebug import pkdp, pkdlog
from sirepo import srunit
from pykern import pkunit
import re
import time
data = fc.sr_sim_data(sim_name)
cancel = None
try:
run = fc.sr_post(
'runSimulation',
PKDict(
forceRun=False,
models=data.models,
report=analysis_model,
simulationId=data.models.simulation.simulationId,
simulationType=data.simulationType,
),
)
import sirepo.sim_data
s = sirepo.sim_data.get_class(fc.sr_sim_type)
pkunit.pkeq('pending', run.state, 'not pending, run={}', run)
cancel = next_request = run.nextRequest
for _ in range(7):
if run.state in ('completed', 'error'):
cancel = None
break
run = fc.sr_post('runStatus', run.nextRequest)
time.sleep(1)
else:
pkunit.pkfail('did not complete: runStatus={}', run)
pkunit.pkeq('completed', run.state)
if shared_model:
next_request.report = shared_model
run = fc.sr_post('runStatus', next_request)
pkunit.pkeq('completed', run.state)
finally:
try:
if cancel:
fc.sr_post('runCancel', cancel)
except Exception:
pass
|
Python
| 0.000002
|
@@ -395,32 +395,33 @@
Report',%0A )%0A%0A
+%0A
def test_synergi
@@ -538,16 +538,17 @@
%0A )%0A%0A
+%0A
def test
@@ -771,16 +771,21 @@
ed_model
+=None
):%0A f
|
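The test above polls runStatus inside a for/else: the else branch fires only when the loop finishes without hitting break, i.e. when the retries are exhausted. A hedged standalone version of that idiom (poll_until_complete and the TimeoutError are illustrative, not sirepo API):

import time

def poll_until_complete(get_state, attempts=7, delay=1.0):
    state = None
    for _ in range(attempts):
        state = get_state()
        if state in ('completed', 'error'):
            break   # done: skips the else clause below
        time.sleep(delay)
    else:
        # Runs only if the loop was never broken out of.
        raise TimeoutError('did not complete in %d attempts' % attempts)
    return state

states = iter(['pending', 'running', 'completed'])
print(poll_until_complete(lambda: next(states), delay=0))  # completed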
1b16467c9d24c770578d5f94e9715d754885de98
|
correct output at switch on
|
printStatusChange.py
|
printStatusChange.py
|
import re
def readStatus():
status = ''
f = open('/proc/asound/card0/pcm0p/sub0/status', 'r')
for line in f:
matchObj = re.match(r'state.*', line)
if matchObj:
status = matchObj.group()
break
matchObj = re.match(r'closed', line)
if matchObj:
status = matchObj.group()
break
return status
import time
SHUTDOWN_TIME = 10
def do_main_program( console ):
loop = True
shutdownTimer = SHUTDOWN_TIME
power = 0
while loop:
status = readStatus()
if status == "closed":
if power == 1:
if shutdownTimer == 0:
print("Power OFF")
power = 0
else:
shutdownTimer = shutdownTimer-1
print("count down... " + str(shutdownTimer))
else:
if power == 0:
print("Power ON")
power = 1
if shutdownTimer != SHUTDOWN_TIME:
shutdownTimer = SHUTDOWN_TIME
print("(stop count down) Power still ON")
time.sleep(1)
#fp = open('status.log','a')
#fp.write(status+"\n")
#fp.close()
import os
import sys  # needed for sys.exit() at the bottom of this file
from optparse import OptionParser
import daemon
if __name__ == "__main__":
parser = OptionParser( os.path.relpath(__file__) + " [-c] | [-d]" )
parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=False, help="start as daemon")
parser.add_option("-c", "--console", action="store_true", dest="console", default=False, help="output on console")
(optionen, args) = parser.parse_args()
if optionen.daemon:
with daemon.DaemonContext():
do_main_program(False)
else:
do_main_program(optionen.console)
sys.exit(0)
|
Python
| 0.000011
|
@@ -961,16 +961,63 @@
wer = 1%0A
+ shutdownTimer = SHUTDOWN_TIME%0A%0A
@@ -1128,13 +1128,16 @@
nt(%22
-(s
+S
top
+ping
cou
@@ -1147,24 +1147,27 @@
down
-)
+(
Power
+is
still ON
%22)%0A%0A
@@ -1162,16 +1162,17 @@
still ON
+)
%22)%0A%0A
|
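The diff resets shutdownTimer the moment power switches on and rewords the countdown message. A hedged refactor of that loop body into a pure per-tick function (step and the status strings are illustrative; the real script reads them from /proc/asound):

SHUTDOWN_TIME = 10

def step(status, power, timer):
    # One one-second tick of the state machine above, with the diff's
    # fix folded in: switching power on also resets the countdown.
    if status == 'closed':
        if power and timer == 0:
            return 0, SHUTDOWN_TIME   # countdown expired: power off
        if power:
            return 1, timer - 1       # keep counting down
        return 0, timer               # already off: nothing to do
    return 1, SHUTDOWN_TIME           # stream open: power on, reset timer

power, timer = 0, SHUTDOWN_TIME
for status in ('state: RUNNING', 'closed', 'closed', 'state: RUNNING'):
    power, timer = step(status, power, timer)
print(power, timer)   # 1 10: playback resumed, countdown reset

Factoring the transition out of the while loop makes the countdown behaviour unit-testable without touching ALSA or sleeping.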
f54802514b6d3ba66269c4e09640d2de7a7dade2
|
Fix regression in filesystem watcher
|
watcher/straight_watch_callback.py
|
watcher/straight_watch_callback.py
|
#!/usr/bin/env python3 -u
import os
import pathlib
import sys
WATCHEXEC_VAR_COMMON = "WATCHEXEC_COMMON_PATH"
WATCHEXEC_VARS = [
"WATCHEXEC_CREATED_PATH",
"WATCHEXEC_REMOVED_PATH",
"WATCHEXEC_RENAMED_PATH",
"WATCHEXEC_WRITTEN_PATH",
"WATCHEXEC_META_CHANGED_PATH",
]
def die(message):
print(message, file=sys.stderr)
sys.exit(1)
def usage():
return "usage: python -m straight_watch_callback <repos-dir> <modified-dir>"
def path_contains(parent, child):
parent = pathlib.Path(parent).resolve()
child = pathlib.Path(child).resolve()
return parent in child.parents
def path_strip(parent, child):
parent = pathlib.Path(parent).parts
child = pathlib.Path(child).parts
return child[len(parent)]
def main(args):
if len(args) != 2:
die(usage())
repos_dir, modified_dir = args
repos_dir = pathlib.Path(repos_dir).resolve()
modified_dir = pathlib.Path(modified_dir).resolve()
paths = []
for var in WATCHEXEC_VARS:
if var in os.environ:
for path in os.environ[var].split(os.pathsep):
paths.append(path)
if not paths:
die("straight_watch_callback.py: watchexec gave no modified files")
if WATCHEXEC_VAR_COMMON in os.environ:
common = os.environ[WATCHEXEC_VAR_COMMON]
        # Yes, string concatenation. For some reason when a common
# prefix is used, the individual paths start with a slash even
# though they're actually relative to the prefix.
paths = [common + path for path in paths]
paths = [pathlib.Path(path).resolve() for path in paths]
paths = sorted(set(paths))
repos = set()
for path in paths:
print("detect modification: {}".format(path), file=sys.stderr)
if repos_dir in path.parents:
repo = path.relative_to(repos_dir).parts[0]
repos.add(repo)
if repos:
modified_dir.mkdir(parents=True, exist_ok=True)
repos = sorted(repos)
for repo in repos:
print("--> mark for rebuild: {}".format(repo), file=sys.stderr)
with open(modified_dir / repo, "w"):
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
Python
| 0.000019
|
@@ -8,16 +8,19 @@
bin/env
+-S
python3
|
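The comment in the watcher explains why plain string concatenation is used: each per-event path looks absolute, so pathlib joining would discard WATCHEXEC_COMMON_PATH entirely. A hedged demonstration (the env dict is fabricated, and the ':' separator assumes POSIX os.pathsep):

import os
import pathlib

def reconstruct(environ):
    paths = environ.get('WATCHEXEC_WRITTEN_PATH', '').split(os.pathsep)
    common = environ.get('WATCHEXEC_COMMON_PATH')
    if common:
        # pathlib.Path(common) / p would return p unchanged here, since
        # p starts with '/', so plain concatenation is the correct move.
        paths = [common + p for p in paths]
    return sorted(pathlib.Path(p) for p in paths)

env = {'WATCHEXEC_COMMON_PATH': '/repos',
       'WATCHEXEC_WRITTEN_PATH': '/a/x.py:/b/y.py'}
print(reconstruct(env))   # [PosixPath('/repos/a/x.py'), PosixPath('/repos/b/y.py')]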
66e9e8a35831e603509d96ac8f1cab7bc8b9a3fc
|
enforce lot locking when using quick-create in web client
|
stock_lock_lot/models/stock_production_lot.py
|
stock_lock_lot/models/stock_production_lot.py
|
# -*- coding: utf-8 -*-
# © 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# © 2015 AvanzOsc (http://www.avanzosc.es)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, exceptions, _
class StockProductionLot(models.Model):
_name = 'stock.production.lot'
_inherit = ['stock.production.lot', 'mail.thread']
_mail_post_access = 'read'
_track = {
'locked': {
'stock_lock_lot.mt_lock_lot': lambda self, cr, uid, obj,
ctx=None: obj.locked,
'stock_lock_lot.mt_unlock_lot': lambda self, cr, uid, obj,
ctx=None: not obj.locked,
},
}
def _get_product_locked(self, product):
"""Should create locked? (including categories and parents)
@param product: browse-record for product.product
@return True when the category of the product or one of the parents
demand new lots to be locked"""
_locked = product.categ_id.lot_default_locked
categ = product.categ_id.parent_id
while categ and not _locked:
_locked = categ.lot_default_locked
categ = categ.parent_id
return _locked
@api.one
def _get_locked_value(self):
return self._get_product_locked(self.product_id)
locked = fields.Boolean(string='Blocked', default='_get_locked_value',
readonly=True)
@api.one
@api.onchange('product_id')
def onchange_product_id(self):
'''Instruct the client to lock/unlock a lot on product change'''
self.locked = self._get_product_locked(self.product_id)
@api.multi
def button_lock(self):
'''Lock the lot if the reservations allow it'''
stock_quant_obj = self.env['stock.quant']
for lot in self:
cond = [('lot_id', '=', lot.id),
('reservation_id', '!=', False)]
for quant in stock_quant_obj.search(cond):
if quant.reservation_id.state not in ('cancel', 'done'):
raise exceptions.Warning(
_('Error! Serial Number/Lot "%s" currently has '
'reservations.')
% (lot.name))
return self.write({'locked': True})
@api.multi
def button_unlock(self):
return self.write({'locked': False})
# Kept in old API to maintain compatibility
def create(self, cr, uid, vals, context=None):
'''Force the locking/unlocking, ignoring the value of 'locked'.'''
product = self.pool['product.product'].browse(
cr, uid, vals.get('product_id'))
vals['locked'] = self._get_product_locked(product)
return super(StockProductionLot, self).create(
cr, uid, vals, context=context)
@api.multi
def write(self, values):
'''Lock the lot if changing the product and locking is required'''
if 'product_id' in values:
product = self.env['product.product'].browse(
values.get('product_id'))
values['locked'] = self._get_product_locked(product)
return super(StockProductionLot, self).write(values)
|
Python
| 0
|
@@ -2590,157 +2590,345 @@
-product = self.pool%5B'product.product'%5D.browse(%0A cr, uid, vals.get('product_id'))%0A vals%5B'locked'%5D = self._get_product_locked(product
+# %C2%A0Web quick-create doesn't provide product_id in vals, but in context%0A product_id = vals.get('product_id', context.get('product_id', False))%0A if product_id:%0A vals%5B'locked'%5D = self._get_product_locked(%0A self.pool%5B'product.product'%5D.browse(%0A cr, uid, product_id, context=context)
)%0A
|
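The hunk reads product_id from vals with a fallback to the creation context, because the web client's quick-create path only supplies it there. The value it then feeds into _get_product_locked triggers the same parent-chain scan shown above; a hedged, Odoo-free sketch of that walk (plain dicts replace browse records):

def demands_lock(categ):
    # Walk up the category chain until some ancestor demands locking.
    while categ is not None:
        if categ.get('lot_default_locked'):
            return True
        categ = categ.get('parent')
    return False

root = {'lot_default_locked': True, 'parent': None}
child = {'lot_default_locked': False, 'parent': root}
print(demands_lock(child))   # True: inherited from the root category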