Schema of the dump, one record per commit:

field          type           range
commit         stringlengths  40 to 40
subject        stringlengths  1 to 3.25k
old_file       stringlengths  4 to 311
new_file       stringlengths  4 to 311
old_contents   stringlengths  0 to 26.3k
lang           stringclasses  3 values
proba          float64        0 to 1
diff           stringlengths  0 to 7.82k
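Each record pairs a commit with the file contents before the change (old_contents) and the change itself (diff). Two quirks of this export matter for parsing: old_contents has had its newlines flattened to spaces, and diff is percent-encoded (%0A for newlines inside hunk text, %5B/%5D for brackets, %7B/%7D for braces), with hunk ranges that appear to be character offsets into old_contents rather than line numbers. Below is a minimal decoding sketch, assuming the records are available as plain Python dicts; note that unquote only restores the escaped characters, since the line breaks between hunk segments were lost to the same flattening.

from urllib.parse import unquote

def decode_diff(record):
    # unquote, not unquote_plus: a literal '+' marks added text in a
    # hunk and must not be turned into a space.
    return unquote(record["diff"])

# Sample record, trimmed from the first row below.
record = {
    "commit": "29c36b24a81607eec5275e84f4c6f3d33a318511",
    "subject": "Add ow2_asm license",
    "diff": "@@ -2001,16 +2001,35 @@ m': %5B%5D,%0A + 'ow2_asm': %5B%5D,%0A %7D%0A%0ASCRIP",
}
print(decode_diff(record))
# @@ -2001,16 +2001,35 @@ m': [],
#  + 'ow2_asm': [],
#  }
#
# SCRIP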

commit:   29c36b24a81607eec5275e84f4c6f3d33a318511
subject:  Add ow2_asm license
old_file: tools_webrtc/libs/generate_licenses.py
new_file: tools_webrtc/libs/generate_licenses.py
old_contents:
#!/usr/bin/env python # Copyright 2016 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license # that can be found in the LICENSE file in the root of the source # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. """Generates license markdown for a prebuilt version of WebRTC.""" import sys import argparse import cgi import json import logging import os import re import subprocess LIB_TO_LICENSES_DICT = { 'abseil-cpp': ['third_party/abseil-cpp/LICENSE'], 'android_tools': ['third_party/android_tools/LICENSE'], 'auto': ['third_party/auto/src/LICENSE.txt'], 'bazel': ['third_party/bazel/LICENSE'], 'boringssl': ['third_party/boringssl/src/LICENSE'], 'errorprone': ['third_party/errorprone/LICENSE'], 'expat': ['third_party/expat/files/COPYING'], 'fiat': ['third_party/boringssl/src/third_party/fiat/LICENSE'], 'guava': ['third_party/guava/LICENSE'], 'ijar': ['third_party/ijar/LICENSE'], 'jsoncpp': ['third_party/jsoncpp/LICENSE'], 'jsr-305': ['third_party/jsr-305/src/ri/LICENSE'], 'libc++': ['buildtools/third_party/libc++/trunk/LICENSE.TXT'], 'libc++abi': ['buildtools/third_party/libc++abi/trunk/LICENSE.TXT'], 'libevent': ['base/third_party/libevent/LICENSE'], 'libjpeg_turbo': ['third_party/libjpeg_turbo/LICENSE.md'], 'libsrtp': ['third_party/libsrtp/LICENSE'], 'libvpx': ['third_party/libvpx/source/libvpx/LICENSE'], 'libyuv': ['third_party/libyuv/LICENSE'], 'opus': ['third_party/opus/src/COPYING'], 'protobuf': ['third_party/protobuf/LICENSE'], 'rnnoise': ['third_party/rnnoise/COPYING'], 'usrsctp': ['third_party/usrsctp/LICENSE'], 'webrtc': ['LICENSE', 'LICENSE_THIRD_PARTY'], 'zlib': ['third_party/zlib/LICENSE'], # Compile time dependencies, no license needed: 'yasm': [], } SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0])) CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) sys.path.append(os.path.join(CHECKOUT_ROOT, 'build')) import find_depot_tools THIRD_PARTY_LIB_REGEX = r'^.*/third_party/([\w\-+]+).*$' class LicenseBuilder(object): def __init__(self, buildfile_dirs, targets): self.buildfile_dirs = buildfile_dirs self.targets = targets @staticmethod def _ParseLibrary(dep): """ Returns a regex match containing library name after third_party Input one of: //a/b/third_party/libname:c //a/b/third_party/libname:c(//d/e/f:g) //a/b/third_party/libname/c:d(//e/f/g:h) Outputs match with libname in group 1 or None if this is not a third_party dependency. """ return re.match(THIRD_PARTY_LIB_REGEX, dep) @staticmethod def _RunGN(buildfile_dir, target): cmd = [ sys.executable, os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), 'desc', '--all', '--format=json', os.path.abspath(buildfile_dir), target, ] logging.debug("Running: %r", cmd) output_json = subprocess.check_output(cmd, cwd=CHECKOUT_ROOT) logging.debug("Output: %s", output_json) return output_json @staticmethod def _GetThirdPartyLibraries(buildfile_dir, target): output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) libraries = set() for target in output.values(): third_party_matches = ( LicenseBuilder._ParseLibrary(dep) for dep in target['deps']) libraries |= set(match.group(1) for match in third_party_matches if match) return libraries def GenerateLicenseText(self, output_dir): # Get a list of third_party libs from gn. 
For fat libraries we must consider # all architectures, hence the multiple buildfile directories. third_party_libs = set() for buildfile in self.buildfile_dirs: for target in self.targets: third_party_libs |= LicenseBuilder._GetThirdPartyLibraries( buildfile, target) assert len(third_party_libs) > 0 missing_licenses = third_party_libs - set(LIB_TO_LICENSES_DICT.keys()) if missing_licenses: error_msg = 'Missing licenses: %s' % ', '.join(missing_licenses) logging.error(error_msg) raise Exception(error_msg) # Put webrtc at the front of the list. license_libs = sorted(third_party_libs) license_libs.insert(0, 'webrtc') logging.info("List of licenses: %s", ', '.join(license_libs)) # Generate markdown. output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), 'w+') for license_lib in license_libs: if len(LIB_TO_LICENSES_DICT[license_lib]) == 0: logging.info("Skipping compile time dependency: %s", license_lib) continue # Compile time dependency output_license_file.write('# %s\n' % license_lib) output_license_file.write('```\n') for path in LIB_TO_LICENSES_DICT[license_lib]: license_path = os.path.join(CHECKOUT_ROOT, path) with open(license_path, 'r') as license_file: license_text = cgi.escape(license_file.read(), quote=True) output_license_file.write(license_text) output_license_file.write('\n') output_license_file.write('```\n\n') output_license_file.close() def main(): parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md') parser.add_argument('--verbose', action='store_true', default=False, help='Debug logging.') parser.add_argument('--target', required=True, action='append', default=[], help='Name of the GN target to generate a license for') parser.add_argument('output_dir', help='Directory to output LICENSE.md to.') parser.add_argument('buildfile_dirs', nargs="+", help='Directories containing gn generated ninja files') args = parser.parse_args() logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) builder = LicenseBuilder(args.buildfile_dirs, args.target) builder.GenerateLicenseText(args.output_dir) if __name__ == '__main__': sys.exit(main())
lang:  Python
proba: 0
diff:
@@ -2001,16 +2001,35 @@ m': %5B%5D,%0A + 'ow2_asm': %5B%5D,%0A %7D%0A%0ASCRIP

commit:   d1568f2420322a19041dbdde734964ea39e66943
subject:  content-length should be a string.
old_file: VMBackup/main/snapshotter.py
new_file: VMBackup/main/snapshotter.py
old_contents:
#!/usr/bin/env python # # VM Backup extension # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.7+ # import urlparse import httplib import traceback from common import CommonVariables from HttpUtil import HttpUtil class SnapshotError(object): def __init__(self): self.errorcode = 0 self.sasuri = None def __str__(self): return 'errorcode:' + str(self.errorcode) + 'sasuri:' + str(self.sasuri) class SnapshotResult(object): def __init__(self): self.errors = [] def __str__(self): error_str = "" for error in self.errors: error_str+=(str(error)) + "\n" return error_str class Snapshotter(object): """description of class""" def __init__(self, logger): self.logger = logger def snapshot(self, sasuri, meta_data): result = None snapshot_error = SnapshotError() if(sasuri is None): self.logger.log("Failed to do the snapshot because sasuri is none",False,'Error') snapshot_error.errorcode = -1 snapshot_error.sasuri = sasuri try: sasuri_obj = urlparse.urlparse(sasuri) if(sasuri_obj is None or sasuri_obj.hostname is None): self.logger.log("Failed to parse the sasuri",False,'Error') snapshot_error.errorcode = -1 snapshot_error.sasuri = sasuri else: body_content = '' headers = {} headers["Content-Length"] = 0 for meta in meta_data: key = meta['Key'] value = meta['Value'] headers["x-ms-meta-" + key] = value self.logger.log(str(headers)) http_util = HttpUtil(self.logger) sasuri_obj = urlparse.urlparse(sasuri + '&comp=snapshot') result = http_util.Call('PUT',sasuri_obj, body_content, headers = headers) if(result != 0): snapshot_error.errorcode = result snapshot_error.sasuri = sasuri except Exception as e: errorMsg = "Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc()) self.logger.log(errorMsg, False, 'Error') snapshot_error.errorcode = -1 snapshot_error.sasuri = sasuri return snapshot_error def snapshotall(self, paras): self.logger.log("doing snapshotall now...") snapshot_result = SnapshotResult() blobs = paras.blobs for blob in blobs: snapshotError = self.snapshot(blob, paras.backup_metadata) if(snapshotError.errorcode != CommonVariables.success): snapshot_result.errors.append(snapshotError) return snapshot_result
lang:  Python
proba: 0.999987
diff:
@@ -1,8 +1,9 @@ +%EF%BB%BF #!/usr/b @@ -2131,17 +2131,19 @@ gth%22%5D = -0 +'0' %0D%0A

commit:   42110ae58a799c7cbb4152f091e2d53a26049839
subject:  Add regression test for #2
old_file: sasstests.py
new_file: sasstests.py
old_contents:
from __future__ import with_statement from attest import assert_hook import collections import os.path import re import shutil import tempfile from attest import Tests, raises from werkzeug.test import Client from werkzeug.wrappers import Response import sass from sassutils.builder import Manifest, build_directory from sassutils.wsgi import SassMiddleware suite = Tests() @suite.test def version(): assert re.match(r'^\d+\.\d+\.\d+$', sass.__version__) @suite.test def output_styles(): if hasattr(collections, 'Mapping'): assert isinstance(sass.OUTPUT_STYLES, collections.Mapping) assert 'nested' in sass.OUTPUT_STYLES @suite.test def compile_required_arguments(): with raises(TypeError): sass.compile() @suite.test def compile_takes_only_keywords(): with raises(TypeError): sass.compile('a { color: blue; }') @suite.test def compile_exclusive_arguments(): with raises(TypeError): sass.compile(string='a { color: blue; }', filename='test/a.sass') with raises(TypeError): sass.compile(string='a { color: blue; }', dirname='test/') with raises(TypeError): sass.compile(filename='test/a.sass', dirname='test/') @suite.test def compile_invalid_output_style(): with raises(TypeError): sass.compile(string='a { color: blue; }', output_style=['compact']) with raises(TypeError): sass.compile(string='a { color: blue; }', output_style=123j) with raises(ValueError): sass.compile(string='a { color: blue; }', output_style='invalid') @suite.test def compile_invalid_image_path(): with raises(TypeError): sass.compile(string='a { color: blue; }', image_path=[]) with raises(TypeError): sass.compile(string='a { color: blue; }', image_path=123) @suite.test def compile_string(): actual = sass.compile(string='a { b { color: blue; } }') assert actual == 'a b {\n color: blue; }\n' with raises(sass.CompileError): sass.compile(string='a { b { color: blue; }') # sass.CompileError should be a subtype of ValueError with raises(ValueError): sass.compile(string='a { b { color: blue; }') with raises(TypeError): sass.compile(string=1234) with raises(TypeError): sass.compile(string=[]) A_EXPECTED_CSS = '''\ body { background-color: green; } body a { color: blue; } ''' B_EXPECTED_CSS = '''\ b i { font-size: 20px; } ''' C_EXPECTED_CSS = '''\ body { background-color: green; } body a { color: blue; } h1 a { color: green; } ''' @suite.test def compile_filename(): actual = sass.compile(filename='test/a.sass') assert actual == A_EXPECTED_CSS actual = sass.compile(filename='test/c.sass') assert actual == C_EXPECTED_CSS with raises(IOError): sass.compile(filename='test/not-exist.sass') with raises(TypeError): sass.compile(filename=1234) with raises(TypeError): sass.compile(filename=[]) @suite.test def builder_build_directory(): temp_path= tempfile.mkdtemp() sass_path = os.path.join(temp_path, 'sass') css_path = os.path.join(temp_path, 'css') shutil.copytree('test', sass_path) result_files = build_directory(sass_path, css_path) assert len(result_files) == 3 assert result_files['a.sass'] == 'a.sass.css' with open(os.path.join(css_path, 'a.sass.css')) as f: css = f.read() assert css == A_EXPECTED_CSS assert result_files['b.sass'] == 'b.sass.css' with open(os.path.join(css_path, 'b.sass.css')) as f: css = f.read() assert css == B_EXPECTED_CSS assert result_files['c.sass'] == 'c.sass.css' with open(os.path.join(css_path, 'c.sass.css')) as f: css = f.read() assert css == C_EXPECTED_CSS shutil.rmtree(temp_path) @suite.test def normalize_manifests(): manifests = Manifest.normalize_manifests({ 'package': 'sass/path', 'package.name': ('sass/path', 
'css/path'), 'package.name2': Manifest('sass/path', 'css/path') }) assert len(manifests) == 3 assert isinstance(manifests['package'], Manifest) assert manifests['package'].sass_path == 'sass/path' assert manifests['package'].css_path == 'sass/path' assert isinstance(manifests['package.name'], Manifest) assert manifests['package.name'].sass_path == 'sass/path' assert manifests['package.name'].css_path == 'css/path' assert isinstance(manifests['package.name2'], Manifest) assert manifests['package.name2'].sass_path == 'sass/path' assert manifests['package.name2'].css_path == 'css/path' def sample_wsgi_app(environ, start_response): start_response('200 OK', [('Content-Type', 'text/plain')]) return environ['PATH_INFO'], @suite.test def wsgi_sass_middleware(): css_dir = tempfile.mkdtemp() app = SassMiddleware(sample_wsgi_app, { __name__: ('test', css_dir, '/static') }) client = Client(app, Response) r = client.get('/asdf') assert r.status_code == 200 assert r.data == '/asdf' assert r.mimetype == 'text/plain' r = client.get('/static/a.sass.css') assert r.status_code == 200 assert r.data == A_EXPECTED_CSS assert r.mimetype == 'text/css' r = client.get('/static/not-exists.sass.css') assert r.status_code == 200 assert r.data == '/static/not-exists.sass.css' assert r.mimetype == 'text/plain' shutil.rmtree(css_dir)
lang:  Python
proba: 0.000013
diff:
@@ -5475,8 +5475,365 @@ ss_dir)%0A +%0A%0Aregression = Tests()%0A%0A%0A@regression.test%0Adef regression_issue_2():%0A actual = sass.compile(string='''%0A @media (min-width: 980px) %7B%0A a %7B%0A color: red;%0A %7D%0A %7D%0A ''')%0A normalized = re.sub(r'%5Cs+', '', actual)%0A assert normalized == '@media(min-width:980px)%7Ba%7Bcolor:red;%7D%7D'%0A%0A%0Asuite.register(regression)%0A

commit:   cf439125f2350597f66796c89a088cd443ed77cd
subject:  Fix search results disappearing after they are viewed
old_file: igd_exporter/exporter.py
new_file: igd_exporter/exporter.py
old_contents:
import cgi import socket import urllib import wsgiref.util import prometheus_client from . import igd def wsgi_app(environ, start_response): ''' Base WSGI application that routes requests to other applications. ''' name = wsgiref.util.shift_path_info(environ) if name == '': return front(environ, start_response) if name == 'probe': return probe(environ, start_response) elif name == 'metrics': return prometheus_app(environ, start_response) return not_found(environ, start_response) def front(environ, start_response): ''' Front page, containing links to the expoter's own metrics, as well as links to probe discovered devices. ''' global targets start_response('200 OK', [('Content-Type', 'text/html')]) if environ['REQUEST_METHOD'] == 'POST': form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ, strict_parsing=1, encoding='latin1') if form.getfirst('search') == '1': targets = igd.search(5) return [ b'<html>' b'<head><title>WSG exporter</title></head>' b'<body>' b'<h1>IGD Exporter</h1>', b'<form method="post"><p><input type="hidden" name="search" value="1"><button type="submit">Search</button> for devices on local network (5 second timeout)</input></form>', *[b'<p><a href="/probe?target=%s">Probe %s</a>' % (urllib.parse.quote_plus(target).encode('latin1'), target.encode('latin1')) for target in targets], b'<p><a href="/metrics">Metrics</a>' b'</body>' b'</html>' ] # Discovered devices are kept in this list. targets = [] def probe(environ, start_response): ''' Performs a probe using the given root device URL. ''' qs = urllib.parse.parse_qs(environ['QUERY_STRING']) body = igd.probe(qs['target'][0]) start_response('200 OK', [('Content-Type', 'text/plain; charset=utf-8; version=0.0.4')]) return body prometheus_app = prometheus_client.make_wsgi_app() def not_found(environ, start_response): ''' How did we get here? ''' start_response('404 Not Found', [('Content-Type', 'text/plain')]) return [b'Not Found\r\n']
lang:  Python
proba: 0
diff:
@@ -1006,16 +1006,21 @@ rgets = +list( igd.sear @@ -1024,16 +1024,17 @@ earch(5) +) %0A%0A re

commit:   50d2bb0908ea3b45d1ab6da494953a88362a67d4
subject:  add value summary
old_file: save_func.py
new_file: save_func.py
old_contents:
import tensorflow as tf import time def add_train_var(): """ add all trainable variable to summary""" for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) def add_loss(loss_scope = 'losses'): """ add all losses to summary """ for l in tf.get_collection(loss_scope): tf.scalar_summary(l.op.name, l) def restore_model(sess, saver, model_dir, model_name = None): """ restore model: if model_name is None, restore the last one """ if model_name is None: ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir) if ckpt and ckpt.all_model_checkpoint_paths[-1]: print("restore " + ckpt.all_model_checkpoint_paths[-1]) saver.restore(sess, ckpt.all_model_checkpoint_paths[-1]) else: print('no check point') else: print("restore " + model_name) saver.restore(sess, model_dir + '/' + model_name) def save_model(sess, saver, model_dir, iteration): """ save the current model""" curr_time = time.strftime("%Y%m%d_%H%M") model_name = model_dir + '/' + curr_time + \ '_iter_' + str(iteration) + '_model.ckpt' saver.save(sess, model_name)
lang:  Python
proba: 0.000001
diff:
@@ -1072,20 +1072,192 @@ e(sess, model_name)%0A +%0Adef add_value_sum(summary_writer, value, name):%0A%09%22%22%22 add python value to tensorboard %22%22%22%0A%09return tf.Summary(value = %5Btf.Summary.Value(tag = name, simple_value = value)%5D)%09%0A

commit:   191cd180e74b6854cddd64348b96dc20cbc8cbba
subject:  add dataset to base validator
old_file: jobrunner/validators.py
new_file: jobrunner/validators.py
old_contents:
import bmds import json import jsonschema base_schema = { 'type': 'object', 'properties': { 'bmds_version': { 'enum': list(bmds.VERSIONS.keys()) }, 'dataset_type': { 'enum': list(bmds.constants.DTYPES), }, }, 'required': ['bmds_version', 'dataset_type'] } continuous_dataset_schema = { 'type': 'array', 'items': { 'type': 'object', 'required': [ 'doses', 'ns', 'responses', 'stdevs', ], 'properties': { 'doses': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'number', 'minimum': 0 }, }, 'ns': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'integer', 'minimum': 0, 'exclusiveMinimum': True, }, }, 'responses': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'number', }, }, 'stdevs': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'number', }, }, } }, 'minItems': 1, } dichotomous_dataset_schema = { 'type': 'array', 'items': { 'type': 'object', 'required': [ 'doses', 'ns', 'incidences' ], 'properties': { 'doses': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'number', 'minimum': 0 }, }, 'ns': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'integer', 'minimum': 0, 'exclusiveMinimum': True, }, }, 'incidences': { 'type': 'array', 'minItems': 3, 'items': { 'type': 'integer', 'minimum': 0, }, }, } }, 'minItems': 1, } def validate_input(data): """Return None if successful, else raise ValueError""" # ensure data is valid JSON try: jsoned = json.loads(data) except json.decoder.JSONDecodeError: raise ValueError('Invalid format - must be valid JSON.') # first-level check try: jsonschema.validate(jsoned, base_schema) except jsonschema.ValidationError as err: raise ValueError(err.message) # check dataset schema try: datasets = jsoned.get('datasets', []) if jsoned['dataset_type'] == bmds.constants.CONTINUOUS: schema = continuous_dataset_schema else: schema = dichotomous_dataset_schema jsonschema.validate(datasets, schema) except jsonschema.ValidationError as err: raise ValueError('Dataset error(s): ' + err.message)
lang:  Python
proba: 0.000001
diff:
@@ -264,16 +264,102 @@ %7D,%0A + 'datasets': %7B%0A 'minItems': 1,%0A 'type': 'array'%0A %7D %0A %7D,%0A @@ -405,16 +405,28 @@ et_type' +, 'datasets' %5D%0A%7D%0A%0Acon

commit:   4f682e83af711b66581eb275b5b6ffa057960b28
subject:  Simplify the calculation of scores
old_file: prediction/explorer.py
new_file: prediction/explorer.py
old_contents:
from . import support from . import tuner from .learner import Learner from .random import Random from .session import Session import json import numpy as np import os import threading class Agent: def __init__(self, session, semaphore, config): self.session = session self.semaphore = semaphore self.scores = Agent._restore(config.output.path) self.output_path = config.output.path self.lock = threading.Lock() self.done = threading.Lock() def collect(self, step_count): with self.done: return self.scores[step_count] def submit(self, step_count): with self.lock: if step_count in self.scores: return self.scores[step_count] = None self.done.acquire() worker = threading.Thread(target=self._run, args=(step_count,), daemon=True) worker.start() def _restore(path): scores = {} for path in support.scan(path, 'meta-*.json'): meta = json.loads(open(path).read()) scores[meta['step_count']] = meta['score'] support.log(Agent, 'Score: {}', path) return scores def _run(self, step_count): with self.semaphore: with self.lock: last_step_count = 0 for key in self.scores: if self.scores[key] is None: continue if key > last_step_count: last_step_count = key assert(last_step_count < step_count) support.log(self, 'Learning start: {}, stop: {}', last_step_count, step_count) self.session.run_training(step_count - last_step_count, summarize=False) error = self.session.run_validation()['MSE'] decay = np.reshape(np.exp(-np.arange(len(error))), error.shape) score = np.sum(error * decay) Agent._save(self.output_path, step_count, score) self.session.run_saving() with self.lock: self.scores[step_count] = score support.log(self, 'Learning stop: {}, score: {}', step_count, score) self.done.release() def _save(path, step_count, score): path = os.path.join(path, 'meta-{}.json'.format(step_count)) with open(path, 'w') as file: file.write(json.dumps({ 'step_count': step_count, 'score': score, })) class Explorer: def __init__(self, input, config): self.input = input self.config = config self.tuner = getattr(tuner, config.tuner.name) self.tuner = self.tuner(**config.tuner.options) self.resource_scale = config.max_step_count / self.tuner.resource self.sampler = Sampler(config.sampler) self.semaphore = threading.BoundedSemaphore(config.concurrent_count) self.agents = {} def configure(self, case, restore=True): key = support.tokenize(case) config = self.config.copy() config.output.restore = restore config.output.path = os.path.join(config.output.path, key) for key in case: _adjust(config, key, case[key]) return config def run(self): case, resource, score = self.tuner.run(self._generate, self._assess) step_count = int(self.resource_scale * resource) support.log(self, 'Best case: {}, step: {}, score: {}', case, step_count, score) return (case, step_count) def _assess(self, resource, cases): step_count = int(self.resource_scale * resource) support.log(self, 'Assess cases: {}, stop: {}', len(cases), step_count) agents = [] for case in cases: key = support.tokenize(case) agent = self.agents.get(key) if agent is None: config = self.configure(case) learner = Learner(config.learner.candidate) session = Session(self.input, learner, config) agent = Agent(session, self.semaphore, config) self.agents[key] = agent agent.submit(step_count) agents.append(agent) return [agent.collect(step_count) for agent in agents] def _generate(self, count): support.log(self, 'Generate cases: {}', count) return [self.sampler.get() for _ in range(count)] class Sampler: def __init__(self, config): self.parameters = config support.log(self, 'Cases: {}', 
self.case_count) @property def case_count(self): return np.prod([len(self.parameters[n]) for n in self.parameters]) def get(self): case = {} for key in sorted(self.parameters.keys()): chosen = Random.get().randint(len(self.parameters[key])) case[key] = self.parameters[key][chosen] return case def _adjust(config, key, value): if key == 'dropout_rate': config.learner.candidate.dropout.options.update({ 'input_keep_prob': 1 - value[0], 'output_keep_prob': 1 - value[1], }) elif key == 'layer_count': config.learner.candidate.layer_count = value elif key == 'learning_rate': config.teacher.trainer.optimizer.options.learning_rate = value elif key == 'unit_count': config.learner.candidate.unit_count = value elif key == 'use_peepholes': config.learner.candidate.cell.options.use_peepholes = value else: assert(False)
lang:  Python
proba: 0.999995
diff:
@@ -1847,16 +1847,23 @@ -err +sc or +e = +np.sum( self @@ -1898,125 +1898,8 @@ SE'%5D -%0A decay = np.reshape(np.exp(-np.arange(len(error))), error.shape)%0A score = np.sum(error * decay )%0A

commit:   1279b1c087e98717a6c0802c6a7aceca4a27a0b2
subject:  fix copyright header on test file
old_file: app/tests/base/tests_exception_handling.py
new_file: app/tests/base/tests_exception_handling.py
old_contents:
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for exception handling in AJAX handlers. """ # stdlib imports import json # third-party imports import webapp2 # local imports from app import config from app.base import handlers from tests.testcases import BaseTestCase def _make_test_request(app, url, post_data=None, headers=None, method='GET'): """Make a test request against an app """ request = webapp2.Request.blank(url, POST=post_data, headers=headers) request.method = method return request.get_response(app) def strip_and_parse_as_json(json_string): """Strips the XSSI prefix from a JSON response and parses it into a python dict """ json_string = json_string.replace(handlers._XSSI_PREFIX, '') return json.loads(json_string) class AjaxHandler(handlers.BaseAjaxHandler): """Convenience class to verify exceptional requests.""" def get(self): webapp2.abort(406) def post(self): raise AttributeError class AuthedAjaxHandler(handlers.AuthenticatedAjaxHandler): """Convenience class to verify successful requests.""" def get(self): webapp2.abort(406) def DenyAccess(self): webapp2.abort(407) def XsrfFail(self): webapp2.abort(408) class HandlersTest(BaseTestCase): """Test cases for exception handling""" def setUp(self): self.app = webapp2.WSGIApplication([('/', AjaxHandler), ('/authed', AuthedAjaxHandler)], config=config.CONFIG) def testWebapp2ExceptionHandled(self): """Test that a webapp2.abort() is handled correctly within a JSON handler """ response = self.app.get_response('/') self.assertEqual(406, response.status_int) self.assertEqual('406 Not Acceptable', strip_and_parse_as_json(response.body)['error']) def testGenericExceptionHandled(self): """Test that a generic exception is handled correctly within a JSON handler """ response = _make_test_request(self.app, '/', method='POST') self.assertEqual(500, response.status_int) self.assertEqual('500 Server Error', strip_and_parse_as_json(response.body)['error']) def testWebapp2ExceptionHandledWhenAuthFails(self): """Test that a webapp2.abort() is handled correctly within an authed JSON handler when auth fails """ response = self.app.get_response('/authed') self.assertEqual(407, response.status_int) self.assertEqual('407 Proxy Authentication Required', strip_and_parse_as_json(response.body)['error']) def testWebapp2ExceptionHandledWhenXsrfFails(self): """Test that a webapp2.abort() is handled correctly within an authed JSON handler when XSRF fails """ self._FakeLogin() response = _make_test_request(self.app, '/authed', method='POST') self.assertEqual(408, response.status_int) self.assertEqual('408 Request Time-out', strip_and_parse_as_json(response.body)['error'])
lang:  Python
proba: 0
diff:
@@ -14,18 +14,19 @@ 014 -Google Inc +rehabstudio . Al

commit:   f2145ab158cc1eb0d9568c4a89b9f221085287bc
subject:  add volume labels to context disks; ssh disks to correct machine (#32)
old_file: core/src/main/python/stratuslab/tm/TMContext.py
new_file: core/src/main/python/stratuslab/tm/TMContext.py
old_contents:
#!/usr/bin/env python # # Copyright (c) 2012, Centre National de la Recherche Scientifique (CNRS) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import base64 import os import re import shutil import stat from os.path import dirname from tempfile import mkstemp, mkdtemp from stratuslab.Util import execute from stratuslab.cloudinit.Util import decodeMultipartAsJson class TMContext(object): ''' Create the disk with context information. This is a CDROM for standard OpenNebula/HEPiX contextualization. It is a VFAT-formatted volume for cloud-init contextualization. ''' # Debug option PRINT_TRACE_ON_ERROR = True DEFAULT_VERBOSELEVEL = 0 # Context disk permissions = 0660 DISK_PERMS = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP) def __init__(self, args, **kwargs): self.args = args def run(self): try: self._run() finally: self._cleanup() def _run(self): TMContext._checkArgs(self.args) contextFile = self.args[1] contextDiskFile = self.args[2] cdromFiles = self.args[1:] cdromFiles.remove(contextDiskFile) # hack to remove the node from the destination contextDiskFile = contextDiskFile.split(':')[1] kvpairs = TMContext._parseContextFile(contextFile) method = kvpairs.get('context_method', 'opennebula') if (method == 'cloud-init'): TMContext._doCloudInit(contextDiskFile, kvpairs) else: TMContext._doOpenNebula(contextDiskFile, cdromFiles) def _cleanup(self): pass @staticmethod def _checkArgs(args): if (not args or len(args) < 3): raise ValueError('must have at least two arguments: destination disk and context file') ''' This does a "dirty" parsing of the context file looking only for the lines with the keys CONTEXT_METHOD, CLOUD_INIT_USER_DATA and CLOUD_INIT_AUTHORIZED_KEYS. All of the other key-value pairs do not need to be understood by this class. A map with these values (if found) are returned. 
''' @staticmethod def _parseContextFile(context_file): result = {} with open(context_file, 'r') as f: for line in f: match = re.match('\s*CONTEXT_METHOD\s*=\s*"(.+)".*', line) if match: result['context_method'] = match.group(1).strip() match = re.match('\s*CLOUD_INIT_USER_DATA\s*=\s*"(.+)".*', line) if match: result['user_data'] = match.group(1).strip() match = re.match('\s*CLOUD_INIT_AUTHORIZED_KEYS\s*=\s*"(.+)".*', line) if match: result['authorized_keys'] = match.group(1).strip() return result @staticmethod def _doOpenNebula(contextDiskFile, cdromFiles): tmpdir = None image = None try: tmpdir = mkdtemp() for f in cdromFiles: shutil.copy(f, tmpdir) _, image = mkstemp() cmd = ["mkisofs", "-o", image, "-J", "-R", tmpdir] rc = execute(cmd) if (rc != 0): raise Exception("error creating cdrom") os.chmod(image, TMContext.DISK_PERMS) print image print contextDiskFile print tmpdir os.makedirs(os.path.dirname(contextDiskFile)) shutil.copy(image, contextDiskFile) finally: if tmpdir: shutil.rmtree(tmpdir, True) if image: os.remove(image) @staticmethod def _doCloudInit(contextDiskFile, params): tmpdir = None image = None try: tmpdir = mkdtemp() image = os.path.join(tmpdir, "disk.vfat") mnt_point = os.path.join(tmpdir, "context") os.mkdir(mnt_point) print tmpdir print image print mnt_point cmd = ["mkfs.vfat", "-v", "-C", image, "1024"] print cmd rc = execute(cmd) print rc if (rc != 0): raise Exception('cannot create VFAT file system for cloud-init') cmd = ["mount", "-o", "loop", image, mnt_point] rc = execute(cmd) if (rc != 0): raise Exception('cannot mount VFAT file system for cloud-init') try: b64_content = params['authorized_keys'] ssh_dir = os.path.join(mnt_point, 'root', '.ssh') os.makedirs(ssh_dir) keys_file = os.path.join(ssh_dir, 'authorized_keys') with open(keys_file, 'wb') as f: content = base64.b64decode(b64_content) f.write(content) except KeyError: pass try: encoded_content = params['user_data'] meta_content = decodeMultipartAsJson('local', encoded_content) meta_file = os.path.join(mnt_point, 'meta.js') with open(meta_file, 'wb') as f: f.write(meta_content) except KeyError: pass cmd = ["umount", mnt_point] rc = execute(cmd) if (rc != 0): raise Exception('cannot umount VFAT file system for cloud-init') os.chmod(image, TMContext.DISK_PERMS) shutil.copy(image, contextDiskFile) finally: if tmpdir: shutil.rmtree(tmpdir, True)
lang:  Python
proba: 0
diff:
@@ -800,17 +800,22 @@ execute +, scp %0A - from str @@ -1709,120 +1709,8 @@ e)%0A%0A - # hack to remove the node from the destination%0A contextDiskFile = contextDiskFile.split(':')%5B1%5D%0A%0A @@ -3557,16 +3557,37 @@ kisofs%22, + %22-V%22, %22_STRATUSLAB%22, %22-o%22, i @@ -3792,160 +3792,11 @@ -print image%0A print contextDiskFile%0A print tmpdir%0A os.makedirs(os.path.dirname(contextDiskFile))%0A shutil.copy +scp (ima @@ -4363,16 +4363,37 @@ s.vfat%22, + %22-n%22, %22_CLOUD_INIT%22, %22-v%22, %22 @@ -5836,26 +5836,18 @@ s -hutil.copy +cp (image,

commit:   0c628c0bf88f98899e371fcf7d11a1af6fcf7bd7
subject:  use proper setter
old_file: corehq/apps/app_manager/tests/test_case_meta.py
new_file: corehq/apps/app_manager/tests/test_case_meta.py
old_contents:
from django.test.testcases import SimpleTestCase from mock import patch from nose.tools import nottest from corehq.apps.app_manager.models import Application, Module, OpenCaseAction, ParentSelect, OpenSubCaseAction, \ AdvancedModule, LoadUpdateAction, AdvancedOpenCaseAction, CaseIndex from corehq.apps.app_manager.tests.util import TestXmlMixin class CaseMetaTest(SimpleTestCase, TestXmlMixin): file_path = ('data', 'case_meta') def setUp(self): self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use') self.is_usercase_in_use_mock = self.is_usercase_in_use_patch.start() def tearDown(self): self.is_usercase_in_use_patch.stop() def _make_module(self, app, module_id, case_type): m = app.add_module(Module.new_module('Module{}'.format(module_id), lang='en')) m.case_type = case_type mf = app.new_form(module_id, 'form {}'.format(case_type), lang='en', attachment=self.get_xml('standard_questions')) mf.actions.open_case = OpenCaseAction(name_path="/data/question1", external_id=None) mf.actions.open_case.condition.type = 'always' return m def _assert_properties(self, meta, property_set): self.assertEqual(1, len(meta.case_types)) self.assertEqual(set(p.name for p in meta.case_types[0].properties), property_set) def test_hierarchy(self): app, expected_hierarchy = self.get_test_app() meta = app.get_case_metadata() self.assertDictEqual(meta.type_hierarchy, expected_hierarchy) @nottest def get_test_app(self): app = Application.new_app('domain', 'New App') app.version = 1 m0 = self._make_module(app, 0, 'parent') m0.get_form(0).actions.subcases.append(OpenSubCaseAction( case_type='child', reference_id='parent' )) m1 = self._make_module(app, 1, 'child') m1.get_form(0).actions.subcases.append(OpenSubCaseAction( case_type='grand child', reference_id='parent' )) m2 = self._make_module(app, 2, 'grand child') m3 = app.add_module(AdvancedModule.new_module('Module3', lang='en')) m3.case_type = 'other grand child' m3f0 = m3.new_form('other form', 'en') m3f0.actions.load_update_cases.append(LoadUpdateAction( case_type='child', case_tag='child')) m3f0.actions.open_cases.append(AdvancedOpenCaseAction( name_path='/data/question1', case_type='other grand child', case_indices=[CaseIndex(tag='child')] )) m3f0.actions.open_cases[0].open_condition.type = 'always' m2.parent_select = ParentSelect(active=True, module_id=m1.unique_id) m1.parent_select = ParentSelect(active=True, module_id=m0.unique_id) expected_hierarchy = { 'parent': { 'child': { 'grand child': {}, 'other grand child': {} } } } return app, expected_hierarchy def test_case_properties(self): app = Application.new_app('domain', 'New App') app.version = 2 m0 = self._make_module(app, 0, 'normal_module') m0f1 = m0.new_form('update case', 'en', attachment=self.get_xml('standard_questions')) self._assert_properties(app.get_case_metadata(), {'name'}) m0f1.actions.update_case.condition.type = 'always' m0f1.actions.update_case.update = { "p1": "/data/question1", "p2": "/data/question2" } self._assert_properties(app.get_case_metadata(), {'name', 'p1', 'p2'}) def test_case_references(self): app = Application.new_app('domain', 'New App') app.version = 2 m0 = self._make_module(app, 0, 'household') m0f1 = m0.new_form('save to case', 'en', attachment=self.get_xml('standard_questions')) m0f1.case_references = { 'save': { "/data/question1": { "case_type": "household", "properties": [ "save_to_case_p1", "save_to_case_p2" ], } } } self._assert_properties(app.get_case_metadata(), {'name', 'save_to_case_p1', 'save_to_case_p2'}) def 
test_case_references_advanced(self): app = Application.new_app('domain', 'New App') app.version = 2 m0 = app.add_module(AdvancedModule.new_module('Module3', lang='en')) m0.case_type = 'household_advanced' m0f1 = m0.new_form('save to case', 'en', attachment=self.get_xml('standard_questions')) m0f1.case_references_data = { 'save': { "/data/question1": { "case_type": "household_advanced", "properties": [ "save_to_case_p1", "save_to_case_p2" ], } } } self._assert_properties(app.get_case_metadata(), {'save_to_case_p1', 'save_to_case_p2'}) def test_case_references_open_close(self): app = Application.new_app('domain', 'New App') app.version = 3 m0 = self._make_module(app, 0, 'household') m0f1 = m0.new_form('save to case', 'en', attachment=self.get_xml('standard_questions')) m0f1.case_references = { 'save': { "/data/question1": { "case_type": "save_to_case", } } } meta_type = app.get_case_metadata().get_type('save_to_case') self.assertEqual({}, meta_type.opened_by) self.assertEqual({}, meta_type.closed_by) m0f1.case_references = { 'save': { "/data/question1": { "case_type": "save_to_case", "create": True } } } meta_type = app.get_case_metadata().get_type('save_to_case') self.assertTrue(m0f1.unique_id in meta_type.opened_by) self.assertEqual({}, meta_type.closed_by) m0f1.case_references = { 'save': { "/data/question1": { "case_type": "save_to_case", "close": True } } } meta_type = app.get_case_metadata().get_type('save_to_case') self.assertEqual({}, meta_type.opened_by) self.assertTrue(m0f1.unique_id in meta_type.closed_by)
lang:  Python
proba: 0.00094
diff:
@@ -4780,13 +4780,8 @@ nces -_data = %7B

commit:   6126d9e2ad6ed01c26d7f3c029f231ab19765685
subject:  Remove get/post/handler from ContentView
old_file: feincms/module/mixins.py
new_file: feincms/module/mixins.py
old_contents:
import re from django.db import models from django.http import Http404 from django.template import Template from django.utils.cache import add_never_cache_headers from django.utils.datastructures import SortedDict from django.views.generic import TemplateView from feincms import settings class ContentMixin(object): """ Mixin for ``feincms.models.Base`` subclasses which need need some degree of additional control over the request-response cycle. """ #: Collection of request processors request_processors = SortedDict() #: Collection of response processors response_processors = SortedDict() def setup_request(self, request): import warnings warnings.warn( '%s.setup_request does nothing anymore, and will be removed in' ' FeinCMS v1.8', DeprecationWarning, stacklevel=2) @classmethod def register_request_processor(cls, fn, key=None): """ Registers the passed callable as request processor. A request processor always receives two arguments, the current object and the request. """ cls.request_processors[fn if key is None else key] = fn @classmethod def register_response_processor(cls, fn, key=None): """ Registers the passed callable as response processor. A response processor always receives three arguments, the current object, the request and the response. """ cls.response_processors[fn if key is None else key] = fn class ContentView(TemplateView): #: The name of the object for the template rendering context context_object_name = 'feincms_object' def get(self, request, *args, **kwargs): return self.handler(request, *args, **kwargs) def post(self, request, *args, **kwargs): return self.handler(request, *args, **kwargs) def handler(self, request, *args, **kwargs): self.page = Page.objects.for_request(request, raise404=True, best_match=True) def handle_object(self, object): if not hasattr(self.request, '_feincms_extra_context'): self.request._feincms_extra_context = {} self.object = object r = self.run_request_processors() if r: return r r = self.process_content_types() if r: return r response = self.render_to_response(self.get_context_data()) r = self.finalize_content_types(response) if r: return r r = self.run_response_processors(response) if r: return r return response def get_template_names(self): # According to the documentation this method is supposed to return # a list. However, we can also return a Template instance... if isinstance(self.template_name, (Template, list, tuple)): return self.template_name if self.template_name: return [self.template_name] # Hopefully someone run register_templates on the object class # beforehand, otherwise we'll crash... return [self.object.template.path] def get_context_data(self, **kwargs): context = self.request._feincms_extra_context context[self.context_object_name] = self.object return context @property def __name__(self): """ Dummy property to make this handler behave like a normal function. This property is used by django-debug-toolbar """ return self.__class__.__name__ def run_request_processors(self): """ Before rendering an object, run all registered request processors. A request processor may peruse and modify the page or the request. It can also return a ``HttpResponse`` for shortcutting the rendering and returning that response immediately to the client. """ self.request._feincms_extra_context.update({ # XXX This variable name isn't accurate anymore. # We _are_ in a subpage, but it isn't necessarily # an appcontent subpage. 
'in_appcontent_subpage': False, 'extra_path': '/', }) url = self.object.get_absolute_url() if self.request.path != url: # extra_path must not end with a slash self.request._feincms_extra_context.update({ 'in_appcontent_subpage': True, 'extra_path': re.sub('^' + re.escape(url.rstrip('/')), '', self.request.path), }) for fn in reversed(self.object.request_processors.values()): r = fn(self.object, self.request) if r: return r def run_response_processors(self, response): """ After rendering an object to a response, the registered response processors are called to modify the response, eg. for setting cache or expiration headers, keeping statistics, etc. """ for fn in self.object.response_processors.values(): r = fn(self.object, self.request, response) if r: return r def process_content_types(self): """ Run the ``process`` method of all content types sporting one """ # store eventual Http404 exceptions for re-raising, # if no content type wants to handle the current self.request http404 = None # did any content type successfully end processing? successful = False for content in self.object.content.all_of_type(tuple( self.object._feincms_content_types_with_process)): try: r = content.process(self.request, view=self) if r in (True, False): successful = r elif r: return r except Http404, e: http404 = e if not successful: if http404: # re-raise stored Http404 exception raise http404 if not settings.FEINCMS_ALLOW_EXTRA_PATH and \ self.request._feincms_extra_context['extra_path'] != '/': raise Http404() def finalize_content_types(self, response): """ Runs finalize() on content types having such a method, adds headers and returns the final response. """ for content in self.object.content.all_of_type(tuple( self.object._feincms_content_types_with_finalize)): r = content.finalize(self.request, response) if r: return r # Add never cache headers in case frontend editing is active if (hasattr(self.request, "COOKIES") and self.request.COOKIES.get('frontend_editing', False)): if hasattr(response, 'add_post_render_callback'): response.add_post_render_callback(add_never_cache_headers) else: add_never_cache_headers(response)
lang:  Python
proba: 0
diff:
@@ -1671,357 +1671,8 @@ t'%0A%0A - def get(self, request, *args, **kwargs):%0A return self.handler(request, *args, **kwargs)%0A%0A def post(self, request, *args, **kwargs):%0A return self.handler(request, *args, **kwargs)%0A%0A def handler(self, request, *args, **kwargs):%0A self.page = Page.objects.for_request(request,%0A raise404=True, best_match=True)%0A%0A

commit:   f707a366f887bdadcec6ef4da0525bd784e65aee
subject:  Make sure we don't overlap reinvite transactions.
old_file: test_cases/reinv_onhold.py
new_file: test_cases/reinv_onhold.py
old_contents:
# Copyright (c) 2016 Sippy Software, Inc. All rights reserved. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from random import random from sippy.CCEvents import CCEventUpdate from sippy.Time.Timeout import Timeout from test_cases.reinvite import a_test_reinvite, b_test_reinvite class a_test_reinv_onhold(a_test_reinvite): cld = 'bob_reinv_onhold' cli = 'alice_reinv_onhold' sched = None onhold_count = 0 offhold_count = 0 def __init__(self, *args, **kwargs): while True: self.sched = [0.1 + (random() * 0.5) for x in range(0, 4)] if sum(self.sched) < b_test_reinv_onhold.ring_ival + \ b_test_reinv_onhold.answer_ival + self.get_reinvite_ival() - \ self.disconnect_ival: break if self.debug_lvl > -1: print('%s: self.sched = %s' % (self.my_name(), self.sched)) a_test_reinvite.__init__(self, *args, **kwargs) def reinvite(self, ua): if not self.connect_done or self.disconnect_done: return sdp_body_bak = ua.lSDP ua.lSDP = sdp_body_bak.getCopy() for sect in ua.lSDP.content.sections: if self.tccfg.atype == 'IP4': sect.c_header.addr = '0.0.0.0' else: sect.c_header.addr = '::' while 'sendrecv' in sect.a_headers: sect.a_headers.remove('sendrecv') rval = a_test_reinvite.reinvite(self, ua, alter_port = False) self.onhold_count += 1 ua.lSDP = sdp_body_bak if len(self.sched) > 0: # Take call off-hold little bit later Timeout(self.off_hold, self.sched.pop(), 1, ua) return rval def off_hold(self, ua): a_test_reinvite.reinvite(self, ua, alter_port = False) self.offhold_count += 1 if len(self.sched) > 0: Timeout(self.reinvite, self.sched.pop(), 1, ua) def get_reinvite_ival(self): return a_test_reinvite.get_reinvite_ival(self) / 2.0 def disconnect(self, ua): if self.disconnect_done: return if self.onhold_count != 3 or self.offhold_count != 2: Timeout(self.disconnect, 1.0, 1, ua) return a_test_reinvite.disconnect(self, ua) class b_test_reinv_onhold(b_test_reinvite): cli = 'bob_reinv_onhold' onhold_count = 0 offhold_count = 0 def recvEvent(self, event, ua): if isinstance(event, CCEventUpdate): sdp_body = event.getData() sdp_body.parse() onhold = False for sect in sdp_body.content.sections: onhold = sect.isOnHold() if onhold: self.onhold_count += 1 else: self.offhold_count += 1 return (b_test_reinvite.recvEvent(self, event, ua)) def alldone(self, ua): if self.onhold_count != 3 or 
self.offhold_count != 2: if self.debug_lvl > -1: fmsg = self.failed_msg() print('%s: onhold_count = %d, offhold_count = %d' % \ (fmsg, self.onhold_count, self.offhold_count)) self.finfo_displayed = True self.nerrs += 1 return b_test_reinvite.alldone(self, ua)
lang:  Python
proba: 0
diff:
@@ -2805,32 +2805,153 @@ -if len(self.sched) %3E 0:%0A +return rval%0A%0A def on_reinvite_connected(self, ua):%0A if len(self.sched) %3E 0:%0A if self.onhold_count %3E self.offhold_count:%0A @@ -2996,16 +2996,20 @@ t later%0A + @@ -3064,35 +3064,153 @@ ua)%0A + -return rval + else:%0A Timeout(self.reinvite, self.sched.pop(), 1, ua)%0A a_test_reinvite.on_reinvite_connected(self, ua) %0A%0A def of @@ -3325,100 +3325,8 @@ += 1 -%0A if len(self.sched) %3E 0:%0A Timeout(self.reinvite, self.sched.pop(), 1, ua) %0A%0A

commit:   d67249d0efb074ebcf5e5b20c5f21d92ffe81dae
subject:  Bump version to 0.1.7
old_file: openkongqi/__init__.py
new_file: openkongqi/__init__.py
old_contents:
# -*- coding: utf-8 -*- __version__ = '0.1.6' __author__ = "Stefan Berder" __contact__ = 'stefan@measureofqualty.com'
lang:  Python
proba: 0.000001
diff:
@@ -42,9 +42,9 @@ 0.1. -6 +7 '%0A__
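The record above is the smallest in the dump and makes the hunk format concrete. Read as character offsets (an interpretation, not documented in the dump itself), -42,9 +42,9 replaces nine characters of old_contents with nine new ones: context "0.1.", "6" removed, "7" added, then context "'" plus a newline and "__", which is exactly the version bump named in the subject. Reusing decode_diff from the sketch above:

print(decode_diff({"diff": "@@ -42,9 +42,9 @@ 0.1. -6 +7 '%0A__"}))
# @@ -42,9 +42,9 @@ 0.1. -6 +7 '
# __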

commit:   e8cae50b026f9136b9b5afc35a92a74f33937205
subject:  Update 1482-maritime_boundary-buffered_land.py
old_file: integration-test/1482-maritime_boundary-buffered_land.py
new_file: integration-test/1482-maritime_boundary-buffered_land.py
old_contents:
from . import FixtureTest # Adds tests for OSM features (but not NE features) class MaritimeBoundary(FixtureTest): def test_usa_canada_country_boundary(self): # country boundary of USA and Canada self.load_fixtures([ 'https://www.openstreetmap.org/relation/148838', 'https://www.openstreetmap.org/relation/1428125', ], clip=self.tile_bbox(8, 44, 87, padding=0.1)) self.assert_has_feature( 8, 44, 87, "boundaries", {"kind": "country", "maritime_boundary": false}) def test_washington_idaho_region_boundary(self): # region boundary between Washington - Idaho self.load_fixtures([ 'https://www.openstreetmap.org/relation/165479', 'https://www.openstreetmap.org/relation/162116', ], clip=self.tile_bbox(8, 44, 88, padding=0.1)) self.assert_has_feature( 8, 44, 88, "boundaries", {"kind": "region", "maritime_boundary": false})
lang:  Python
proba: 0
diff:
@@ -356,16 +356,135 @@ 28125',%0A + 'file://integration-test/fixtures/buffered_land/'%0A '1482-buffered_land-usa-can-wash-idaho.shp',%0A @@ -635,36 +635,594 @@ try%22 -, %22maritime_boundary%22: false +%7D)%0A %0A def test_usa_canada_country_boundary_not_maritime_boundary(self):%0A # country boundary of USA and Canada%0A self.load_fixtures(%5B%0A 'https://www.openstreetmap.org/relation/148838',%0A 'https://www.openstreetmap.org/relation/1428125',%0A 'file://integration-test/fixtures/buffered_land/'%0A '1482-buffered_land-usa-can-wash-idaho.shp',%0A %5D, clip=self.tile_bbox(8, 44, 87, padding=0.1))%0A self.assert_no_matching_feature(%0A 8, 44, 87, %22boundaries%22,%0A %7B%22kind%22: %22country%22, %22maritime_boundary%22: 1 %7D)%0A%0A @@ -1478,16 +1478,135 @@ 62116',%0A + 'file://integration-test/fixtures/buffered_land/'%0A '1482-buffered_land-usa-can-wash-idaho.shp',%0A @@ -1756,35 +1756,596 @@ ion%22 -, %22maritime_boundary%22: false +%7D)%0A%0A def test_washington_idaho_region_boundary_not_maritime_boundary(self):%0A # region boundary between Washington - Idaho%0A self.load_fixtures(%5B%0A 'https://www.openstreetmap.org/relation/165479',%0A 'https://www.openstreetmap.org/relation/162116',%0A 'file://integration-test/fixtures/buffered_land/'%0A '1482-buffered_land-usa-can-wash-idaho.shp',%0A %5D, clip=self.tile_bbox(8, 44, 88, padding=0.1))%0A self.assert_no_matching_feature(%0A 8, 44, 88, %22boundaries%22,%0A %7B%22kind%22: %22region%22, %22maritime_boundary%22: 1 %7D)%0A

commit:   37b8ee356968efbc9d78d65ba7b31352b2b01ee2
subject:  Fix parameter file writing
old_file: workflows/common/python/runner_utils.py
new_file: workflows/common/python/runner_utils.py
old_contents:
import numpy as np import json, os try: basestring except NameError: basestring = str DATA_TYPES = {type(np.float16): 'f16', type(np.float32): 'f32', type(np.float64): 'f64'} def write_output(result, instance_directory): with open('{}/result.txt'.format(instance_directory), 'w') as f_out: f_out.write("{}\n".format(result)) def init(param_string, instance_directory, framework, out_dir_key): #with open(param_file) as f_in: # hyper_parameter_map = json.load(f_in) hyper_parameter_map = json.loads(param_string.strip()) if not os.path.exists(instance_directory): os.makedirs(instance_directory) hyper_parameter_map['framework'] = framework hyper_parameter_map[out_dir_key] = '{}/output'.format(instance_directory) hyper_parameter_map['instance_directory'] = instance_directory return hyper_parameter_map def is_numeric(val): try: float(val) return True except ValueError: return False def format_params(hyper_parameter_map): for k,v in hyper_parameter_map.items(): vals = str(v).split(" ") if len(vals) > 1 and is_numeric(vals[0]): # assume this should be a list if "." in vals[0]: hyper_parameter_map[k] = [float(x) for x in vals] else: hyper_parameter_map[k] = [int(x) for x in vals] def write_params(params, hyper_parameter_map): parent_dir = hyper_parameter_map['instance_directory'] if 'instance_directory' in hyper_parameter_map else '.' f = "{}/parameters.txt".format(parent_dir) with open(f, "w") as f_out: f_out.write("[parameters]\n") for k,v in params.items(): if type(v) in DATA_TYPES: v = DATA_TYPES[type(v)] if isinstance(v, basestring): v = "'{}'".format(v) f_out.write("{}={}\n".format(k, v)) def keras_clear_session(framework): if framework == 'keras': # works around this error: # https://github.com/tensorflow/tensorflow/issues/3388 try: from keras import backend as K K.clear_session() except AttributeError: # theano does not have this function pass
lang:  Python
proba: 0.000004
diff:
@@ -1578,16 +1578,29 @@ nt_dir)%0A + montr=%5B%5D%0A with @@ -1645,25 +1645,28 @@ write(%22%5B -parameter +Global Param s%5D%5Cn%22)%0A @@ -1860,55 +1860,417 @@ (v)%0A - f_out.write(%22%7B%7D=%7B%7D%5Cn%22.format(k, v)) +%0A if(k =='solr_root' or k == 'timeout' ):%0A # this must written at the end%0A print(%22hi%5Cn%22)%0A montr.append(k)%0A montr.append(v)%0A else:%0A f_out.write(%22%7B%7D=%7B%7D%5Cn%22.format(k, v))%0A f_out.write(%22%5BMonitor Params%5D%5Cn%22)%0A for i in range(len(montr)/2):%0A f_out.write(%22%7B%7D=%7B%7D%5Cn%22.format(montr%5B2*i%5D, montr%5B2*i+1%5D)) %0A %0A%0Ade

commit:   c4aff416299f91b53f5dca1a76ed16c8a0d51a8f
subject:  fix some error by handle TypeError
old_file: unzip.py
new_file: unzip.py
old_contents:
#!/usr/bin/env python # -*- coding: utf-8 -*- # Last modified: 2010 Dec 15 06:43:11 PM CST # # LICENSE: # Copyright (c) 2010 Tzeng, Yi-Feng # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Extract zip tool. Due to default unzip in linux platfrom can not extract cp950, cp936 encoding etc. """ __version__ = "1.0" __revision__ = '1.2.1' __author__ = "Tzeng, Yi-Feng" __authorcontact__ = "yftzeng@gmail.com" __website__ = "http://antbsd.twbbs.org/" import sys import zipfile import os import getopt import shutil def usage(): print """usage: unzip.py <zipfile> [[-e <encoding>] -p <password>] <zipfile> is the source zipfile to extract <encoding> is the encoding of zipfile <password> is the password of zipfile -h: help long options also work: --verbose --encoding --password """ def main(): shortargs = 'he:p:' longargs = ['help', 'encoding=', 'password='] try: if sys.argv[1].startswith('-'): opts, args = getopt.getopt(sys.argv[1:], shortargs, longargs) zipsource = ''.join(sys.argv[-1:]) else: opts, args = getopt.getopt(sys.argv[2:], shortargs, longargs) zipsource = sys.argv[1] except getopt.GetoptError: usage() sys.exit(2) encoding = 'cp950' password = None for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() if o in ("-e", "--encoding"): encoding = a if o in ("-p", "--password"): password = a try: f = zipfile.ZipFile(zipsource, 'r') except zipfile.BadZipfile: print "ERROR: File is broken zip or not a zip file" sys.exit(2) if password != None: f.setpassword(password) for fileinfo in f.infolist(): try: filename = unicode(fileinfo.filename, encoding) except: print "ERROR: unknown encoding (" + encoding + ")" sys.exit(2) if filename.endswith('/'): if not os.path.isdir(filename): os.mkdir(filename) print "Create : " + filename else: outputfile = open(filename, "wb") try: shutil.copyfileobj(f.open(fileinfo.filename), outputfile) except: print "ERROR: File is encrypted, password required for extraction (-p, --password)" sys.exit(2) print "Extract: " + filename if __name__ == '__main__': main()
Python
0
@@ -63,27 +63,27 @@ 201 -0 Dec 15 06:43:11 P +1 Oct 21 10:49:20 A M CS @@ -136,19 +136,16 @@ i-Feng%0A# - %0A# Per @@ -599,19 +599,16 @@ tions:%0A# - %0A# The @@ -736,19 +736,16 @@ tware.%0A# - %0A# THE @@ -1962,16 +1962,78 @@ ord='%5D%0A%0A + if len(sys.argv) %3C 2:%0A usage()%0A sys.exit()%0A%0A try: @@ -2918,32 +2918,32 @@ ):%0A try:%0A - file @@ -2986,16 +2986,83 @@ coding)%0A + except TypeError:%0A filename = fileinfo.filename%0A
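Decoded, the fix adds an early usage check and a TypeError fallback around the filename decode (besides refreshing the "Last modified" stamp); a sketch with the unchanged lines elided:

if len(sys.argv) < 2:
    usage()
    sys.exit()

# ... inside the extraction loop ...
for fileinfo in f.infolist():
    try:
        filename = unicode(fileinfo.filename, encoding)
    except TypeError:
        # e.g. the name is already a unicode object
        filename = fileinfo.filename
    except:
        print "ERROR: unknown encoding (" + encoding + ")"
        sys.exit(2)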
5cf2e157da810dfe9e0436f9e76f5f339444031a
Add doxygen to the linux bootstrap
cerbero/bootstrap/linux.py
cerbero/bootstrap/linux.py
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.

from cerbero.bootstrap import BootstraperBase
from cerbero.bootstrap.bootstraper import register_bootstraper
from cerbero.config import Distro
from cerbero.errors import FatalError
from cerbero.utils import shell, _, user_is_root


class UnixBootstraper (BootstraperBase):

    tool = ''
    packages = []

    def start(self):
        shell.call('%s %s' % (self.tool, ' '.join(self.packages)))


class DebianBootstraper (UnixBootstraper):

    tool = 'sudo apt-get install'
    packages = ['autotools-dev', 'automake', 'autoconf', 'libtool', 'g++',
                'autopoint', 'make', 'cmake', 'bison', 'flex', 'yasm',
                'pkg-config', 'gtk-doc-tools', 'libxv-dev', 'libx11-dev',
                'libpulse-dev', 'python2.7-dev', 'texinfo', 'gettext',
                'build-essential', 'pkg-config']


class RedHatBootstraper (UnixBootstraper):

    tool = 'yum install'
    packages = ['']


def register_all():
    register_bootstraper(Distro.DEBIAN, DebianBootstraper)
    register_bootstraper(Distro.REDHAT, RedHatBootstraper)
Python
0.000001
@@ -1669,16 +1669,27 @@ -config' +, 'doxygen' %5D%0A%0A%0Aclas
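Decoded, the diff appends one package to DebianBootstraper.packages; middle entries elided here:

packages = ['autotools-dev', 'automake', 'autoconf', 'libtool', 'g++',
            # ... unchanged entries ...
            'build-essential', 'pkg-config', 'doxygen']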
85516e56cf60337af51ddbf0fef95763a1dcc2d4
Remove extra 's'
bouncer-plumbing/collector-to-mlab/getconfig.py
bouncer-plumbing/collector-to-mlab/getconfig.py
#!/usr/bin/env python # This script builds the portion of the bouncer.yaml config file for the Ooni # collector and test helpers running on an M-Lab slice. To use it, run `python # getconfig.py`, and if all goes well, the YAML portion will be printed to # stdout. If something goes wrong, an error message will be printed to stdout, # and the exit status will be non-zero. The script assumes the oonib.conf file # is in /home/mlab_ooni/oonib.conf. If not, pass the path as the first command # line option. import sys import os import yaml import json import subprocess import urllib2 # For a hack import time MLAB_SIMULATOR_URL = "http://127.0.0.1:8585/update-ooni" def get_bouncer_config_part(oonib_conf): try: # Open this slice's oonib.conf f = open(oonib_conf, "r") oonib_conf_contents = f.read() f.close() oonib_conf_parsed = yaml.safe_load(oonib_conf_contents) except IOError: return_failure("Couldn't read oonib.conf") try: # Read this slice's (collector) .onion address. tor_datadir = oonib_conf_parsed['main']['tor_datadir'] tor_hostname_path = os.path.join(tor_datadir, 'collector', 'hostname') f = open(tor_hostname_path, "r") tor_onion_address = f.read().strip() f.close() except IOError: return_failure("Couldn't read Tor hostname file") except KeyError: return_failure("Oonib.conf is not valid or is missing information.") # Find this slice's IP address. slice_ipv4_address = get_ipv4_address() # List the running test helpers and their addresses. test_helpers = {} # FIXME: This should be a dynamically-generated list of all the test helpers # that are actually running. However, I have no idea how to infer which ones # are running and which ones aren't from the oonib.conf, since it seems to # be the same regardless of whether they're running or not (is there some # other source of information?) try: # For this first M-Lab deployment, we only support one test, which is # http-return-json-headers. In the future, this script should # automatically determine which helpers are running on a slice and # include exactly those. This is tracked in: # https://github.com/m-lab-tools/ooni-support/issues/55 http_return_headers_port = oonib_conf_parsed['helpers']['http-return-json-headers']['port'] if http_return_headers_port is not None: test_helpers['http-return-json-headers'] = 'http://' + slice_ipv4_address + ':' + str(http_return_headers_port) tcp_echo_port = oonib_conf_parsed['helpers']['tcp-echo']['port'] if tcp_echo_port is not None: test_helpers['tcp-echo'] = slice_ipv4_address + ':' + str(tcp_echo_port) # FIXME: What about the UDP port? dns_tcp_port = oonib_conf_parsed['helpers']['dns']['tcp_port'] if dns_tcp_port is not None: test_helpers['dns'] = slice_ipv4_address + ':' + str(dns_tcp_port) # FIXME: What about the 'address' field of the ssl helper? ssl_port = oonib_conf_parsed['helpers']['ssl']['port'] if ssl_port is not None: test_helpers['ssl'] = "https://" + slice_ipv4_address + ':' + str(ssl_port) # FIXME: Add daphn3 test helper. 
except KeyError: return_failure("Oonib.conf is not valid or is missing information.") config_part = { 'httpo://' + tor_onion_address: { 'test-helpers': test_helpers } } return config_part def get_ipv4_address(): output = subprocess.Popen(["/home/mlab_ooni/bin/get_ipv4.sh"], stdout=subprocess.PIPE).communicate()[0] return output.strip() def return_failure(msg): print "ERROR: " + msg exit(1) def put_config(oonib_conf): part = get_bouncer_config_part(oonib_conf) put_parameters = { 'city': 'foobar', 'country': 'foobar', 'fqdn': "nothing.google.com" + str(time.time()), 'ip': '127.0.0.1', 'port': '0', 'site': 'mars', 'tool_extra': part } send_put(json.dumps(put_parameters)) def send_put(json_body): # https://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(MLAB_SIMULATOR_URL, data=json_body) request.add_header('Content-Type', 'application/json') request.get_method = lambda: 'PUT' opener.open(request) oonib_conf = '/home/mlab_ooni/oonib.conf' if len(sys.argv) >= 2: oonib_conf = sys.argv[1] put_config(oonib_conf)
Python
0.001651
@@ -3513,17 +3513,16 @@ t-helper -s ': test_
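Decoded, the diff drops one character from a dictionary key in get_bouncer_config_part:

config_part = {
    'httpo://' + tor_onion_address: {
        'test-helper': test_helpers   # key was 'test-helpers'
    }
}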
54fcc052ca5b6d0c6ee7d4c6fd1e5dedb5c7bd20
Handle empty models in the string representation
openprovider/models.py
openprovider/models.py
# coding=utf-8 """ Wrapper classes for API models. Most of these are thin wrappers over lxml objectified versions of API responses. """ import datetime import lxml.etree from openprovider.util import camel_to_snake, snake_to_camel class Model(object): """ Superclass for all models. Delegates attribute access to a wrapped class. """ def __init__(self, obj=None, **kwargs): self._obj = obj self._attrs = dict((snake_to_camel(key), value) for (key, value) in kwargs.items()) def __dir__(self): attrs = set(self.__dict__.keys() + [camel_to_snake(key) for key in self._attrs.keys()]) if self._obj is not None: attrs.update(camel_to_snake(t.tag) for t in self._obj.iterchildren()) return [attr for attr in attrs if not attr.startswith('_')] def __getattr__(self, attr): """ Magic for returning an attribute. Will try the attributes of the wrapper class first, then attributes in self._attrs, then the attributes of the wrapped objectified element. Will try a camelCased version of the snake_cased input if the attribute contains an underscore. This means foo.company_name will return the same as foo.companyName. """ if "_" in attr: attr = snake_to_camel(attr) if attr in self.__dict__: # Check ourselves first to avoid infinite loops return getattr(self, attr) try: return self._attrs[attr] except KeyError: if self._obj is not None: try: return self._obj[attr] except (AttributeError, KeyError): pass raise AttributeError("Model has no attribute '%s' (tried %r)" % (camel_to_snake(attr), dir(self))) def get_elem(self): """Returns the wrapped lxml element, if one exists, or else None.""" return self._obj def dump(self, *args, **kwargs): """Dumps a representation of the Model on standard output.""" lxml.etree.dump(self._obj, *args, **kwargs) def __repr__(self): return "<%s.%s: %s>" % (type(self).__module__, type(self).__name__, self) def __str__(self): return str(lxml.etree.tostring(self._obj)) def submodel(klass, key): """Shortcut for defining a submodel (has-a relation).""" def getter(self): return klass(getattr(self._obj, key)) return property(getter) def textattribute(attr): # TODO: Lots of duplication with __getattr__ def getter(self): try: return self._attrs[attr] except KeyError: if self.get_elem() is not None: try: return self.get_elem()[attr].text except (AttributeError, KeyError): pass raise AttributeError("Model has no attribute '%s' (tried %r)" % (camel_to_snake(attr), dir(self))) def setter(self, value): self._attrs[attr] = value return property(getter, setter) class Name(Model): """ A person's name. initials (required) Initials (first letters of first names, first letter of last name) firstName (required) First name prefix (optional) Prefix (often occuring in Dutch names; for example van de) lastName (required) Last name """ def __str__(self): if getattr(self, "prefix", None): return "%s %s %s" % (self.first_name, self.prefix, self.last_name) else: return "%s %s" % (self.first_name, self.last_name) class Domain(Model): """ A domain name. 
name (required) The domain name without extension extension (required) The extension part of the domain name """ def __str__(self): return "%s.%s" % (self.name, self.extension) class RegistryDetails(Model): """ A container for a messages from the registry messages A list of messages """ @property def messages(self): try: return [RegistryMessage(item) for item in self.array[0].item] except AttributeError: return [] class RegistryMessage(Model): """ A message from the registry date A datetime object of the message message The actual message """ @property def date(self): return datetime.datetime.strptime(str(self._obj.date), '%Y-%m-%d %H:%M:%S') class DomainDetails(Model): """ A detailed domain. """ domain = submodel(Domain, "domain") registry_details = submodel(RegistryDetails, "registryDetails") def __str__(self): return str(self.domain) class Nameserver(Model): """ A nameserver with either an IPv4 or an IPv6 address. name (required) URI or hostname of the nameserver ip (required if no valid ip6) IPv4 address of the nameserver ip6 (required if no valid ip) IPv6 address of the nameserver """ def __str__(self): return str(self.name) class Record(Model): """ A DNS record. type (required) One of the following data types: A, AAAA, CNAME, MX, SPF, TXT name (optional) The part of the hostname before the domainname; for example www or ftp value (required) The value of the record; depending on the type, certain restrictions apply; see the FAQ for these restrictions prio (optional) Priority of the record; required for MX records; ignored for all other record types ttl (required) The Time To Live of the record; this is a value in seconds """ pass class History(Model): """ Representation of a single modification of a piece of data. date (required) Date of the modification was (required) Old contents of the record is (required) New contents of the record """ pass class Address(Model): """ A physical street address. street (required) number (required) suffix (optional) zipcode (required) city (required) state (optional) country (required) """ pass class Phone(Model): """ An international phone number. country_code (required) area_code (required) subscriber_number (required) """ country_code = textattribute("countryCode") area_code = textattribute("areaCode") subscriber_number = textattribute("subscriberNumber") def __str__(self): """Return the string representation of phone number.""" return "%s %s %s" % (self.country_code, self.area_code, self.subscriber_number) class Reseller(Model): """ A reseller profile. id companyName address phone fax vatperc balance reservedBalance """ address = submodel(Address, "address") phone = submodel(Phone, "phone") fax = submodel(Phone, "fax") class Customer(Model): """ A customer. handle companyName vat name gender address phone fax email """ name = submodel(Name, "name") address = submodel(Address, "address") phone = submodel(Phone, "phone") fax = submodel(Phone, "fax") def __str__(self): return str(self.handle) class SSLProduct(Model): """ An SSL product. id name brandName category isMobileSupported isIdnSupported isSgcSupported isWildcardSupported isExtendedValidationSupported deliveryTime freeRefundPeriod freeReissuePeriod maxPeriod numberOfDomains encryption root warranty prices supportedSoftware description """ pass class SSLOrder(Model): """ An ordered SSL certificate. 
id commonName productName brandName status orderDate activeDate expirationDate hostNames organizationHandle administrativeHandle technicalHandle billingHandle emailApprover csr certificate rootCertificate """ pass class Extension(Model): """ A domain extension (TLD). name transferAvailable isTransferAuthCodeRequired domicileAvailable usageCount description prices isAuthorizationCodeRequired isLockingAllowed isTradeAllowed restorePrice """ pass
Python
0.000001
@@ -2312,16 +2312,48 @@ f._obj)) + if self._obj else 'Empty model' %0A%0A%0Adef s
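Decoded, Model.__str__ gains a guard for the case where no lxml element is wrapped:

def __str__(self):
    return str(lxml.etree.tostring(self._obj)) if self._obj else 'Empty model'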
92ce590174265c4ac6701a2d11fe65465ececcc6
Add deploy key uploading to the command line
doctr/__main__.py
doctr/__main__.py
""" doctr A tool to automatically deploy docs to GitHub pages from Travis CI. The doctr command is two commands in one. To use, first run doctr on your local machine. This will prompt for your GitHub credentials and the name of the repo you want to deploy docs for. This will generate a secure key, which you should insert into your .travis.yml. Then, on Travis, for the build where you build your docs, add - doctr to the end of the build to deploy the docs to GitHub pages. This will only run on the master branch, and won't run on pull requests. For more information, see https://gforsyth.github.io/doctr/docs/ """ import sys import os import argparse from .local import (generate_GitHub_token, encrypt_variable, encrypt_file, generate_ssh_key) from .travis import setup_GitHub_push, commit_docs, push_docs, get_repo from . import __version__ def main(): parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-V', '--version', action='version', version='doctr ' + __version__) location = parser.add_mutually_exclusive_group() location.add_argument('--travis', action='store_true', default=None, help="Run as if on Travis. The default is to detect automatically.") location.add_argument('--local', action='store_true', default=None, help="Run as if local (not on Travis). The default is to detect automatically.") parser.add_argument('--token', action="store_true", default=False, help="""Generate a personal access token to push to GitHub. The default is to use a deploy key. WARNING: This will grant read/write access to all the public repositories for the user. This option is not recommended unless you are using a separate GitHub user for deploying.""") args = parser.parse_args() if args.local == args.travis == None: on_travis = os.environ.get("TRAVIS_JOB_NUMBER", '') else: on_travis = args.travis if on_travis: repo = get_repo() if setup_GitHub_push(repo): commit_docs() push_docs() else: repo = input("What repo to you want to build the docs for? ") if args.token: token = generate_GitHub_token() encrypted_variable = encrypt_variable("GH_TOKEN={token}".format(token=token).encode('utf-8'), repo=repo) else: generate_ssh_key("doctr deploy key for {repo}".format(repo=repo)) key = encrypt_file('github_deploy_key', delete=True) encrypted_variable = encrypt_variable(b"DOCTR_DEPLOY_ENCRYPTION_KEY={key}".format(key=key), repo=repo) # TODO: Add deploy key to GitHub print("""\ The deploy key has been added for {repo}. Commit the file github_deploy_key.enc to the repository. You can go to https://github.com/{repo}/settings/keys to revoke the deploy key. """.format(repo=repo)) travis_content = """ env: global: secure: "{encrypted_variable}" """.format(encrypted_variable=encrypted_variable.decode('utf-8')) print("Put\n", travis_content, "in your .travis.yml.\n", "Also make sure to create an empty gh-pages branch on GitHub, and " "enable it at https://github.com/{repo}/settings".format(repo=repo), sep='') if __name__ == '__main__': sys.exit(main())
Python
0.000001
@@ -737,16 +737,46 @@ pt_file, +%0A upload_GitHub_deploy_key, generat @@ -1833,16 +1833,197 @@ ng.%22%22%22)%0A +%0A parser.add_argument(%22--no-upload-key%22, action=%22store_false%22, default=True,%0A dest=%22upload_key%22, help=%22%22%22Don't automatically upload the deploy key%0A to GitHub.%22%22%22)%0A%0A args @@ -2875,245 +2875,662 @@ -# TODO: Add deploy key to GitHub%0A%0A print(%22%22%22%5C%0AThe deploy key has been added for %7Brepo%7D.%0A%0ACommit the file github_deploy_key.enc to the repository.%0A%0AYou can go to https://github.com/%7Brepo%7D/settings/keys to revoke the deploy +deploy_keys_url = 'https://github.com/%7Brepo%7D/settings/keys'.format(repo=repo)%0A%0A if args.upload_key:%0A with open(%22github_deploy_key.pub%22) as f:%0A key = f.read()%0A%0A upload_GitHub_deploy_key(repo, key)%0A%0A print(%22%22%22%5C%0AThe deploy key has been added for %7Brepo%7D.%0A%0ACommit the file github_deploy_key.enc to the repository.%0A%0AYou can go to %7Bdeploy_keys_url%7D to revoke the deploy key.%0A%22%22%22.format(repo=repo, deploy_keys_url=deploy_keys_url))%0A%0A else:%0A print(%22%22%22%5C%0AGo to %7Bdeploy_keys_url%7D and add the following as a new key:%0A%0A%7Bkey%7D%0A%0ABe sure to allow write access for the key -. %0A%22%22%22 @@ -3537,25 +3537,56 @@ .format( -repo=repo +key=key, deploy_keys_url=deploy_keys_url ))%0A%0A
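Decoded, the diff imports upload_GitHub_deploy_key, adds a --no-upload-key flag, and replaces the TODO with an upload-or-instruct branch; a condensed sketch (message bodies abbreviated, full text is in the hunks):

parser.add_argument("--no-upload-key", action="store_false", default=True,
    dest="upload_key", help="""Don't automatically upload the deploy key
    to GitHub.""")

# ... in the deploy-key branch ...
deploy_keys_url = 'https://github.com/{repo}/settings/keys'.format(repo=repo)

if args.upload_key:
    with open("github_deploy_key.pub") as f:
        key = f.read()
    upload_GitHub_deploy_key(repo, key)
    # prints: deploy key added, commit github_deploy_key.enc,
    # revoke at {deploy_keys_url}
else:
    print("""\
Go to {deploy_keys_url} and add the following as a new key:

{key}

Be sure to allow write access for the key.
""".format(key=key, deploy_keys_url=deploy_keys_url))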
0543774cffde0ad6eafe4bebc77df04c03027cf7
Remove container specific import from __init__.py (#14)
testcontainers/__init__.py
testcontainers/__init__.py
from testcontainers.selenium import BrowserWebDriverContainer
from testcontainers.mysql import MySqlContainer
from testcontainers.postgres import PostgresContainer
from testcontainers.oracle import OracleDbContainer
from testcontainers.core.generic import GenericContainer
from testcontainers.core.waiting_utils import wait_container_is_ready, wait_for
Python
0.000001
@@ -1,220 +1,4 @@ -from testcontainers.selenium import BrowserWebDriverContainer%0Afrom testcontainers.mysql import MySqlContainer%0Afrom testcontainers.postgres import PostgresContainer%0Afrom testcontainers.oracle import OracleDbContainer%0A from
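Decoded, the whole file shrinks to the two framework-level imports; the container-specific ones are removed:

from testcontainers.core.generic import GenericContainer
from testcontainers.core.waiting_utils import wait_container_is_ready, wait_for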
00cc8c249dddea91b778e3623b36187969d89012
Fix bug in _CustomOperation
hoomd/custom_operation.py
hoomd/custom_operation.py
from hoomd.operation import _TriggeredOperation from hoomd.parameterdicts import ParameterDict from hoomd.custom_action import CustomAction from hoomd.typeconverter import OnlyType from hoomd.trigger import Trigger from hoomd.logger import LoggerQuantity from hoomd import _hoomd class _CustomOperation(_TriggeredOperation): """Wrapper for user created `hoomd.CustomAction`s. This is the parent class for `hoomd.update.CustomUpdater` and `hoomd.analyzer.CustomAnalzyer`. A basic wrapper that allows for Python object inheriting from `hoomd.custom_action.CustomAction` to be attached to a simulation. To see how to implement a custom Python action, look at the documentation for `hoomd.CustomAction`. This class also implements a "pass-through" system for attributes. Attributes and methods from the passed in `action` will be available directly in this class. This does not apply to attributes with these names: ``trigger``, ``_action``, and ``action``. Note: Due to the pass through no attribute should exist both in `hoomd._CustomOperation` and the `hoomd.CustomAction`. Note: This object should not be instantiated or subclassed by an user. Attributes: trigger (hoomd.Trigger): A trigger to determine when the wrapped `hoomd.CustomAction` is run. """ _override_setattr = {'_action'} @property def _cpp_class_name(self): """C++ Class to use for attaching.""" raise NotImplementedError def __init__(self, action, trigger=1): if not isinstance(action, CustomAction): raise ValueError("action must be a subclass of " "hoomd.custom_action.CustomAction.") self._action = action loggables = list(action.log_quantities) if not all(isinstance(val, LoggerQuantity) for val in loggables.values()): raise ValueError("Error wrapping {}. All advertised log " "quantities must be of type LoggerQuantity." "".format(action)) self._export_dict = loggables param_dict = ParameterDict(trigger=Trigger) param_dict['trigger'] = trigger self._param_dict.update(param_dict) def __getattr__(self, attr): """Allows pass through to grab attributes/methods of the wrapped object. """ try: return super().__getattr__(attr) except AttributeError: try: return getattr(self._action, attr) except AttributeError: raise AttributeError( "{} object has no attribute {}".format(type(self), attr)) def _setattr_hook(self, attr, value): """This implements the __setattr__ pass through to the CustomAction.""" if hasattr(self._action, attr): setattr(self._action, attr, value) else: object.__setattr__(self, attr, value) def attach(self, simulation): """Attach to a `hoomd.Simulation`. Detaching is implemented by a parent class. Args: simulation (hoomd.Simulation): The simulation the operation operates on. """ self._cpp_obj = getattr(_hoomd, self._cpp_class_name)( simulation.state._cpp_sys_def, self._action) super().attach(simulation) self._action.attach(simulation) def act(self, timestep): """Perform the action of the custom action if attached. Calls through to the action property of the instance. Args: timestep (int): The current timestep of the state. """ if self.is_attached: self._action.act(timestep) @property def action(self): """`hoomd.CustomAction` The action the operation wraps.""" return self._action class _InternalCustomOperation(_CustomOperation): """Internal class for Python ``Action``s. Offers a streamlined __init__. Adds a wrapper around an hoomd Python action. 
This extends the attribute getting and setting wrapper of `hoomd._CustomOperation` with a wrapping of the `__init__` method as well as a error raised if the ``action`` is attempted to be accessed directly. """ @property def _internal_class(self): """Internal class to use for the Action of the Operation.""" raise NotImplementedError def __init__(self, trigger, *args, **kwargs): super().__init__(self._internal_class(*args, **kwargs), trigger) self._export_dict = {key: value.update_cls(self.__class__) for key, value in self._export_dict.items()} @property def action(self): """Prevents the access of action in public API.""" raise AttributeError
Python
0
@@ -1789,11 +1789,11 @@ s = -lis +dic t(ac
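Decoded, the one-word fix makes loggables a dict, which the very next line in old_contents requires when it calls loggables.values():

loggables = dict(action.log_quantities)   # was list(...), which has no .values()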
4f2040f001a083db02633164dc063a7ac46e5602
use ordered dicts
ophiuchus/data/core.py
ophiuchus/data/core.py
# coding: utf-8

from __future__ import division, print_function

__author__ = "adrn <adrn@astro.columbia.edu>"

# Third-party
import astropy.coordinates as coord
import astropy.units as u
from astropy.utils.data import get_pkg_data_filename
import numpy as np
import numexpr

import gary.coordinates as gc
from gary.observation import distance
from gary.units import galactic
from gary.util import atleast_2d

# Project
from .. import galactocentric_frame, vcirc, vlsr
from ..coordinates import Ophiuchus

__all__ = ['OphiuchusData']


class OphiuchusData(object):
    """
    Utility class for interacting with the data for the Ophiuchus stream.
    """
    def __init__(self, expr=None):
        # read the catalog data file
        filename = get_pkg_data_filename('sesar.txt')
        _tbl = np.genfromtxt(filename, dtype=None, skip_header=2, names=True)
        if expr is not None:
            ix = numexpr.evaluate(expr, _tbl)
            _tbl = _tbl[ix]

        # convert distance modulus uncertainty to distance uncertainty
        dists = []
        dist_errs = []
        for DM,err_DM in zip(_tbl['DM'], _tbl['err_DM']):
            d = distance(np.random.normal(DM, err_DM, size=1024)).to(u.kpc).value
            dists.append(np.median(d))
            dist_errs.append(np.std(d))
        dists = np.array(dists)*u.kpc
        dist_errs = np.array(dist_errs)*u.kpc

        # make an astropy coordinate object from the positions
        self.coord = coord.ICRS(ra=_tbl['ra']*u.degree, dec=_tbl['dec']*u.degree, distance=dists)\
            .transform_to(coord.Galactic)
        self.coord_err = dict(
            l=0.*self.coord.l.decompose(galactic),
            b=0.*self.coord.l.decompose(galactic),
            distance=dist_errs.decompose(galactic)
        )

        # a SphericalRepresentation of the coordinates in Ophiuchus coordinates
        # self.coord_oph = orbitfit.rotate_sph_coordinate(self.coord, self.R)
        self.coord_oph = self.coord.transform_to(Ophiuchus)

        # velocity information and uncertainties
        self.veloc = dict(
            mul=(_tbl['mu_l']*u.mas/u.yr).decompose(galactic),
            mub=(_tbl['mu_b']*u.mas/u.yr).decompose(galactic),
            vr=(_tbl['v_los']*u.km/u.s).decompose(galactic)
        )
        self.veloc_err = dict(
            mul=(_tbl['err_mu_l']*u.mas/u.yr).decompose(galactic),
            mub=(_tbl['err_mu_b']*u.mas/u.yr).decompose(galactic),
            vr=(_tbl['err_v_los']*u.km/u.s).decompose(galactic)
        )

    def oph_to_galactic(self, rep):
        """
        Transform from Ophiuchus stream coordinates to Galactic coordinates.
        """
        xyz = rep.represent_as(coord.CartesianRepresentation).xyz.value
        in_frame_car = coord.CartesianRepresentation(self.R.T.dot(xyz).T*u.kpc)
        return self.coord.realize_frame(in_frame_car)

    def _mcmc_sample_to_coord(self, p):
        p = atleast_2d(p, insert_axis=-1) # note: from Gary, not Numpy
        rep = coord.SphericalRepresentation(lon=p[0]*0.*u.radian, # this is required by the MCMC
                                            lat=p[0]*u.radian, # this index looks weird but is right
                                            distance=p[1]*u.kpc)
        return self.oph_to_galactic(rep)

    def _mcmc_sample_to_w0(self, p):
        p = atleast_2d(p, insert_axis=-1) # note: from Gary, not Numpy
        c = self._mcmc_sample_to_coord(p)
        x0 = c.transform_to(galactocentric_frame).cartesian.xyz.decompose(galactic).value
        v0 = gc.vhel_to_gal(c, pm=(p[2]*u.rad/u.Myr,p[3]*u.rad/u.Myr),
                            rv=p[4]*u.kpc/u.Myr,
                            galactocentric_frame=galactocentric_frame,
                            vcirc=vcirc, vlsr=vlsr).decompose(galactic).value
        w0 = np.concatenate((x0, v0))
        return w0
Python
0.000001
@@ -106,16 +106,72 @@ .edu%3E%22%0A%0A +# Standard library%0Afrom collections import OrderedDict%0A%0A # Third- @@ -1668,25 +1668,32 @@ coord_err = -d +OrderedD ict(%0A @@ -2138,17 +2138,24 @@ veloc = -d +OrderedD ict(%0A @@ -2372,17 +2372,24 @@ c_err = -d +OrderedD ict(%0A
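Decoded, the diff adds a standard-library import and switches the three plain dict constructors to OrderedDict; entries elided:

# Standard library
from collections import OrderedDict

# ... in __init__ ...
self.coord_err = OrderedDict(...)
self.veloc = OrderedDict(...)
self.veloc_err = OrderedDict(...)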
9553dca666dcf751977bffb21415f8b43811ce02
Add album on post extend article, on article
opps/article/models.py
opps/article/models.py
# -*- coding: utf-8 -*- from django.db import models from django.utils.translation import ugettext_lazy as _ from tagging.fields import TagField from googl.short import GooglUrlShort from opps.core.models import Publishable from opps.image.models import Image from opps.source.models import Source from opps.channel.models import Channel class Article(Publishable): title = models.CharField(_(u"Title"), max_length=140) slug = models.SlugField(_(u"URL"), max_length=150, unique=True, db_index=True) short_url = models.URLField(_("Short URL"), blank=False, null=True) short_title = models.CharField(_(u"Short title"), max_length=140, blank=False, null=True) headline = models.TextField(_(u"Headline"), blank=True) channel = models.ForeignKey('channel.Channel', verbose_name=_(u"Channel")) main_image = models.ForeignKey('image.Image', verbose_name=_(u'Main Image'), blank=False, null=True, on_delete=models.SET_NULL) sources = models.ManyToManyField('source.Source', null=True, blank=True, through=models.get_model('source', 'PostSource')) tags = TagField(null=True, verbose_name=_(u"Tags")) class Meta: abstract = True def __absolute_url(self): return "{0}/{1}".format(self.channel, self.slug) def get_absolute_url(self): return "http://{0}".format(self.__absolute_url()) get_absolute_url.short_description = 'URL' def __unicode__(self): return self.__absolute_url() def save(self, *args, **kwargs): if not self.short_url: self.short_url = GooglUrlShort(self.get_absolute_url()).short() super(Article, self).save(*args, **kwargs) class Post(Article): content = models.TextField(_(u"Content")) images = models.ManyToManyField(Image, null=True, blank=True, related_name='post_images', through='PostImage') class Album(Article): images = models.ManyToManyField(Image, null=True, blank=True, related_name='album_images', through='AlbumImage') class ManyToImage(models.Model): image = models.ForeignKey(Image, verbose_name=_(u'Image'), null=True, blank=True, on_delete=models.SET_NULL) order = models.PositiveIntegerField(_(u'Order'), default=0) def __unicode__(self): return self.image.title class Meta: abstract = True class PostImage(ManyToImage): post = models.ForeignKey(Post, verbose_name=_(u'Post'), null=True, blank=True, related_name='postimage_post', on_delete=models.SET_NULL) class PostSource(models.Model): post = models.ForeignKey(Post, verbose_name=_(u'Post'), null=True, blank=True, related_name='postsource_post', on_delete=models.SET_NULL) source = models.ForeignKey(Source, verbose_name=_(u'Source'), null=True, blank=True, related_name='postsource_source', on_delete=models.SET_NULL) order = models.PositiveIntegerField(_(u'Order'), default=0) def __unicode__(self): return self.source.slug class AlbumImage(ManyToImage): album = models.ForeignKey(Album, verbose_name=_(u'Album'), null=True, blank=True, related_name='albumimage_post', on_delete=models.SET_NULL) class ArticleBox(Publishable): name = models.CharField(_(u"Box name"), max_length=140) slug = models.SlugField(_(u"Slug"), max_length=150, unique=True, db_index=True) post = models.ForeignKey(Post, null=True, blank=True, on_delete=models.SET_NULL) channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.SET_NULL) posts = models.ManyToManyField(Post, null=True, blank=True, related_name='articlebox_post', through='ArticleBoxPost') def __unicode__(self): return self.slug class ArticleBoxPost(models.Model): post = models.ForeignKey(Post, verbose_name=_(u'Article Box Post'), null=True, blank=True, related_name='articleboxpost_post', 
on_delete=models.SET_NULL) articlebox = models.ForeignKey(ArticleBox, verbose_name=_(u'Article Box'), null=True, blank=True, related_name='articlebox', on_delete=models.SET_NULL) def __unicode__(self): return "{0}-{1}".format(self.articlebox.slug, self.post.slug)
Python
0
@@ -2066,32 +2066,177 @@ gh='PostImage')%0A + album = models.ForeignKey('Album', verbose_name=_(u'Album'), null=True,%0A blank=True, on_delete=models.SET_NULL)%0A %0A%0Aclass Album(Ar
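Decoded, Post gains a nullable foreign key to Album:

class Post(Article):
    content = models.TextField(_(u"Content"))
    images = models.ManyToManyField(Image, null=True, blank=True,
                                    related_name='post_images', through='PostImage')
    album = models.ForeignKey('Album', verbose_name=_(u'Album'), null=True,
                              blank=True, on_delete=models.SET_NULL)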
fef8eb920ccd24cade6430361f638eb46f609151
Replace deprecated get_all_field_names reference
organizations/utils.py
organizations/utils.py
# -*- coding: utf-8 -*-

# Copyright (c) 2012-2015, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .models import Organization


def create_organization(user, name, slug=None, is_active=None,
        org_defaults=None, org_user_defaults=None, **kwargs):
    """
    Returns a new organization, also creating an initial organization user who
    is the owner.

    The specific models can be specified if a custom organization app is used.
    The simplest way would be to use a partial.

    >>> from organizations.utils import create_organization
    >>> from myapp.models import Account
    >>> from functools import partial
    >>> create_account = partial(create_organization, model=Account)

    """
    org_model = kwargs.pop('model', None) or kwargs.pop('org_model', None) or Organization
    kwargs.pop('org_user_model', None)  # Discard deprecated argument
    org_owner_model = org_model.owner.related.related_model
    try:
        # Django 1.9
        org_user_model = org_model.organization_users.rel.related_model
    except AttributeError:
        # Django 1.8
        org_user_model = org_model.organization_users.related.related_model
    if org_defaults is None:
        org_defaults = {}
    if org_user_defaults is None:
        if 'is_admin' in org_user_model._meta.get_all_field_names():
            org_user_defaults = {'is_admin': True}
        else:
            org_user_defaults = {}
    if slug is not None:
        org_defaults.update({'slug': slug})
    if is_active is not None:
        org_defaults.update({'is_active': is_active})
    org_defaults.update({'name': name})
    organization = org_model.objects.create(**org_defaults)
    org_user_defaults.update({'organization': organization, 'user': user})
    new_user = org_user_model.objects.create(**org_user_defaults)
    org_owner_model.objects.create(organization=organization,
        organization_user=new_user)
    return organization


def model_field_attr(model, model_field, attr):
    """
    Returns the specified attribute for the specified field on the model class.
    """
    fields = dict([(field.name, field) for field in model._meta.fields])
    return getattr(fields[model_field], attr)
Python
0.00001
@@ -1374,35 +1374,454 @@ rom -.models import Organization +itertools import chain%0A%0Afrom .models import Organization%0A%0A%0Adef model_field_names(model):%0A %22%22%22%0A Returns a list of field names in the model%0A%0A Direct from Django upgrade migration guide.%0A %22%22%22%0A return list(set(chain.from_iterable(%0A (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)%0A for field in model._meta.get_fields()%0A if not (field.many_to_one and field.related_model is None)%0A ))) %0A%0A%0Ad @@ -2962,49 +2962,40 @@ in -org_user_model._meta.get_all_field_names( +model_field_names(org_user_model ):%0A
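Decoded, the diff introduces a model_field_names helper (taken, per its docstring, from the Django upgrade migration guide) and uses it in place of the removed _meta.get_all_field_names() API:

from itertools import chain

from .models import Organization


def model_field_names(model):
    """
    Returns a list of field names in the model

    Direct from Django upgrade migration guide.
    """
    return list(set(chain.from_iterable(
        (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
        for field in model._meta.get_fields()
        if not (field.many_to_one and field.related_model is None)
    )))

# ... and in create_organization ...
if 'is_admin' in model_field_names(org_user_model):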
05f782b7544c6639049e6c83d5f26d28189924a7
I think it time to 0.1.0
blog/__init__.py
blog/__init__.py
VERSION = (0, 1, 'pre')

__version__ = ".".join(map(str, VERSION))
Python
0.999978
@@ -14,13 +14,9 @@ 1, -'pre' +0 )%0A__
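Decoded, the 'pre' tag in the version tuple becomes a 0, so the joined string reads "0.1.0":

VERSION = (0, 1, 0)

__version__ = ".".join(map(str, VERSION))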
60bec2ee0e1aa131cc47f350ae02b6aeadf93b4b
Bump Biogrid version to 4.2.192 (Nov 25 2020)
indra/sources/biogrid.py
indra/sources/biogrid.py
from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str import os import re import csv import logging import itertools import requests from io import BytesIO, StringIO from zipfile import ZipFile from collections import namedtuple from indra.util import read_unicode_csv from indra.statements import * import indra.databases.hgnc_client as hgnc_client import indra.databases.uniprot_client as up_client logger = logging.getLogger(__name__) biogrid_file_url = 'https://downloads.thebiogrid.org/Download/BioGRID/' + \ 'Release-Archive/BIOGRID-3.4.158/BIOGRID-ALL-3.4.158.tab2.zip' # The explanation for each column of the tsv file is here: # https://wiki.thebiogrid.org/doku.php/biogrid_tab_version_2.0 _BiogridRow = namedtuple('BiogridRow', ['biogrid_int_id', 'entrez_a', 'entrez_b', 'biogrid_a', 'biogrid_b', 'syst_name_a', 'syst_name_b', 'hgnc_a', 'hgnc_b', 'syn_a', 'syn_b', 'exp_system', 'exp_system_type', 'author', 'pmid', 'organism_a', 'organism_b', 'throughput', 'score', 'modification', 'phenotypes', 'qualifications', 'tags', 'source_db']) class BiogridProcessor(object): """Extracts INDRA Complex statements from Biogrid interaction data. Parameters ---------- biogrid_file : str The file containing the Biogrid data in .tab2 format. If not provided, the BioGrid data is downloaded from the BioGrid website. physical_only : boolean If True, only physical interactions are included (e.g., genetic interactions are excluded). If False, all interactions are included). Attributes ---------- statements: list[indra.statements.Statements] Extracted INDRA Complex statements. physical_only : boolean Indicates whether only physical interactions were included during statement processing. """ def __init__(self, biogrid_file=None, physical_only=True): self.statements = [] self.physical_only = physical_only # If a path to the file is included, process it, skipping the header if biogrid_file: rows = read_unicode_csv(biogrid_file, '\t', skiprows=1) # If no file is provided, download from web else: logger.info('No data file specified, downloading from BioGrid ' 'at %s' % biogrid_file_url) rows = _download_biogrid_data(biogrid_file_url) # Process the rows into Statements for row in rows: filt_row = [None if item == '-' else item for item in row] bg_row = _BiogridRow(*filt_row) # Filter out non-physical interactions if desired if self.physical_only and bg_row.exp_system_type != 'physical': continue # Ground agents agent_a = self._make_agent(bg_row.entrez_a, bg_row.syst_name_a) agent_b = self._make_agent(bg_row.entrez_b, bg_row.syst_name_b) # Skip any agents with neither HGNC grounding or string name if agent_a is None or agent_b is None: continue # Get evidence ev = Evidence(source_api='biogrid', source_id=bg_row.biogrid_int_id, pmid=bg_row.pmid, text=None, annotations=dict(bg_row._asdict())) # Make statement s = Complex([agent_a, agent_b], evidence=ev) self.statements.append(s) def _make_agent(self, entrez_id, text_id): """Make an Agent object, appropriately grounded. Parameters ---------- entrez_id : str Entrez id number text_id : str A plain text systematic name, or None if not listed. Returns ------- agent : indra.statements.Agent A grounded agent object. 
""" hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id) if hgnc_name is not None: name = hgnc_name elif text_id is not None: name = text_id # Handle case where the name is None else: return None return Agent(name, db_refs=db_refs) def _make_db_refs(self, entrez_id, text_id): """Looks up the HGNC ID and name, as well as the Uniprot ID. Parameters ---------- entrez_id : str Entrez gene ID. text_id : str or None A plain text systematic name, or None if not listed in the Biogrid data. Returns ------- hgnc_name : str Official HGNC symbol for the gene. db_refs : dict db_refs grounding dictionary, used when constructing the Agent object. """ db_refs = {} if text_id != '-' and text_id is not None: db_refs['TEXT'] = text_id hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id) hgnc_name = hgnc_client.get_hgnc_name(hgnc_id) if hgnc_id is not None: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id is not None: db_refs['UP'] = up_id return (hgnc_name, db_refs) def _download_biogrid_data(url): """Downloads zipped, tab-separated Biogrid data in .tab2 format. Parameters: ----------- url : str URL of the BioGrid zip file. Returns ------- csv.reader A csv.reader object for iterating over the rows (header has already been skipped). """ res = requests.get(biogrid_file_url) if res.status_code != 200: raise Exception('Unable to download Biogrid data: status code %s' % res.status_code) zip_bytes = BytesIO(res.content) zip_file = ZipFile(zip_bytes) zip_info_list = zip_file.infolist() # There should be only one file in this zip archive if len(zip_info_list) != 1: raise Exception('There should be exactly zipfile in BioGrid zip ' 'archive: %s' % str(zip_info_list)) unzipped_bytes = zip_file.read(zip_info_list[0]) # Unzip the file biogrid_str = StringIO(unzipped_bytes.decode('utf8')) # Make file-like obj csv_reader = csv.reader(biogrid_str, delimiter='\t') # Get csv reader next(csv_reader) # Skip the header return csv_reader
Python
0
@@ -593,23 +593,23 @@ BIOGRID- -3.4.158 +4.2.192 /BIOGRID @@ -617,15 +617,15 @@ ALL- -3.4.158 +4.2.192 .tab
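Decoded, only the release number in the download URL changes:

biogrid_file_url = 'https://downloads.thebiogrid.org/Download/BioGRID/' + \
    'Release-Archive/BIOGRID-4.2.192/BIOGRID-ALL-4.2.192.tab2.zip'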
45c74931323aa250486e96e943b6f820aca20ec5
Update user serializer to use full rather than simple
lims/users/views.py
lims/users/views.py
from django.contrib.auth.models import User, Group from django.conf import settings from rest_framework import parsers, renderers from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.response import Response from rest_framework.views import APIView from rest_framework import viewsets from rest_framework import status from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.decorators import list_route, detail_route from rest_framework.validators import ValidationError import django_filters from lims.addressbook.serializers import AddressSerializer from lims.crm.helpers import CRMCreateContact from lims.crm.serializers import CreateCRMAccountSerializer from .serializers import (UserSerializer, GroupSerializer, RegisterUserSerializer, SimpleUserSerializer,) from lims.permissions.permissions import (IsInAdminGroupOrRO, IsInAdminGroupOrTheUser) from lims.shared.mixins import AuditTrailViewMixin from lims.users.models import ResetCode class ObtainAuthToken(APIView): """ Customisation of ObtainAuthToken class to return an organisation along with the token. """ throttle_classes = () permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer def post(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(): user = serializer.validated_data['user'] usr = User.objects.get(username=user) groups = [g.name for g in user.groups.all()] token, created = Token.objects.get_or_create(user=user) return Response({'token': token.key, 'crm': settings.ENABLE_CRM, 'id': usr.id, 'groups': groups}) return Response({'message': 'Username/password incorrect'}, status=400) class UserFilter(django_filters.FilterSet): has_crm_details = django_filters.CharFilter(method='filter_has_crm_details') def filter_has_crm_details(self, queryset, value): if value == 'False': return queryset.filter(crmaccount__isnull=True) elif value == 'True': return queryset.filter(crmaccount__isnull=False) return queryset class Meta: model = User fields = ['has_crm_details'] class UserViewSet(AuditTrailViewMixin, viewsets.ModelViewSet): """ User data. 
""" queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsInAdminGroupOrTheUser,) search_fields = ('username', 'email') filter_class = UserFilter def get_queryset(self): if self.request.user.groups.filter(name='admin').exists(): # Exclude the system specific AnonymousUser from results as deleting could cause issues return User.objects.exclude(username='AnonymousUser') else: return User.objects.filter(username=self.request.user.username) @detail_route(methods=['patch']) def change_password(self, request, pk=None): """ Reset password for a given user """ new_password = request.data.get('new_password', None) if new_password: user = self.get_object() if request.user.id == user.id or request.user.groups.filter(name='admin').exists(): user.set_password(new_password) user.save() return Response({'message': 'Password for {} changed'.format(user.username)}) raise ValidationError({'message': 'You do not have permissions to change this users password'}) raise ValidationError({'message': 'You must supply a new password'}) @list_route(permission_classes=[IsAuthenticated]) def me(self, request): serializer = SimpleUserSerializer(request.user) return Response(serializer.data) @list_route(permission_classes=[IsAuthenticated]) def staff(self, request): results = User.objects.filter(groups__name='staff') serializer = SimpleUserSerializer(results, many=True) return Response(serializer.data) @list_route(methods=['post'], permission_classes=[AllowAny]) def register(self, request): """ Register a user in the system. Access to all without authentication. Checks/creates a CRM account and address if possible. """ required_data = RegisterUserSerializer(data=request.data) serializer = UserSerializer(data=request.data) if required_data.is_valid(): # It's already been validated above but we still need # to re-validate for the serializer to work serializer.is_valid() instance = serializer.save() # Create and address and link to CRM request.data['user'] = instance.username address = AddressSerializer(data=request.data) if address.is_valid(): address.save() # Validate data for CRM if settings.ENABLE_CRM: crm_data = CreateCRMAccountSerializer(data=request.data) crm_data.is_valid() CRMCreateContact(request, crm_data.validated_data) return Response(serializer.data, status=201) else: return Response(required_data.errors, status=status.HTTP_400_BAD_REQUEST) @list_route(permission_classes=[AllowAny]) def exists(self, request): """ Check if a given email address exists as a user in the system """ email = request.query_params.get('email', None) if email: exists = User.objects.filter(email=email).exists() output = {'exists': exists} if exists: user = User.objects.get(email=email) output['username'] = user.username return Response(output) return Response({'message': 'An email address is required'}, status=400) @list_route(permission_classes=[AllowAny]) def get_reset_code(self, request): """ Get a reset code for an email address and email user """ email = request.query_params.get('email', None) if email: try: user = User.objects.get(email=email) except: raise ValidationError({'message': 'Email address not in system'}) else: try: exists = ResetCode.objects.get(account__email=email) except: pass else: exists.delete() reset_code = ResetCode(account=user) reset_code.save() if reset_code.send_email(): return Response({'message': 'Email sent'}) return Response({'message': 'Email failed to send'}, status=500) return Response({'message': 'Please provide an email address'}, status=400) 
@list_route(methods=['patch'], permission_classes=[AllowAny]) def reset_account(self, request): """ Reset a single user account using a generated code """ email = request.data.get('email', None) reset_code = request.data.get('code', None) new_password = request.data.get('new_password', None) if email and new_password and reset_code: try: reset_data = ResetCode.objects.get(code=reset_code, account__email=email) except: return Response({'message': 'Unable to find a reset for the account'}, status=400) else: reset_data.account.set_password(new_password) reset_data.account.save() reset_data.delete() return Response({'message': 'Account {} reset'.format( reset_data.account.username)}) raise ValidationError({'message': 'Please provide email, code and password data'}) class GroupViewSet(AuditTrailViewMixin, viewsets.ModelViewSet): queryset = Group.objects.all() serializer_class = GroupSerializer permission_classes = (IsInAdminGroupOrRO,) search_fields = ('name',) def get_queryset(self): if self.request.user.groups.filter(name='admin').exists(): return Group.objects.exclude(name='admin') else: return self.request.user.groups.all()
Python
0
@@ -3939,32 +3939,49 @@ @list_route( +methods=%5B'get'%5D, permission_class @@ -4042,38 +4042,32 @@ serializer = -Simple UserSerializer(r
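Decoded, the me route gains an explicit GET method and switches to the full serializer:

@list_route(methods=['get'], permission_classes=[IsAuthenticated])
def me(self, request):
    serializer = UserSerializer(request.user)   # was SimpleUserSerializer
    return Response(serializer.data)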
2ef6fdf4bf3f3bf4257238c36835d697ba9307d3
Add one more test for gene normalization
indra/tests/test_gnbr.py
indra/tests/test_gnbr.py
import os
from indra.sources.gnbr.processor import *
import indra.sources.gnbr.api as api
from indra.statements.validate import assert_valid_statements


def test_standardize_agent():
    agent = get_std_gene('xxx', '673')
    assert isinstance(agent[0], Agent)
    assert agent[0].name == 'BRAF'
    assert agent[0].db_refs.get('TEXT') == 'xxx'
    assert agent[0].db_refs.get('EGID') == '673'
    assert agent[0].db_refs.get('HGNC') == '1097'


def test_process_gene_gene():
    test_path1: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_gene_gene_part1_test.tsv')
    test_path2: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_gene_gene_part2_test.tsv')
    gp = api.process_gene_gene(test_path1, test_path2)
    assert len(gp.statements) != 0
    assert isinstance(gp, GnbrProcessor)
    assert gp.first_type == 'gene'
    assert gp.second_type == 'gene'
    assert isinstance(gp.statements[0], Activation)
    assert isinstance(gp.statements[1], Activation)
    assert isinstance(gp.statements[2], IncreaseAmount)
    assert isinstance(gp.statements[3], IncreaseAmount)
    assert isinstance(gp.statements[4], Complex)
    assert_valid_statements(gp.statements)


def test_process_chemical_gene():
    test_path1: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_chemical_gene_part1_test.tsv')
    test_path2: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_chemical_gene_part2_test.tsv')
    gp = api.process_chemical_gene(test_path1, test_path2)
    assert len(gp.statements) != 0
    assert isinstance(gp, GnbrProcessor)
    assert gp.first_type == 'chemical'
    assert gp.second_type == 'gene'
    assert isinstance(gp.statements[0], Activation)
    assert isinstance(gp.statements[1], Inhibition)
    assert isinstance(gp.statements[2], Complex)
    assert isinstance(gp.statements[3], DecreaseAmount)
    assert_valid_statements(gp.statements)


def test_process_gene_disease():
    test_path1: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_gene_disease_part1_test.tsv')
    test_path2: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_gene_disease_part2_test.tsv')
    gp = api.process_gene_disease(test_path1, test_path2)
    assert len(gp.statements) != 0
    assert isinstance(gp, GnbrProcessor)
    assert gp.first_type == 'gene'
    assert gp.second_type == 'disease'
    assert isinstance(gp.statements[0], Inhibition)
    assert isinstance(gp.statements[1], Activation)
    assert_valid_statements(gp.statements)


def test_process_chemical_disease():
    test_path1: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_chemical_disease_part1_test.tsv')
    test_path2: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'gnbr_chemical_disease_part2_test.tsv')
    gp = api.process_chemical_disease(test_path1, test_path2)
    assert len(gp.statements) != 0
    assert isinstance(gp, GnbrProcessor)
    assert gp.first_type == 'chemical'
    assert gp.second_type == 'disease'
    assert isinstance(gp.statements[0], Inhibition)
    assert isinstance(gp.statements[1], Inhibition)
    assert isinstance(gp.statements[2], Inhibition)
    assert isinstance(gp.statements[3], Inhibition)
    assert isinstance(gp.statements[4], Inhibition)
    assert_valid_statements(gp.statements)
Python
0.000001
@@ -441,16 +441,360 @@ 1097'%0A%0A%0A +def test_multiple_genes():%0A agents = get_std_gene('Erk1/2', '5594;5595')%0A assert agents%5B0%5D.name == 'MAPK1'%0A assert agents%5B1%5D.name == 'MAPK3'%0A assert agents%5B0%5D.db_refs%5B'TEXT'%5D == 'Erk1/2'%0A assert agents%5B1%5D.db_refs%5B'TEXT'%5D == 'Erk1/2'%0A assert agents%5B0%5D.db_refs%5B'HGNC'%5D == '6871'%0A assert agents%5B1%5D.db_refs%5B'HGNC'%5D == '6877'%0A%0A%0A def test
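Decoded, the diff adds one new test between test_standardize_agent and test_process_gene_gene:

def test_multiple_genes():
    agents = get_std_gene('Erk1/2', '5594;5595')
    assert agents[0].name == 'MAPK1'
    assert agents[1].name == 'MAPK3'
    assert agents[0].db_refs['TEXT'] == 'Erk1/2'
    assert agents[1].db_refs['TEXT'] == 'Erk1/2'
    assert agents[0].db_refs['HGNC'] == '6871'
    assert agents[1].db_refs['HGNC'] == '6877'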
99b2b3609e86568690c9be130e2ebad21a4fe846
Change qid_or_uuid -> qid and update Document2.DoesNotExist try/except
desktop/core/src/desktop/lib/botserver/views.py
desktop/core/src/desktop/lib/botserver/views.py
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import json from urllib.parse import urlsplit from pprint import pprint from desktop import conf from desktop.conf import ENABLE_GIST_PREVIEW from desktop.lib.django_util import login_notrequired, JsonResponse from desktop.lib.exceptions_renderable import PopupException from desktop.models import Document2, _get_gist_document from django.http import HttpResponse from django.utils.translation import ugettext as _ from django.views.decorators.csrf import csrf_exempt LOG = logging.getLogger(__name__) SLACK_VERIFICATION_TOKEN = conf.SLACK.SLACK_VERIFICATION_TOKEN.get() SLACK_BOT_USER_TOKEN = conf.SLACK.SLACK_BOT_USER_TOKEN.get() slack_client = None if conf.SLACK.IS_ENABLED.get(): from slack_sdk import WebClient slack_client = WebClient(token=SLACK_BOT_USER_TOKEN) @login_notrequired @csrf_exempt def slack_events(request): try: slack_message = json.loads(request.body) if slack_message['token'] != SLACK_VERIFICATION_TOKEN: return HttpResponse(status=403) # challenge verification if slack_message['type'] == 'url_verification': response_dict = {"challenge": slack_message['challenge']} return JsonResponse(response_dict, status=200) if 'event' in slack_message: event_message = slack_message['event'] parse_events(event_message) except ValueError as err: raise PopupException(_("Response content is not valid JSON"), detail=err) return HttpResponse(status=200) def parse_events(event): """ Parses the event according to its 'type'. """ channel_id = event.get('channel') if event.get('type') == 'message': handle_on_message(channel_id, event.get('bot_id'), event.get('text'), event.get('user')) if event.get('type') == 'link_shared': handle_on_link_shared(channel_id, event.get('message_ts'), event.get('links')) def handle_on_message(channel_id, bot_id, text, user_id): # Ignore bot's own message since that will cause an infinite loop of messages if we respond. 
if bot_id: return HttpResponse(status=200) if slack_client: if text and 'hello hue' in text.lower(): response = say_hi_user(channel_id, user_id) if not response['ok']: raise PopupException(_("Error posting message"), detail=response["error"]) def handle_on_link_shared(channel_id, message_ts, links): for item in links: path = urlsplit(item['url'])[2] id_type, qid_or_uuid = urlsplit(item['url'])[3].split('=') try: if path == '/hue/editor' and id_type == 'editor': doc = Document2.objects.get(id=qid_or_uuid) elif path == '/hue/gist' and id_type == 'uuid' and ENABLE_GIST_PREVIEW.get(): doc = _get_gist_document(uuid=qid_or_uuid) else: raise PopupException(_("Cannot unfurl link")) doc_data = json.loads(doc.data) statement = doc_data['snippets'][0]['statement_raw'] if id_type == 'editor' else doc_data['statement_raw'] dialect = doc_data['dialect'].capitalize() if id_type == 'editor' else doc.extra.capitalize() created_by = doc.owner.get_full_name() or doc.owner.username except Document2.DoesNotExist: msg = "Document with {key}={value} does not exist".format(key='uuid' if id_type == 'uuid' else 'id', value=qid_or_uuid) raise PopupException(_(msg)) payload = _make_unfurl_payload(item['url'], statement, dialect, created_by) response = slack_client.chat_unfurl(channel=channel_id, ts=message_ts, unfurls=payload) if not response['ok']: raise PopupException(_("Cannot unfurl link"), detail=response["error"]) def _make_unfurl_payload(url, statement, dialect, created_by): payload = { url: { "color": "#025BA6", "blocks": [ { "type": "section", "text": { "type": "mrkdwn", "text": "\n*<{}|Hue - SQL Editor>*".format(url) } }, { "type": "section", "text": { "type": "mrkdwn", "text": statement if len(statement) < 150 else (statement[:150] + '...') } }, { "type": "section", "fields": [ { "type": "mrkdwn", "text": "*Dialect:*\n{}".format(dialect) }, { "type": "mrkdwn", "text": "*Created By:*\n{}".format(created_by) } ] } ] } } return payload def say_hi_user(channel_id, user_id): """ Sends Hi<user_id> message in a specific channel. """ bot_message = 'Hi <@{}> :wave:'.format(user_id) return slack_client.api_call(api_method='chat.postMessage', json={'channel': channel_id, 'text': bot_message})
Python
0
@@ -3192,24 +3192,16 @@ ype, qid -_or_uuid = urlsp @@ -3335,32 +3335,24 @@ s.get(id=qid -_or_uuid )%0A elif @@ -3462,32 +3462,24 @@ ent(uuid=qid -_or_uuid )%0A else @@ -3534,19 +3534,205 @@ link%22))%0A -%0A + except Document2.DoesNotExist:%0A msg = %22Document with %7Bkey%7D=%7Bvalue%7D does not exist%22.format(key='uuid' if id_type == 'uuid' else 'id', value=qid)%0A raise PopupException(_(msg))%0A%0A doc_ @@ -3763,18 +3763,16 @@ ta)%0A - statemen @@ -3874,18 +3874,16 @@ w'%5D%0A - dialect @@ -3968,18 +3968,16 @@ alize()%0A - crea @@ -4041,200 +4041,8 @@ -except Document2.DoesNotExist:%0A msg = %22Document with %7Bkey%7D=%7Bvalue%7D does not exist%22.format(key='uuid' if id_type == 'uuid' else 'id', value=qid_or_uuid)%0A raise PopupException(_(msg))%0A %0A
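Decoded, qid_or_uuid becomes qid and the Document2.DoesNotExist handler moves up so it wraps only the lookup; a sketch using the file's 2-space indent, unchanged lines elided:

id_type, qid = urlsplit(item['url'])[3].split('=')
try:
  if path == '/hue/editor' and id_type == 'editor':
    doc = Document2.objects.get(id=qid)
  elif path == '/hue/gist' and id_type == 'uuid' and ENABLE_GIST_PREVIEW.get():
    doc = _get_gist_document(uuid=qid)
  else:
    raise PopupException(_("Cannot unfurl link"))
except Document2.DoesNotExist:
  msg = "Document with {key}={value} does not exist".format(key='uuid' if id_type == 'uuid' else 'id', value=qid)
  raise PopupException(_(msg))

doc_data = json.loads(doc.data)
# ... statement, dialect, created_by now computed outside the try ...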
3c54116f4c31a7e7392bdd7fd8fdc6773621d9f6
fix bad fix of sync contrib-base branch :-P (#9088)
Utils/contribution_sync/sync_contrib_base.py
Utils/contribution_sync/sync_contrib_base.py
#!/usr/bin/env python3
import os
import sys
import urllib3

from github import Github, enable_console_debug_logging
from github.Repository import Repository
from typing import List

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def get_master_commit_sha(repo: Repository) -> str:  # noqa: E999
    '''Return the sha commit of the master branch

    Args:
        repo (Repository): The repository whose master branch will be queried

    Returns:
        (str): The commit sha of the master branch's HEAD
    '''
    branch_data = repo.get_branch('master')
    commit_sha = branch_data.commit.sha
    return commit_sha


def get_branch_names_with_contrib(repo: Repository) -> List[str]:  # noqa: E999
    '''Return the list of branches that have the prefix of "contrib/" and that
    are base branches of open PRs

    Args:
        repo (Repository): The repository whose branches will be searched and listed

    Returns:
        (List[str]): List of branch names that have the "contrib/" prefix and
            are base branches of open PRs
    '''
    branch_names = []
    for branch in repo.get_branches():
        if branch.name.startswith('contrib/'):
            prs_with_branch_as_base = repo.get_pulls(state='OPEN', base=branch.name)
            if prs_with_branch_as_base.totalCount >= 1:
                prs_with_branch_as_head = repo.get_pulls(state='OPEN', head=branch.name)
                if prs_with_branch_as_head.totalCount == 0:
                    branch_names.append(branch.name)
    return branch_names


def main():
    debug_mode = len(sys.argv) >= 2 and 'debug' in sys.argv[1].casefold
    if debug_mode:
        enable_console_debug_logging()
    gh = Github(os.getenv('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False)
    organization = 'demisto'
    repo = 'content'
    content_repo = gh.get_repo(f'{organization}/{repo}')
    master_sha = get_master_commit_sha(content_repo)
    contrib_base_branches = get_branch_names_with_contrib(content_repo)
    for branch_name in contrib_base_branches:
        git_ref = content_repo.get_git_ref(f'heads/{branch_name}')
        print(f'Updating branch "{branch_name}" to sha "{master_sha}"')
        git_ref.edit(master_sha, force=True)
    if debug_mode:
        print(f'{contrib_base_branches=}')


if __name__ == "__main__":
    main()
Python
0
@@ -1076,16 +1076,104 @@ es = %5B%5D%0A + open_prs_head_refs = %7Bopen_pr.head.ref for open_pr in repo.get_pulls(state='OPEN')%7D%0A for @@ -1393,163 +1393,52 @@ %3E= 1 -:%0A prs_with_branch_as_head = repo.get_pulls(state='OPEN', head=branch.name)%0A if prs_with_branch_as_head.totalCount == 0:%0A + and branch.name not in open_prs_head_refs:%0A @@ -1591,16 +1591,18 @@ casefold +() %0A if
48e4abf450b63eb61bef72fa9dcd0217641f3777
fix for review comments
acos_client/v30/nat.py
acos_client/v30/nat.py
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

from acos_client import errors as acos_errors
from acos_client.v30 import base


class Nat(base.BaseV30):

    @property
    def pool(self):
        return self.Pool(self.client)

    class Pool(base.BaseV30):
        url_prefix = "/ip/nat/pool/"

        def _set(self, name, start_ip, end_ip, mask, ip_rr=None, vrid=None,
                 gateway=None):
            params = {
                "pool": self.minimal_dict(
                    {
                        'pool-name': name,
                        'start-address': start_ip,
                        'end-address': end_ip,
                        'netmask': mask,
                    }
                ),
            }
            if ip_rr:
                params["pool"]["ip-rr"] = ip_rr
            if vrid:
                params["pool"]["vrid"] = vrid
            if gateway:
                params["pool"]['gateway'] = gateway
            return params

        def get(self, name):
            return self._get(self.url_prefix + name)

        def try_get(self, name):
            try:
                return self.get(self, name)
            except acos_errors.NotFound:
                return None

        def exists(self, name):
            try:
                self.get(name)
                return True
            except acos_errors.NotFound:
                return False

        def all(self):
            return self._get(self.url_prefix)

        def create(self, name, start_ip, end_ip, mask, ip_rr=None, vrid=None,
                   gateway=None, max_retries=None, timeout=None, **kwargs):
            if self.exists(name):
                raise acos_errors.Exists

            params = self._set(name, start_ip, end_ip, mask, ip_rr=ip_rr,
                               vrid=vrid, gateway=gateway)
            return self._post(self.url_prefix, params, max_retries=max_retries,
                              timeout=timeout, axapi=kwargs)

        def delete(self, name, **kwargs):
            self._delete(self.url_prefix + name)

        def stats(self, name='', max_retries=None, timeout=None, **kwargs):
            return self._get(self.url_prefix + name + '/stats',
                             max_retries=max_retries, timeout=timeout, **kwargs)

        def all_stats(self, **kwargs):
            return self.stats()
Python
0
@@ -1746,30 +1746,24 @@ rn self.get( -self, name)%0A
8fd28c81ae1e590e8ec9cb04097b36c4e088101d
Improve spaces replacement with slugify
core/internals.py
core/internals.py
import os
import sys

from core import const

log = const.log

try:
    from slugify import SLUG_OK, slugify
except ImportError:
    log.error('Oops! `unicode-slugify` was not found.')
    log.info('Please remove any other slugify library and install `unicode-slugify`')
    sys.exit(5)

formats = { 0  : 'track_name',
            1  : 'artist',
            2  : 'album',
            3  : 'album_artist',
            4  : 'genre',
            5  : 'disc_number',
            6  : 'duration',
            7  : 'year',
            8  : 'original_date',
            9  : 'track_number',
            10 : 'total_tracks',
            11 : 'isrc' }


def input_link(links):
    """ Let the user input a choice. """
    while True:
        try:
            log.info('Choose your number:')
            the_chosen_one = int(input('> '))
            if 1 <= the_chosen_one <= len(links):
                return links[the_chosen_one - 1]
            elif the_chosen_one == 0:
                return None
            else:
                log.warning('Choose a valid number!')
        except ValueError:
            log.warning('Choose a valid number!')


def trim_song(text_file):
    """ Remove the first song from file. """
    with open(text_file, 'r') as file_in:
        data = file_in.read().splitlines(True)
    with open(text_file, 'w') as file_out:
        file_out.writelines(data[1:])


def is_spotify(raw_song):
    """ Check if the input song is a Spotify link. """
    status = len(raw_song) == 22 and raw_song.replace(" ", "%20") == raw_song
    status = status or raw_song.find('spotify') > -1
    return status


def is_youtube(raw_song):
    """ Check if the input song is a YouTube link. """
    status = len(raw_song) == 11 and raw_song.replace(" ", "%20") == raw_song
    status = status and not raw_song.lower() == raw_song
    status = status or 'youtube.com/watch?v=' in raw_song
    return status


def generate_songname(file_format, tags):
    """ Generate a string of the format '[artist] - [song]'
    for the given spotify song. """
    format_tags = dict(formats)
    format_tags[0] = tags['name']
    format_tags[1] = tags['artists'][0]['name']
    format_tags[2] = tags['album']['name']
    format_tags[3] = tags['artists'][0]['name']
    format_tags[4] = tags['genre']
    format_tags[5] = tags['disc_number']
    format_tags[6] = tags['duration']
    format_tags[7] = tags['year']
    format_tags[8] = tags['release_date']
    format_tags[9] = tags['track_number']
    format_tags[10] = tags['total_tracks']
    format_tags[11] = tags['external_ids']['isrc']

    for x in formats:
        file_format = file_format.replace('{' + formats[x] + '}',
                                          str(format_tags[x]))

    if const.args.no_spaces:
        file_format = file_format.replace(' ', '_')

    return file_format


def sanitize_title(title):
    """ Generate filename of the song to be downloaded. """
    if const.args.no_spaces:
        title = title.replace(' ', '_')

    # slugify removes any special characters
    title = slugify(title, ok='-_()[]{}\/', lower=False,
                    spaces=(not const.args.no_spaces))
    return title


def filter_path(path):
    if not os.path.exists(path):
        os.makedirs(path)
    for temp in os.listdir(path):
        if temp.endswith('.temp'):
            os.remove(os.path.join(path, temp))


def videotime_from_seconds(time):
    if time < 60:
        return str(time)
    if time < 3600:
        return '{0}:{1:02}'.format(time//60, time % 60)

    return '{0}:{1:02}:{2:02}'.format((time//60)//60, (time//60) % 60, time % 60)
Python
0.000011
@@ -3104,62 +3104,20 @@ lse, -%0A spaces=(not const.args.no_spaces) + spaces=True )%0A
315c3bc1e2961dc9de5d77fed5d232f106e59968
improve a couple of comments
users.py
users.py
''' working with gitlab's users '''

from crud import Crud
from utils import filter_dict
import random

class Users (Crud):
	def __init__(self):
		Crud.__init__(self, 'users', lambda x: x['username'])

	# generate random password
	rand_pass = lambda s, l = 10: ''.join(chr(random.randint(64, 122)) for x in xrange(l))

	''' create a new user '''
	def add(self, sysNam, login, fullName, email, **opts):
		return Crud.add(self, sysNam, dict(
			[('name', fullName), ('username', login), ('email', email)] +
			('password' in opts and [] or [('password', self.rand_pass())]) +
			opts.items()))

# for saving users' cache between the calls of get_user()
_usrs = Users()

''' if a user represented by usrDict is found in the system, then return its id
	else - create a new one and return its id '''
def get_user(sysNam, usrDict):
	try:
		usr = _usrs.by_name(sysNam, usrDict['username'])
	except KeyError:
		# add the 1st identity to a users' dict
		dictWithUuid = filter_dict(dict(usrDict.items() +
			(usrDict['identities'] and usrDict['identities'][0].items() or [])),
			'admin', 'bio', 'can_create_group', 'extern_uid', 'linkedin',
			'password', 'projects_limit', 'provider', 'skype', 'twitter', 'website_url')
		usr = _usrs.add(sysNam, usrDict['username'], usrDict['name'], usrDict['email'],
			confirm = False, **dictWithUuid)
		# rebuilding of cache after adding a new user is needed
		_usrs.clr_cache(sysNam)
	return usr['id']
Python
0.001666
@@ -599,11 +599,12 @@ for -sav +reus ing @@ -931,19 +931,105 @@ o a -users' dict +top level of the users' dict%0A%09%09# ('cause POST API call to /users works with only one extern_uuid) %0A%09%09d @@ -1448,14 +1448,12 @@ uild -ing of + the cac @@ -1482,18 +1482,8 @@ user - is needed %0A%09%09_
0364831c22dbc73e523aeadcb9c0016e207f8447
Remove version-added on name field.
lib/ansible/modules/cloud/openstack/os_networks_facts.py
lib/ansible/modules/cloud/openstack/os_networks_facts.py
#!/usr/bin/python

# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_networks_facts
short_description: Retrieve facts about one or more OpenStack networks.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
    - Retrieve facts about one or more networks from OpenStack.
requirements:
    - "python >= 2.6"
    - "shade"
options:
   name:
     description:
        - Name or ID of the Network
     required: false
     version_added: "2.2"
   filters:
     description:
        - A dictionary of meta data to use for further filtering. Elements of
          this dictionary may be additional dictionaries.
     required: false
extends_documentation_fragment: openstack
'''

EXAMPLES = '''
# Gather facts about previously created networks
- os_networks_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
- debug: var=openstack_networks

# Gather facts about a previously created network by name
- os_networks_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
    name: network1
- debug: var=openstack_networks

# Gather facts about a previously created network with filter
# (note: name and filters parameters are Not mutually exclusive)
- os_networks_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
    filters:
      tenant_id: 55e2ce24b2a245b09f181bf025724cbe
      subnets:
        - 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400
        - 443d4dc0-91d4-4998-b21c-357d10433483
- debug: var=openstack_networks
'''

RETURN = '''
openstack_networks:
    description: has all the openstack facts about the networks
    returned: always, but can be null
    type: complex
    contains:
        id:
            description: Unique UUID.
            returned: success
            type: string
        name:
            description: Name given to the network.
            returned: success
            type: string
        status:
            description: Network status.
            returned: success
            type: string
        subnets:
            description: Subnet(s) included in this network.
            returned: success
            type: list of strings
        tenant_id:
            description: Tenant id associated with this network.
            returned: success
            type: string
        shared:
            description: Network shared flag.
            returned: success
            type: boolean
'''


def main():
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        filters=dict(required=False, type='dict', default=None)
    )
    module = AnsibleModule(argument_spec)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        networks = cloud.search_networks(module.params['name'],
                                         module.params['filters'])
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_networks=networks))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
Python
0
@@ -1203,34 +1203,8 @@ lse%0A - version_added: %222.2%22%0A f
c63681e6fc3d5e336a13698a473f4aaf61fdb9b1
Remove width and x arguments until fixed positioning.
center_dmenu.py
center_dmenu.py
#!/usr/bin/env python2
# Copyright 2013 Ryan McGowan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from Xlib import display
import sys
from os import system


def get_dimensions():
    current_display = display.Display()
    current_screen = current_display.screen()
    return (current_screen['width_in_pixels'],
            current_screen['height_in_pixels'],
            current_screen['width_in_mms'],
            current_screen['height_in_mms'])


def parse_dmenu_args(args):
    x_width, x_height, mms_width, mms_height = get_dimensions()
    num_args = len(args)
    # Do some math to determine a multiplier to go from points to pixels.
    pixels_per_point = x_height / (mms_height / 25.4) / 72
    # 20% padding means only 80% of the screen is used by dmenu with 10%
    # padding on each side.
    padding = .24
    typeface = 'Inconsolata'
    # Font size and lineheight are in points
    font_size = 10
    line_height = 24

    # Get arguments from the command line.
    if num_args > 1:
        padding = float(args[1])
    if num_args > 2:
        line_height = int(args[2])
    if num_args > 3:
        font_size = int(args[3])
    if num_args > 4:
        typeface = args[4]

    # Set some default values for dmenu args
    dmenu_run_args = {
        'x': int(round(padding * x_width / 2.0, 0)),
        'height': int(round(line_height * pixels_per_point, 0)),
        'extra_args': "-fn '{0}:size={1}'".format(typeface, font_size)
    }

    # Determine propper height and width for input into dmenu
    dmenu_run_args['width'] = x_width - (2 * dmenu_run_args['x'])
    dmenu_run_args['y'] = (x_height - dmenu_run_args['height']) / 2
    return dmenu_run_args


def main(args):
    dmenu_run_args = parse_dmenu_args(args)
    return system(("dmenu_run {extra_args} -w {width} -x {x} -y {y}"
                   " -h {height}").format(**dmenu_run_args))


if __name__ == '__main__':
    sys.exit(main(sys.argv))
Python
0
@@ -2283,26 +2283,8 @@ rgs%7D - -w %7Bwidth%7D -x %7Bx%7D -y
999ae0c40a7a101672c735a552a7e79fcc6e9f96
Add desk_update url to admin urls
admin/common_auth/urls.py
admin/common_auth/urls.py
from __future__ import absolute_import

from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import password_change, password_change_done

from admin.common_auth import views

urlpatterns = [
    url(r'^login/?$', views.LoginView.as_view(), name='login'),
    url(r'^logout/$', views.logout_user, name='logout'),
    url(r'^register/$', views.RegisterUser.as_view(), name='register'),
    url(r'^password_change/$', password_change,
        {'post_change_redirect': reverse_lazy('auth:password_change_done')},
        name='password_change'),
    url(r'^password_change/done/$', password_change_done,
        {'template_name': 'password_change_done.html'},
        name='password_change_done'),
    url(r'^settings/desk/$', views.DeskUserFormView.as_view(), name='desk'),
]
Python
0.000001
@@ -792,16 +792,22 @@ DeskUser +Create FormView @@ -832,10 +832,107 @@ desk'),%0A + url(r'%5Esettings/desk/update/$', views.DeskUserUpdateFormView.as_view(), name='desk_update'),%0A %5D%0A
bce0c2853e6c7901280c9989893e6230ec29dbe4
Allow util.retry to return result of task coroutine
aiorchestra/core/utils.py
aiorchestra/core/utils.py
# Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import asyncio


class Singleton(type):

    _instance = None

    def __call__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instance


async def retry(fn, args=None, kwargs=None, exceptions=None,
                task_retries=1, task_retry_interval=10):
    args = args or []
    kwargs = kwargs or {}
    while task_retries > 0:
        try:
            result = await fn(*args, **kwargs)
            if result:
                return
        except Exception as e:
            if not exceptions or not isinstance(e, exceptions):
                raise e
        if task_retry_interval:
            await asyncio.sleep(task_retry_interval)
        task_retries -= 1
    raise Exception("exiting retry loop")


def operation(action):
    async def wraps(*args, **kwargs):
        source = list(args)[0]
        source.context.logger.debug(
            '[{0}] - staring task "{1}" execution.'
            .format(source.name, action.__name__))
        try:
            await action(*args, **kwargs)
            source.context.logger.debug(
                '[{0}] - ending task "{1}" execution'
                .format(source.name, action.__name__))
        except Exception as ex:
            source.context.logger.error(
                '[{0}] - error during task "{1}" execution. '
                'Reason: {2}.'
                .format(source.name, action.__name__, str(ex)))
            raise ex
    return wraps
Python
0.000499
@@ -1134,16 +1134,23 @@ return + result %0A
6b90e0e0131d2bf0f0e5efb472b1c1af8abab5b7
fix PEP8 error
aiortc/rtcicetransport.py
aiortc/rtcicetransport.py
import asyncio

import attr
from aioice import Candidate, Connection
from pyee import EventEmitter


@attr.s
class RTCIceCandidate:
    component = attr.ib()
    foundation = attr.ib()
    ip = attr.ib()
    port = attr.ib()
    priority = attr.ib()
    protocol = attr.ib()
    type = attr.ib()
    sdpMLineIndex = attr.ib(default=None)
    tcpType = attr.ib(default=None)


def candidate_from_aioice(x):
    return RTCIceCandidate(
        component=x.component,
        foundation=x.foundation,
        ip=x.host,
        port=x.port,
        priority=x.priority,
        protocol=x.transport,
        tcpType=x.tcptype,
        type=x.type)


def candidate_to_aioice(x):
    return Candidate(
        component=x.component,
        foundation=x.foundation,
        host=x.ip,
        port=x.port,
        priority=x.priority,
        transport=x.protocol,
        tcptype=x.tcpType,
        type=x.type)


class RTCIceGatherer(EventEmitter):
    """
    The :class:`RTCIceGatherer` interface gathers local host, server reflexive
    and relay candidates, as well as enabling the retrieval of local
    Interactive Connectivity Establishment (ICE) parameters which can be
    exchanged in signaling.
    """
    def __init__(self):
        super().__init__()
        self._connection = Connection(ice_controlling=False,
                                      stun_server=('stun.l.google.com', 19302))
        self.__state = 'new'

    @property
    def state(self):
        """
        The current state of the ICE gatherer.
        """
        return self.__state

    async def gather(self):
        """
        Gather ICE candidates.
        """
        if self.__state == 'new':
            self.__setState('gathering')
            await self._connection.gather_candidates()
            self.__setState('completed')

    def getLocalCandidates(self):
        """
        Retrieve the list of valid local candidates associated with the ICE
        gatherer.
        """
        return [candidate_from_aioice(x) for x in self._connection.local_candidates]

    def getLocalParameters(self):
        """
        Retrieve the ICE parameters of the ICE gatherer.

        :rtype: RTCIceParameters
        """
        return RTCIceParameters(
            usernameFragment=self._connection.local_username,
            password=self._connection.local_password)

    def __setState(self, state):
        self.__state = state
        self.emit('statechange')


@attr.s
class RTCIceParameters:
    """
    The :class:`RTCIceParameters` dictionary includes the ICE username
    fragment and password and other ICE-related parameters.
    """
    usernameFragment = attr.ib(default=None)
    "ICE username fragment."

    password = attr.ib(default=None)
    "ICE password."


class RTCIceTransport(EventEmitter):
    """
    The :class:`RTCIceTransport` interface allows an application access to
    information about the Interactive Connectivity Establishment (ICE)
    transport over which packets are sent and received.

    :param: gatherer: An :class:`RTCIceGatherer`.
    """
    def __init__(self, gatherer):
        super().__init__()
        self.__iceGatherer = gatherer
        self.__state = 'new'

    @property
    def iceGatherer(self):
        """
        The ICE gatherer passed in the constructor.
        """
        return self.__iceGatherer

    @property
    def role(self):
        """
        The current role of the ICE transport: `'controlling'` or `'controlled'`.
        """
        if self._connection.ice_controlling:
            return 'controlling'
        else:
            return 'controlled'

    @property
    def state(self):
        """
        The current state of the ICE transport.
        """
        return self.__state

    def addRemoteCandidate(self, candidate):
        """
        Add a remote candidate.
        """
        self._connection.remote_candidates += [candidate_to_aioice(candidate)]

    def getRemoteCandidates(self):
        """
        Retrieve the list of candidates associated with the remote
        :class:`RTCIceTransport`.
        """
        return [candidate_from_aioice(x) for x in self._connection.remote_candidates]

    def setRemoteCandidates(self, remoteCandidates):
        """
        Set the list of candidates associated with the remote
        :class:`RTCIceTransport`.
        """
        self._connection.remote_candidates = [candidate_to_aioice(x) for x in remoteCandidates]

    async def start(self, remoteParameters):
        """
        Initiate connectivity checks.

        :param: remoteParameters: The :class:`RTCIceParameters` associated with
                                  the remote :class:`RTCIceTransport`.
        """
        self.__setState('checking')
        self._connection.remote_username = remoteParameters.usernameFragment
        self._connection.remote_password = remoteParameters.password
        await self._connection.connect()
        self.__setState('completed')

    async def stop(self):
        """
        Irreversibly stop the :class:`RTCIceTransport`.
        """
        if self.state != 'closed':
            self.__setState('closed')
            await self._connection.close()

    @property
    def _connection(self):
        return self.iceGatherer._connection

    def __setState(self, state):
        self.__state = state
        self.emit('statechange')
Python
0.000002
@@ -1,20 +1,4 @@ -import asyncio%0A%0A impo
32f185e31ba5ccfd589c2db94c7d213b78cccd2a
Remove __init__ redefinition for module
alignak/objects/module.py
alignak/objects/module.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
#  Copyright (C) 2009-2014:
#     Hartmut Goebel, h.goebel@goebel-consult.de
#     Guillaume Bour, guillaume@bour.cc
#     aviau, alexandre.viau@savoirfairelinux.com
#     Nicolas Dupeux, nicolas@dupeux.net
#     Grégory Starck, g.starck@gmail.com
#     Gerhard Lausser, gerhard.lausser@consol.de
#     Sebastien Coavoux, s.coavoux@free.fr
#     Jean Gabes, naparuba@gmail.com
#     Romain Forlot, rforlot@yahoo.com

#  This file is part of Shinken.
#
#  Shinken is free software: you can redistribute it and/or modify
#  it under the terms of the GNU Affero General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  Shinken is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#  GNU Affero General Public License for more details.
#
#  You should have received a copy of the GNU Affero General Public License
#  along with Shinken. If not, see <http://www.gnu.org/licenses/>.

"""
This module provide Module and Modules classes used to manage internal and external modules
for each daemon
"""
import logging

from alignak.objects.item import Item, Items
from alignak.property import StringProp, ListProp
from alignak.util import strip_and_uniq

logger = logging.getLogger(__name__)  # pylint: disable=C0103


class Module(Item):
    """
    Class to manage a module
    """
    my_type = 'module'

    properties = Item.properties.copy()
    properties.update({
        'python_name': StringProp(),
        'module_alias': StringProp(),
        'module_types': ListProp(default=[''], split_on_coma=True),
        'modules': ListProp(default=[''], split_on_coma=True)
    })

    macros = {}

    def __init__(self, params=None, parsing=True):
        """
        This function is useful because of the unit tests suite. Without this module initialisation
        some tests are broken
        :param params:
        :param parsing:
        """
        super(Module, self).__init__(params, parsing=parsing)

    # For debugging purpose only (nice name)
    def get_name(self):
        """
        Get name of module

        :return: Name of module
        :rtype: str
        """
        return self.module_alias

    def get_types(self):
        """
        Get name of module

        :return: Name of module
        :rtype: str
        """
        return self.module_types

    def is_a_module(self, module_type):
        """
        Is the module of the required type?

        :param module_type: module type to check
        :type: str
        :return: True / False
        """
        return module_type in self.module_types

    def __repr__(self):
        return '<module module=%s alias=%s />' % (self.python_name, self.module_alias)
    __str__ = __repr__


class Modules(Items):
    """
    Class to manage list of modules
    Modules is used to group all Module
    """
    name_property = "module_alias"
    inner_class = Module

    def linkify(self):
        """
        Link modules

        :return: None
        """
        self.linkify_s_by_plug()

    def linkify_s_by_plug(self, modules=None):
        """
        Link modules

        :return: None
        """
        for module in self:
            new_modules = []
            mods = strip_and_uniq(module.modules)
            for plug_name in mods:
                plug_name = plug_name.strip()

                # don't read void names
                if plug_name == '':
                    continue

                # We are the modules, we search them :)
                plug = self.find_by_name(plug_name)
                if plug is not None:
                    new_modules.append(plug)
                else:
                    err = "[module] unknown %s module from %s" % (plug_name, module.get_name())
                    module.configuration_errors.append(err)
            module.modules = new_modules

    def explode(self):
        """
        Explode but not explode because this function is empty

        :return: None
        """
        pass
Python
0.000086
@@ -2693,323 +2693,8 @@ %7B%7D%0A%0A - def __init__(self, params=None, parsing=True):%0A %22%22%22%0A This function is useful because of the unit tests suite. Without this module initialisation%0A some tests are broken%0A :param params:%0A :param parsing:%0A %22%22%22%0A super(Module, self).__init__(params, parsing=parsing)%0A%0A
274f05a3178109bbcb119be802e0f30c161090d4
Remove pointless tearDown
allegedb/allegedb/test.py
allegedb/allegedb/test.py
import unittest
from copy import deepcopy
import allegedb


testkvs = [0, 1, 10, 10**10, 10**10**4,
           'spam', 'eggs', 'ham',
           '💧', '🔑', '𐦖', ('spam', 'eggs', 'ham')]
testvs = [['spam', 'eggs', 'ham'], {'foo': 'bar', 0: 1, '💧': '🔑'}]
testdata = []
for k in testkvs:
    for v in testkvs:
        testdata.append((k, v))
    for v in testvs:
        testdata.append((k, v))
testdata.append(('lol', deepcopy(testdata)))


class AllegedTest(unittest.TestCase):
    def setUp(self):
        self.engine = allegedb.ORM('sqlite:///:memory:')
        self.graphmakers = (self.engine.new_graph, self.engine.new_digraph,
                            self.engine.new_multigraph, self.engine.new_multidigraph)

    def tearDown(self):
        self.engine.close()


class GraphTest(AllegedTest):
    def setUp(self):
        super().setUp()
        g = self.engine.new_graph('test')
        g.add_node(0)
        self.assertIn(0, g)
        g.add_node(1)
        self.assertIn(1, g)
        g.add_edge(0, 1)
        self.assertIn(1, g.adj[0])
        self.assertIn(0, g.adj[1])
        # TODO: test adding edges whose nodes do not yet exist
        self.engine.turn = 1
        self.assertIn(0, g)
        self.assertIn(1, g)
        self.engine.branch = 'no_edge'
        self.assertIn(0, g)
        self.assertIn(1, g)
        self.assertIn(1, g.adj[0])
        self.assertIn(0, g.adj[1])
        g.remove_edge(0, 1)
        self.assertIn(0, g)
        self.assertIn(1, g)
        self.assertNotIn(0, g.adj[1])
        self.assertNotIn(1, g.adj[0])
        self.engine.branch = 'triangle'
        g.add_node(2)
        self.assertIn(2, g)
        g.add_edge(0, 1)
        g.add_edge(1, 2)
        g.add_edge(2, 0)
        self.engine.branch = 'square'
        self.engine.turn = 2
        self.assertIn(2, g)
        self.assertIn(2, list(g.node.keys()))
        g.remove_edge(2, 0)
        g.add_node(3)
        g.add_edge(2, 3)
        g.add_edge(3, 0)
        self.engine.branch = 'nothing'
        g.remove_nodes_from((0, 1, 2, 3))
        self.engine.branch = 'trunk'
        self.engine.turn = 0


class BranchLineageTest(GraphTest):
    def runTest(self):
        """Create some branches of history and check that allegedb remembers
        where each came from and what happened in each.

        """
        self.assertTrue(self.engine.is_parent_of('trunk', 'no_edge'))
        self.assertTrue(self.engine.is_parent_of('trunk', 'triangle'))
        self.assertTrue(self.engine.is_parent_of('trunk', 'nothing'))
        self.assertTrue(self.engine.is_parent_of('no_edge', 'triangle'))
        self.assertTrue(self.engine.is_parent_of('square', 'nothing'))
        self.assertFalse(self.engine.is_parent_of('nothing', 'trunk'))
        self.assertFalse(self.engine.is_parent_of('triangle', 'no_edge'))
        g = self.engine.graph['test']
        self.assertIn(0, g.node)
        self.assertIn(1, g.node)
        self.assertIn(0, g.edge)
        self.assertIn(1, g.edge[0])
        self.engine.turn = 0

        def badjump():
            self.engine.branch = 'no_edge'
        self.assertRaises(ValueError, badjump)
        self.engine.turn = 2
        self.engine.branch = 'no_edge'
        self.assertIn(0, g)
        self.assertIn(0, list(g.node.keys()))
        self.assertNotIn(1, g.edge[0])
        self.assertRaises(KeyError, lambda: g.edge[0][1])
        self.engine.branch = 'triangle'
        self.assertIn(2, g.node)
        for orig in (0, 1, 2):
            for dest in (0, 1, 2):
                if orig == dest:
                    continue
                self.assertIn(orig, g.edge)
                self.assertIn(dest, g.edge[orig])
        self.engine.branch = 'square'
        self.assertNotIn(0, g.edge[2])
        self.assertRaises(KeyError, lambda: g.edge[2][0])
        self.engine.turn = 2
        self.assertIn(3, g.node)
        self.assertIn(1, g.edge[0])
        self.assertIn(2, g.edge[1])
        self.assertIn(3, g.edge[2])
        self.assertIn(0, g.edge[3])
        self.engine.branch = 'nothing'
        for node in (0, 1, 2):
            self.assertNotIn(node, g.node)
            self.assertNotIn(node, g.edge)
        self.engine.branch = 'trunk'
        self.engine.turn = 0
        self.assertIn(0, g.node)
        self.assertIn(1, g.node)
        self.assertIn(0, g.edge)
        self.assertIn(1, g.edge[0])


class StorageTest(AllegedTest):
    def runTest(self):
        """Test that all the graph types can store and retrieve key-value
        pairs for the graph as a whole, for nodes, and for edges.

        """
        for graphmaker in self.graphmakers:
            g = graphmaker('testgraph')
            g.add_node(0)
            g.add_node(1)
            g.add_edge(0, 1)
            n = g.node[0]
            e = g.edge[0][1]
            if isinstance(e, allegedb.graph.MultiEdges):
                e = e[0]
            for (k, v) in testdata:
                g.graph[k] = v
                self.assertIn(k, g.graph)
                self.assertEqual(g.graph[k], v)
                del g.graph[k]
                self.assertNotIn(k, g.graph)
                n[k] = v
                self.assertIn(k, n)
                self.assertEqual(n[k], v)
                del n[k]
                self.assertNotIn(k, n)
                e[k] = v
                self.assertIn(k, e)
                self.assertEqual(e[k], v)
                del e[k]
                self.assertNotIn(k, e)
            self.engine.del_graph('testgraph')


class CompiledQueriesTest(AllegedTest):
    def runTest(self):
        """Make sure that the queries generated in SQLAlchemy are the same as
        those precompiled into SQLite.

        """
        from allegedb.alchemy import Alchemist
        self.assertTrue(hasattr(self.engine.query, 'alchemist'))
        self.assertTrue(isinstance(self.engine.query.alchemist, Alchemist))
        from json import load
        with open(self.engine.query.json_path + '/sqlite.json', 'r') as jsonfile:
            precompiled = load(jsonfile)
        self.assertEqual(
            precompiled.keys(), self.engine.query.alchemist.sql.keys()
        )
        for (k, query) in precompiled.items():
            self.assertEqual(
                query,
                str(
                    self.engine.query.alchemist.sql[k]
                )
            )


if __name__ == '__main__':
    unittest.main()
Python
0.000098
@@ -664,61 +664,8 @@ h)%0A%0A - def tearDown(self):%0A self.engine.close()%0A%0A %0Acla
160987b75c0fd31f9385d0cc4a66b43b89748517
Update ephemeral_dataproc_spark_dag.py
examples/cloud-composer-examples/composer_http_post_example/ephemeral_dataproc_spark_dag.py
examples/cloud-composer-examples/composer_http_post_example/ephemeral_dataproc_spark_dag.py
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime, timedelta

from airflow import DAG
from airflow.contrib.operators.dataproc_operator import DataprocClusterCreateOperator, \
    DataProcPySparkOperator, DataprocClusterDeleteOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
from airflow.operators import BashOperator, PythonOperator
from airflow.models import Variable
from airflow.utils.trigger_rule import TriggerRule

##################################################################
# This file defines the DAG for the logic pictured below.        #
##################################################################
#                                                                #
#                  create_cluster                                #
#                         |                                      #
#                         V                                      #
#                  submit_pyspark.......                         #
#                         |            .                         #
#                        / \           V                         #
#                       /   \    move_failed_files               #
#                      /     \         ^                         #
#                     |       |        .                         #
#                     V       V        .                         #
#          delete_cluster   bq_load....                          #
#                             |                                  #
#                             V                                  #
#                  delete_transformed_files                      #
#                                                                #
# (Note: Dotted lines indicate conditional trigger rule on       #
# failure of the up stream tasks. In this case the files in the  #
# raw-{timestamp}/ GCS path will be moved to a failed-{timestamp}#
# path.)                                                         #
##################################################################

# These are stored as a Variables in our Airflow Environment.
BUCKET = Variable.get('gcs_bucket')  # GCS bucket with our data.
OUTPUT_TABLE = Variable.get('bq_output_table')  # BigQuery table to which results will be written
# Path to python script that does data manipulation
PYSPARK_JOB = 'gs://' + BUCKET + '/spark-jobs/spark_avg_speed.py'

# Airflow parameters, see https://airflow.incubator.apache.org/code.html
DEFAULT_DAG_ARGS = {
    'owner': 'airflow',  # The owner of the task.
    # Task instance should not rely on the previous task's schedule to succeed.
    'depends_on_past': False,
    # We use this in combination with schedule_interval=None to only trigger the DAG with a
    # POST to the REST API.
    # Alternatively, we could set this to yesterday and the dag will be triggered upon upload to the
    # dag folder.
    'start_date': datetime.utcnow(),
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,  # Retry once before failing the task.
    'retry_delay': timedelta(minutes=5),  # Time between retries.
    'project_id': Variable.get('gcp_project'),  # Cloud Composer project ID.
    # We only want the DAG to run when we POST to the api.
    # Alternatively, this could be set to '@daily' to run the job once a day.
    # more options at https://airflow.apache.org/scheduler.html#dag-runs
    'schedule_interval': None
}

# Create Directed Acyclic Graph for Airflow
with DAG('average-speed',
         default_args=DEFAULT_DAG_ARGS) as dag:  # Here we are using dag as context.
    # Create the Cloud Dataproc cluster.
    # Note: this operator will be flagged a success if the cluster by this name already exists.
    create_cluster = DataprocClusterCreateOperator(
        task_id='create_dataproc_cluster',
        # ds_nodash is an airflow macro for "[Execution] Date string no dashes"
        # in YYYYMMDD format. See docs https://airflow.apache.org/code.html?highlight=macros#macros
        cluster_name='ephemeral-spark-cluster-{{ ds_nodash }}',
        num_workers=2,
        num_preemptible_workers=2,
        zone=Variable.get('gce_zone')
    )

    # Submit the PySpark job.
    submit_pyspark = DataProcPySparkOperator(
        task_id='run_dataproc_pyspark',
        main=PYSPARK_JOB,
        # Obviously needs to match the name of cluster created in the prior Operator.
        cluster_name='ephemeral-spark-cluster-{{ ds_nodash }}',
        # Let's template our arguments for the pyspark job from the POST payload.
        arguments=["--gcs_path_raw={{ dag_run.conf['raw_path'] }}",
                   "--gcs_path_transformed=gs://" + BUCKET +
                   "/{{ dag_run.conf['transformed_path'] }}"]
    )

    # Load the transformed files to a BigQuery table.
    bq_load = GoogleCloudStorageToBigQueryOperator(
        task_id='GCS_to_BigQuery',
        bucket=BUCKET,
        # Wildcard for objects created by spark job to be written to BigQuery
        # Reads the relative path to the objects transformed by the spark job from the POST message.
        source_objects=["{{ dag_run.conf['transformed_path'] }}/part-*"],
        destination_project_dataset_table=OUTPUT_TABLE,
        schema_fields=None,
        schema_object='schemas/nyc-tlc-yellow.json',  # Relative gcs path to schema file.
        source_format='CSV',  # Note that our spark job does json -> csv conversion.
        create_disposition='CREATE_IF_NEEDED',
        skip_leading_rows=0,
        write_disposition='WRITE_TRUNCATE',  # If the table exists, overwrite it.
        max_bad_records=0
    )

    # Delete the Cloud Dataproc cluster.
    delete_cluster = DataprocClusterDeleteOperator(
        task_id='delete_dataproc_cluster',
        # Obviously needs to match the name of cluster created in the prior two Operators.
        cluster_name='ephemeral-spark-cluster-{{ ds_nodash }}',
        # This will tear down the cluster even if there are failures in upstream tasks.
        trigger_rule=TriggerRule.ALL_DONE
    )

    # Delete gcs files in the timestamped transformed folder.
    delete_transformed_files = BashOperator(
        task_id='delete_transformed_files',
        bash_command="gsutil -m rm -r gs://" + BUCKET +
                     "/{{ dag_run.conf['transformed_path'] }}/"
    )

    # If the spark job or BQ Load fails we rename the timestamped raw path to
    # a timestamped failed path.
    move_failed_files = BashOperator(
        task_id='move_failed_files',
        bash_command="gsutil mv gs://" + BUCKET + "/{{ dag_run.conf['raw_path'] }}/ " +
                     "gs://" + BUCKET + "/{{ dag_run.conf['failed_path'] }}/",
        trigger_rule=TriggerRule.ONE_FAILED
    )

    # Set the dag property of the first Operators, this will be inherited by downstream Operators.
    create_cluster.dag = dag

    create_cluster.set_downstream(submit_pyspark)

    submit_pyspark.set_downstream([delete_cluster, bq_load])

    bq_load.set_downstream(delete_transformed_files)

    move_failed_files.set_upstream([bq_load, submit_pyspark])
Python
0
@@ -4665,43 +4665,8 @@ =2,%0A - num_preemptible_workers=2,%0A
be812bc91fed48f679900732f5ee4cc3c9dc97a7
Use the default login for superuser.
devilry/project/common/default_urls.py
devilry/project/common/default_urls.py
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponsePermanentRedirect

from devilry.devilry_frontpage.views import frontpage

admin.autodiscover()


def redirecto_to_show_delivery(request, assignmentgroupid):
    delivery_id = request.GET.get('deliveryid')
    if not delivery_id:
        return HttpResponseBadRequest(
            'Requires <code>deliveryid</code> in QUERYSTRING. Perhaps you did not '
            'paste the entire URL from your email?')
    return HttpResponsePermanentRedirect(
        reverse('devilry_student_show_delivery', kwargs={'delivery_id': delivery_id}))


devilry_urls = (
    (r'^markup/', include('devilry.devilry_markup.urls')),
    (r'^authenticate/', include('devilry.devilry_authenticate.urls')),
    (r'^devilry_resetpassword/', include('devilry.devilry_resetpassword.urls')),
    url(r'^cradmin_temporaryfileuploadstore/',
        include('django_cradmin.apps.cradmin_temporaryfileuploadstore.urls')),
    (r'^devilry_help/', include('devilry.devilry_help.urls')),
    (r'^devilry_core/', include('devilry.apps.core.urls')),
    (r'^devilry_settings/', include('devilry.devilry_settings.urls')),
    ('r^student/assignmentgroup/(?P<assignmentgroupid>\d+)$', redirecto_to_show_delivery),
    (r'^devilry_student/', include('devilry.devilry_student.urls')),
    (r'^devilry_group/', include('devilry.devilry_group.urls')),
    (r'^devilry_admin/', include('devilry.devilry_admin.urls')),
    (r'^superuser/', include(admin.site.urls)),
    (r'^devilry_send_email_to_students/', include('devilry.devilry_send_email_to_students.urls')),
    (r'^devilry_search/', include('devilry.devilry_search.urls')),
    (r'^devilry_header/', include('devilry.devilry_header.urls')),
    (r'^devilry_bulkcreate_users/', include('devilry.devilry_bulkcreate_users.urls')),
    # (r'^devilry_qualifiesforexam/', include('devilry.devilry_qualifiesforexam.urls')),
    # (r'^devilry_qualifiesforexam_approved/', include('devilry.devilry_qualifiesforexam_approved.urls')),
    # (r'^devilry_qualifiesforexam_points/', include('devilry.devilry_qualifiesforexam_points.urls')),
    # (r'^devilry_qualifiesforexam_select/', include('devilry.devilry_qualifiesforexam_select.urls')),
    url(r'^devilry_examiner/', include('devilry.devilry_examiner.urls')),
    url(r'^devilry_gradingsystem/', include('devilry.devilry_gradingsystem.urls')),
    url(r'^devilry_gradingsystemplugin_points/',
        include('devilry.devilry_gradingsystemplugin_points.urls')),
    url(r'^devilry_gradingsystemplugin_approved/',
        include('devilry.devilry_gradingsystemplugin_approved.urls')),
    url(r'^devilry_detektor/', include('devilry.devilry_detektor.urls')),
    url(r'^$', frontpage, name='devilry_frontpage'),
)
Python
0
@@ -96,16 +96,74 @@ t admin%0A +from django.contrib.auth.decorators import login_required%0A from dja @@ -336,27 +336,59 @@ ge%0A%0A +%0A admin. -autodiscover( +site.login = login_required(admin.site.login )%0A%0A%0A
54c18ae0c6372922fbf108e4ad659fa81ec66284
Fix test on Win
tests/commands/pkg/test_exec.py
tests/commands/pkg/test_exec.py
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=unused-argument

import pytest

from platformio.package.commands.exec import package_exec_cmd


def test_pkg_not_installed(clirunner, validate_cliresult, isolated_pio_core):
    result = clirunner.invoke(
        package_exec_cmd,
        ["--", "openocd"],
    )
    with pytest.raises(
        AssertionError,
        match=("Could not find a package with 'openocd' executable file"),
    ):
        validate_cliresult(result)


def test_pkg_specified(clirunner, validate_cliresult, isolated_pio_core, strip_ansi):
    # with install
    result = clirunner.invoke(
        package_exec_cmd,
        ["-p", "platformio/tool-openocd", "--", "openocd", "--version"],
        obj=dict(force_click_stream=True),
    )
    validate_cliresult(result)
    output = strip_ansi(result.output)
    assert "Tool Manager: Installing platformio/tool-openocd" in output
    assert "Open On-Chip Debugger" in output


def test_unrecognized_options(
    clirunner, validate_cliresult, isolated_pio_core, strip_ansi
):
    # unrecognized option
    result = clirunner.invoke(
        package_exec_cmd,
        ["--", "openocd", "--test-unrecognized"],
        obj=dict(force_click_stream=True),
    )
    with pytest.raises(
        AssertionError,
        match=("openocd: unrecognized option"),
    ):
        validate_cliresult(result)
Python
0.000001
@@ -1870,16 +1870,17 @@ match=( +r %22openocd @@ -1881,16 +1881,17 @@ penocd: +( unrecogn @@ -1890,24 +1890,33 @@ unrecognized +%7Cunknown) option%22),%0A
ab13d8157090e452c1cffec2b917800cd9c5ed56
Fix whitespace problem and always replace window['title'] None, with empty string
py3status/modules/window_title.py
py3status/modules/window_title.py
# -*- coding: utf-8 -*-
"""
Display the current window title.

Configuration parameters:
    cache_timeout: How often we refresh this module in seconds (default 0.5)
    format: display format for window_title (default '{title}')
    max_width: If width of title is greater, shrink it and add '...'
        (default 120)

Requires:
    i3-py: (https://github.com/ziberna/i3-py)
        `pip install i3-py`

If payload from server contains wierd utf-8
(for example one window have something bad in title) - the plugin will
give empty output UNTIL this window is closed. I can't fix or workaround
that in PLUGIN, problem is in i3-py library.

@author shadowprince
@license Eclipse Public License
"""

import i3


def find_focused(tree):
    if type(tree) == list:
        for el in tree:
            res = find_focused(el)
            if res:
                return res
    elif type(tree) == dict:
        if tree['focused']:
            return tree
        else:
            return find_focused(tree['nodes'] + tree['floating_nodes'])


class Py3status:
    """
    """
    # available configuration parameters
    cache_timeout = 0.5
    format = '{title}'
    max_width = 120

    def __init__(self):
        self.title = ''

    def window_title(self):
        window = find_focused(i3.get_tree())

        transformed = False
        if window and 'name' in window and window['name'] != self.title:
            if window['name'] is None:
                window['name'] = ''

            self.title = (len(window['name']) > self.max_width and
                          u"...{}".format(window['name'][-(self.max_width - 3):]) or
                          window['name'])
            transformed = True

        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'full_text': self.py3.safe_format(self.format, {'title': self.title}),
            'transformed': transformed
        }


if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
Python
0.025603
@@ -1295,16 +1295,84 @@ ree())%0A%0A + if window%5B'name'%5D is None:%0A window%5B'name'%5D = ''%0A%0A @@ -1469,100 +1469,8 @@ le:%0A - if window%5B'name'%5D is None:%0A window%5B'name'%5D = ''%0A %0A
ffc2d143df572a6835789205cc079ac6f3b5707c
Fix FTP directory download
pubrunner/getresource.py
pubrunner/getresource.py
import pubrunner
import sys
import argparse
import os
import git
import tempfile
import shutil
import logging
import traceback
import yaml
import json
import subprocess
import shlex
import wget
import gzip
import hashlib
import six
import six.moves.urllib as urllib
import time
from six.moves import reload_module
import ftplib
import ftputil
from collections import OrderedDict
import re

def calcSHA256(filename):
	return hashlib.sha256(open(filename, 'rb').read()).hexdigest()

def calcSHA256forDir(directory):
	sha256s = {}
	for filename in os.listdir(directory):
		sha256 = calcSHA256(os.path.join(directory,filename))
		sha256s[filename] = sha256
	return sha256s

def download(url,out):
	if url.startswith('ftp'):
		url = url.replace("ftp://","")
		hostname = url.split('/')[0]
		path = "/".join(url.split('/')[1:])
		with ftputil.FTPHost(hostname, 'anonymous', 'secret') as host:
			downloadFTP(path,out,host)
	elif url.startswith('http'):
		downloadHTTP(url,out)
	else:
		raise RuntimeError("Unsure how to download file. Expecting URL to start with ftp or http. Got: %s" % url)

def downloadFTP(path,out,host):
	if host.path.isfile(path):
		remoteTimestamp = host.path.getmtime(path)

		doDownload = True
		if os.path.isdir(out):
			localTimestamp = os.path.getmtime(out)
			if not remoteTimestamp > localTimestamp:
				doDownload = False
		if path.endswith('.gz'):
			outUnzipped = out[:-3]
			if os.path.isfile(outUnzipped):
				localTimestamp = os.path.getmtime(outUnzipped)
				if not remoteTimestamp > localTimestamp:
					doDownload = False

		if doDownload:
			print("\tDownloading %s" % path)
			didDownload = host.download(path,out)
			os.utime(out,(remoteTimestamp,remoteTimestamp))
		else:
			print("\tSkipping %s" % path)
	elif host.path.isdir(path):
		basename = host.path.basename(path)
		newOut = os.path.join(basename)
		os.makedirs(newOut)
		for children in host.listdir(path):
			srcFilename = host.path.join(path,child)
			dstFilename = os.path.join(newOut,child)
			downloadFTP(srcFilename,dstFilename,host)
	else:
		raise RuntimeError("Path (%s) is not a file or directory" % path)

def downloadHTTP(url,out):
	fileAlreadyExists = os.path.isfile(out)
	if fileAlreadyExists:
		timestamp = os.path.getmtime(source)
		beforeHash = calcSHA256(out)
		os.unlink(out)

	wget.download(url,out,bar=None)

	if fileAlreadyExists:
		afterHash = calcSHA256(out)
		if beforeHash == afterHash:
			# File's haven't changed to move the modified date back
			os.utime(out,(timestamp,timestamp))

def gunzip(source,dest,deleteSource=False):
	timestamp = os.path.getmtime(source)
	with gzip.open(source, 'rb') as f_in, open(dest, 'wb') as f_out:
		shutil.copyfileobj(f_in, f_out)
	os.utime(dest,(timestamp,timestamp))
	if deleteSource:
		os.unlink(source)

def getResource(resource):
	print("Fetching resource: %s" % resource)

	globalSettings = pubrunner.getGlobalSettings()
	resourceDir = os.path.expanduser(globalSettings["storage"]["resources"])
	thisResourceDir = os.path.join(resourceDir,resource)

	packagePath = os.path.dirname(pubrunner.__file__)
	resourceYamlPath = os.path.join(packagePath,'resources','%s.yml' % resource)
	assert os.path.isfile(resourceYamlPath), "Can not find appropriate file for resource: %s" % resource

	with open(resourceYamlPath) as f:
		resourceInfo = yaml.load(f)

	#print(json.dumps(resourceInfo,indent=2))

	if resourceInfo['type'] == 'git':
		assert isinstance(resourceInfo['url'], six.string_types), 'The URL for a git resource must be a single address'

		if os.path.isdir(thisResourceDir):
			# Assume it is an existing git repo
			repo = git.Repo(thisResourceDir)
			repo.remote().pull()
		else:
			os.makedirs(thisResourceDir)
			git.Repo.clone_from(resourceInfo["url"], thisResourceDir)

		return thisResourceDir
	elif resourceInfo['type'] == 'dir':
		assert isinstance(resourceInfo['url'], six.string_types) or isinstance(resourceInfo['url'],list), 'The URL for a dir resource must be a single or multiple addresses'
		if isinstance(resourceInfo['url'], six.string_types):
			urls = [resourceInfo['url']]
		else:
			urls = resourceInfo['url']

		if os.path.isdir(thisResourceDir):
			for url in urls:
				basename = url.split('/')[-1]
				assert isinstance(url,six.string_types), 'Each URL for the dir resource must be a string'
				download(url,os.path.join(thisResourceDir,basename))
		else:
			os.makedirs(thisResourceDir)
			for url in urls:
				basename = url.split('/')[-1]
				assert isinstance(url,six.string_types), 'Each URL for the dir resource must be a string'
				download(url,os.path.join(thisResourceDir,basename))

		if 'unzip' in resourceInfo and resourceInfo['unzip'] == True:
			for filename in os.listdir(thisResourceDir):
				if filename.endswith('.gz'):
					unzippedName = filename[:-3]
					gunzip(os.path.join(thisResourceDir,filename), os.path.join(thisResourceDir,unzippedName), deleteSource=True)

		return thisResourceDir
	else:
		raise RuntimeError("Unknown resource type (%s) for resource: %s" % (resourceInfo['type'],resource))
Python
0.000001
@@ -1829,16 +1829,20 @@ th.join( +out, basename @@ -1843,16 +1843,49 @@ sename)%0A +%09%09if not os.path.isdir(newOut):%0A%09 %09%09os.mak @@ -1913,11 +1913,8 @@ hild -ren in
b4c5073bcd46cf80905c56e499093f16109df109
Fix query for information_schema
pyathenajdbc/sqlalchemy_athena.py
pyathenajdbc/sqlalchemy_athena.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals

import re

from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.sql.compiler import IdentifierPreparer, SQLCompiler
from sqlalchemy.sql.sqltypes import (BIGINT, BINARY, BOOLEAN, DATE, DECIMAL, FLOAT,
                                     INTEGER, NULLTYPE, STRINGTYPE, TIMESTAMP)


class UniversalSet(object):
    """UniversalSet

    https://github.com/dropbox/PyHive/blob/master/pyhive/common.py"""
    def __contains__(self, item):
        return True


class AthenaIdentifierPreparer(IdentifierPreparer):
    """PrestoIdentifierPreparer

    https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
    reserved_words = UniversalSet()


class AthenaCompiler(SQLCompiler):
    """PrestoCompiler

    https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
    def visit_char_length_func(self, fn, **kw):
        return 'length{}'.format(self.function_argspec(fn, **kw))


_TYPE_MAPPINGS = {
    'DOUBLE': FLOAT,
    'SMALLINT': INTEGER,
    'BOOLEAN': BOOLEAN,
    'INTEGER': INTEGER,
    'VARCHAR': STRINGTYPE,
    'TINYINT': INTEGER,
    'DECIMAL': DECIMAL,
    'ARRAY': STRINGTYPE,
    'ROW': STRINGTYPE,  # StructType
    'VARBINARY': BINARY,
    'MAP': STRINGTYPE,
    'BIGINT': BIGINT,
    'DATE': DATE,
    'TIMESTAMP': TIMESTAMP,
}


class AthenaDialect(DefaultDialect):

    name = 'awsathena'
    driver = 'jdbc'
    preparer = AthenaIdentifierPreparer
    statement_compiler = AthenaCompiler
    supports_alter = False
    supports_pk_autoincrement = False
    supports_default_values = False
    supports_empty_insert = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    returns_unicode_strings = True
    description_encoding = None
    supports_native_boolean = True

    @classmethod
    def dbapi(cls):
        import pyathenajdbc
        return pyathenajdbc

    def _get_default_schema_name(self, connection):
        return 'default'

    def create_connect_args(self, url):
        # Connection string format:
        #   awsathena+jdbc://
        #   {access_key}:{secret_key}@athena.{region_name}.amazonaws.com:443/
        #   {schema_name}?s3_staging_dir={s3_staging_dir}&driver_path={driver_path}&...
        opts = {
            'access_key': url.username,
            'secret_key': url.password,
            'region_name': re.sub(r'^athena\.([a-z0-9-]+)\.amazonaws\.com$', r'\1', url.host),
            'schema_name': url.database if url.database else self.default_schema_name
        }
        opts.update(url.query)
        return [[], opts]

    def get_schema_names(self, connection, **kw):
        query = """
                SELECT schema_name
                FROM information_schema.schemata
                WHERE schema_name NOT IN ('information_schema')
                """
        return [row.schema_name for row in connection.execute(query).fetchall()]

    def get_table_names(self, connection, schema=None, **kw):
        query = """
                SELECT table_name
                FROM information_schema.tables
                WHERE table_schema = '{0}'
                """.format(schema if schema else connection.connection.schema_name)
        return [row.table_name for row in connection.execute(query).fetchall()]

    def has_table(self, connection, table_name, schema=None):
        table_names = self.get_table_names(connection, schema)
        if table_name in table_names:
            return True
        return False

    def get_columns(self, connection, table_name, schema=None, **kw):
        query = """
                SELECT
                  column_name,
                  data_type,
                  is_nullable,
                  column_default,
                  ordinal_position,
                  comment
                FROM information_schema.columns
                WHERE table_schema = '{0}'
                """.format(schema if schema else connection.connection.schema_name)
        return [
            {
                'name': row.column_name,
                'type': _TYPE_MAPPINGS.get(re.sub(r'^([A-Z]+)($|\(.+\)$)', r'\1',
                                                  row.data_type.upper()), NULLTYPE),
                'nullable': True if row.is_nullable == 'YES' else False,
                'default': row.column_default,
                'ordinal_position': row.ordinal_position,
                'comment': row.comment,
            }
            for row in connection.execute(query).fetchall()
        ]

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # Athena has no support for foreign keys.
        return []

    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        # Athena has no support for primary keys.
        return []

    def get_indexes(self, connection, table_name, schema=None, **kw):
        # Athena has no support for indexes.
        return []

    def do_rollback(self, dbapi_connection):
        # No transactions for Athena
        pass

    def _check_unicode_returns(self, connection, additional_tests=None):
        # Requests gives back Unicode strings
        return True

    def _check_unicode_description(self, connection):
        # Requests gives back Unicode strings
        return True
Python
0.999978
@@ -3033,32 +3033,105 @@ ma=None, **kw):%0A + schema = schema if schema else connection.connection.schema_name%0A query = @@ -3295,57 +3295,8 @@ hema - if schema else connection.connection.schema_name )%0A @@ -3665,42 +3665,351 @@ -query = %22%22%22%0A SELECT +# information_schema.columns fails when filtering with table_schema or table_name%0A # when specifying a name that does not exist in table_schema or table_name.%0A schema = schema if schema else connection.connection.schema_name%0A query = %22%22%22%0A SELECT%0A table_schema,%0A table_name, %0A @@ -4260,118 +4260,11 @@ -WHERE table_schema = '%7B0%7D'%0A %22%22%22.format(schema if schema else connection.connection.schema_name) +%22%22%22 %0A @@ -4779,16 +4779,91 @@ chall()%0A + if row.table_schema == schema and row.table_name == table_name%0A
252eeac29bd7f5c1237a0868059fab5577b401ac
Use total bytes values to calculate rates
checks.d/iis.py
checks.d/iis.py
'''
Check the performance counters from IIS
'''
from checks import AgentCheck

class IIS(AgentCheck):
    METRICS = [
        ('iis.uptime', 'gauge', 'ServiceUptime'),

        # Network
        ('iis.net.bytes_sent', 'gauge', 'BytesSentPerSec'),
        ('iis.net.bytes_rcvd', 'gauge', 'BytesReceivedPerSec'),
        ('iis.net.bytes_total', 'gauge', 'BytesTotalPerSec'),
        ('iis.net.num_connections', 'gauge', 'CurrentConnections'),
        ('iis.net.files_sent', 'rate', 'TotalFilesSent'),
        ('iis.net.files_rcvd', 'rate', 'TotalFilesReceived'),
        ('iis.net.connection_attempts', 'rate', 'TotalConnectionAttemptsAllInstances'),

        # HTTP Methods
        ('iis.httpd_request_method.get', 'rate', 'TotalGetRequests'),
        ('iis.httpd_request_method.post', 'rate', 'TotalPostRequests'),
        ('iis.httpd_request_method.head', 'rate', 'TotalHeadRequests'),
        ('iis.httpd_request_method.put', 'rate', 'TotalPutRequests'),
        ('iis.httpd_request_method.delete', 'rate', 'TotalDeleteRequests'),
        ('iis.httpd_request_method.options', 'rate', 'TotalOptionsRequests'),
        ('iis.httpd_request_method.trace', 'rate', 'TotalTraceRequests'),

        # Errors
        ('iis.errors.not_found', 'rate', 'TotalNotFoundErrors'),
        ('iis.errors.locked', 'rate', 'TotalLockedErrors'),

        # Users
        ('iis.users.anon', 'rate', 'TotalAnonymousUsers'),
        ('iis.users.nonanon', 'rate', 'TotalNonAnonymousUsers'),

        # Requests
        ('iis.requests.cgi', 'rate', 'TotalCGIRequests'),
        ('iis.requests.isapi', 'rate', 'TotalISAPIExtensionRequests'),
    ]

    def check(self, instance):
        try:
            import wmi
        except ImportError:
            self.log.error("Unable to import 'wmi' module")
            return

        # Connect to the WMI provider
        host = instance.get('host', None)
        user = instance.get('username', None)
        password = instance.get('password', None)
        tags = instance.get('tags', None)
        w = wmi.WMI(host, user=user, password=password)

        try:
            wmi_cls = w.Win32_PerfFormattedData_W3SVC_WebService(name="_Total")
            if not wmi_cls:
                raise Exception('Missing _Total from Win32_PerfFormattedData_W3SVC_WebService')
        except Exception:
            self.log.exception('Unable to fetch Win32_PerfFormattedData_W3SVC_WebService class')
            return

        wmi_cls = wmi_cls[0]

        for metric, mtype, wmi_val in self.METRICS:
            if not hasattr(wmi_cls, wmi_val):
                self.log.error('Unable to fetch metric %s. Missing %s in Win32_PerfFormattedData_W3SVC_WebService' \
                    % (metric, wmi_val))
                continue

            # Submit the metric value with the correct type
            value = getattr(wmi_cls, wmi_val)
            metric_func = getattr(self, mtype)
            metric_func(metric, value, tags=tags)
Python
0.000001
@@ -208,33 +208,37 @@ tes_sent', ' -gaug +rat e', ' +Total BytesSentPer @@ -234,22 +234,16 @@ ytesSent -PerSec '),%0A @@ -266,33 +266,37 @@ tes_rcvd', ' -gaug +rat e', ' +Total BytesReceive @@ -296,22 +296,16 @@ Received -PerSec '),%0A @@ -337,33 +337,37 @@ ', ' -gaug +rat e', ' +Total BytesT -otalPerSec +ransferred '),%0A
508fb5e0ca53995d90baa4b500bf4ab0f3f3fb50
Add more types to SQLAlchemy type conversion
pyathenajdbc/sqlalchemy_athena.py
pyathenajdbc/sqlalchemy_athena.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals

import re

from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.sql.compiler import IdentifierPreparer, SQLCompiler
from sqlalchemy.sql.sqltypes import (BIGINT, BINARY, BOOLEAN, DATE, DECIMAL,
                                     FLOAT, INTEGER, NULLTYPE, STRINGTYPE,
                                     TIMESTAMP)

import pyathenajdbc


class UniversalSet(object):
    """UniversalSet

    https://github.com/dropbox/PyHive/blob/master/pyhive/common.py"""
    def __contains__(self, item):
        return True


class AthenaIdentifierPreparer(IdentifierPreparer):
    """PrestoIdentifierPreparer

    https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
    reserved_words = UniversalSet()


class AthenaCompiler(SQLCompiler):
    """PrestoCompiler

    https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
    def visit_char_length_func(self, fn, **kw):
        return 'length{0}'.format(self.function_argspec(fn, **kw))


_TYPE_MAPPINGS = {
    'REAL': FLOAT,
    'FLOAT': FLOAT,
    'DOUBLE': FLOAT,
    'SMALLINT': INTEGER,
    'BOOLEAN': BOOLEAN,
    'INTEGER': INTEGER,
    'VARCHAR': STRINGTYPE,
    'TINYINT': INTEGER,
    'DECIMAL': DECIMAL,
    'ARRAY': STRINGTYPE,
    'ROW': STRINGTYPE,  # StructType
    'VARBINARY': BINARY,
    'MAP': STRINGTYPE,
    'BIGINT': BIGINT,
    'DATE': DATE,
    'TIMESTAMP': TIMESTAMP,
}


class AthenaDialect(DefaultDialect):

    name = 'awsathena'
    driver = 'jdbc'
    preparer = AthenaIdentifierPreparer
    statement_compiler = AthenaCompiler
    default_paramstyle = pyathenajdbc.paramstyle
    supports_alter = False
    supports_pk_autoincrement = False
    supports_default_values = False
    supports_empty_insert = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    returns_unicode_strings = True
    description_encoding = None
    supports_native_boolean = True

    @classmethod
    def dbapi(cls):
        return pyathenajdbc

    def _get_default_schema_name(self, connection):
        return connection.connection.schema_name

    def create_connect_args(self, url):
        # Connection string format:
        #   awsathena+jdbc://
        #   {access_key}:{secret_key}@athena.{region_name}.amazonaws.com:443/
        #   {schema_name}?s3_staging_dir={s3_staging_dir}&driver_path={driver_path}&...
        opts = {
            'access_key': url.username,
            'secret_key': url.password,
            'region_name': re.sub(r'^athena\.([a-z0-9-]+)\.amazonaws\.com$', r'\1', url.host),
            'schema_name': url.database if url.database else 'default'
        }
        opts.update(url.query)
        return [[], opts]

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        query = """
                SELECT schema_name
                FROM information_schema.schemata
                WHERE schema_name NOT IN ('information_schema')
                """
        return [row.schema_name for row in connection.execute(query).fetchall()]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        schema = schema if schema else connection.connection.schema_name
        query = """
                SELECT table_name
                FROM information_schema.tables
                WHERE table_schema = '{0}'
                """.format(schema)
        return [row.table_name for row in connection.execute(query).fetchall()]

    def has_table(self, connection, table_name, schema=None):
        table_names = self.get_table_names(connection, schema)
        if table_name in table_names:
            return True
        return False

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # information_schema.columns fails when filtering with table_schema or table_name,
        # if specifying a name that does not exist in table_schema or table_name.
        schema = schema if schema else connection.connection.schema_name
        query = """
                SELECT
                  table_schema,
                  table_name,
                  column_name,
                  data_type,
                  is_nullable,
                  column_default,
                  ordinal_position,
                  comment
                FROM information_schema.columns
                """
        return [
            {
                'name': row.column_name,
                'type': _TYPE_MAPPINGS.get(re.sub(r'^([A-Z]+)($|\(.+\)$)', r'\1',
                                                  row.data_type.upper()), NULLTYPE),
                'nullable': True if row.is_nullable == 'YES' else False,
                'default': row.column_default,
                'ordinal_position': row.ordinal_position,
                'comment': row.comment,
            }
            for row in connection.execute(query).fetchall()
            if row.table_schema == schema and row.table_name == table_name
        ]

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # Athena has no support for foreign keys.
        return []

    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        # Athena has no support for primary keys.
        return []

    def get_indexes(self, connection, table_name, schema=None, **kw):
        # Athena has no support for indexes.
        return []

    def do_rollback(self, dbapi_connection):
        # No transactions for Athena
        pass

    def _check_unicode_returns(self, connection, additional_tests=None):
        # Requests gives back Unicode strings
        return True

    def _check_unicode_description(self, connection):
        # Requests gives back Unicode strings
        return True
Python
0
@@ -1105,24 +1105,48 @@ APPINGS = %7B%0A + 'BOOLEAN': BOOLEAN,%0A 'REAL': @@ -1189,24 +1189,48 @@ LE': FLOAT,%0A + 'TINYINT': INTEGER,%0A 'SMALLIN @@ -1251,124 +1251,122 @@ ' -BOOLEAN': BOOLEAN,%0A 'INTEGER': INTEGER,%0A 'VARCHAR': STRINGTYPE,%0A 'TINYINT': INTEGER,%0A 'DECIMAL': DECIMAL +INTEGER': INTEGER,%0A 'BIGINT': BIGINT,%0A 'DECIMAL': DECIMAL,%0A 'CHAR': STRINGTYPE,%0A 'VARCHAR': STRINGTYPE ,%0A @@ -1473,38 +1473,16 @@ NGTYPE,%0A - 'BIGINT': BIGINT,%0A 'DAT
efe2e46c36501afa6072e0bb984ee737caeaeb13
order APi is instant not a progress
chiphub/urls.py
chiphub/urls.py
"""chiphub URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin import chatroom.views as chatroom import digikey.views as digikey import login.views as login import main.views as main urlpatterns = [ url(r'^admin/', include(admin.site.urls)), # Misc url(r'^$', main.index), url(r'^faq/$', main.faq), url(r'^exchange/$', main.exchange), url(r'^about_us/$', main.about_us), # ChatRoom url(r'^append/$', chatroom.append), url(r'^retreive/$', chatroom.retreive), url(r'^chatroom/$', chatroom.chatroom), # Login url(r'^profile/$', login.profile), url(r'^update_profile/$', login.update_profile), url(r'^list_order/$', chatroom.list_order), url(r'^order_info/$', digikey.order_info), url(r'^islogin/$', login.isLogin), url(r'^logout/$', login.logout), # Google OAuth url(r'^google_login/$', login.google_login), url(r'^google_callback/$', login.google_callback), # Digikey url(r'^progress/$', digikey.progress), url(r'^order/$', digikey.order_page), url(r'^order_digikey/$', digikey.order_digikey), url(r'^price_digikey/$', digikey.get_digikey_price), url(r'^rally_digikey/$', digikey.get_current_rally), url(r'^group_history_digikey/$', digikey.get_groups), url(r'^group_info_digikey/$', digikey.get_group_info), url(r'^user_history_digikey/$', digikey.get_user_orders), url(r'^order_info_digikey/$', digikey.get_single_order), url(r'^pay_digikey/$', digikey.apply_paying_info), # Digikey - new url(r'^/digikey/progress_page/$', digikey.progress), url(r'^/digikey/current_order/$', digikey.order_page), url(r'^/digikey/ordering/$', digikey.order_digikey), url(r'^/digikey/pay/$', digikey.apply_paying_info), url(r'^/digikey/price/$', digikey.get_digikey_price), url(r'^/digikey/rally/$', digikey.get_current_rally), url(r'^/digikey/groups/$', digikey.get_groups), url(r'^/digikey/group_info/$', digikey.get_group_info), url(r'^/digikey/user_history/$', digikey.get_user_orders), url(r'^/digikey/order_info/$', digikey.get_single_order_info), ]
Python
0.000001
@@ -2288,19 +2288,16 @@ ey/order -ing /$', dig
39824268150fb5b9cb86c599dad81b18f7ef7142
Add the Award and AwardGrant models to the init set
pyforge/pyforge/model/__init__.py
pyforge/pyforge/model/__init__.py
# -*- coding: utf-8 -*-
"""The application's model objects"""
from .session import ProjectSession
from .project import Theme, Neighborhood, NeighborhoodFile, Project, ProjectFile, AppConfig, SearchConfig, ScheduledMessage
from .discuss import Discussion, Thread, PostHistory, Post, Attachment
from .artifact import Artifact, Message, VersionedArtifact, Snapshot, ArtifactLink, nonce, Feed
from .auth import User, ProjectRole, OpenId, EmailAddress
from .openid_model import OpenIdStore, OpenIdAssociation, OpenIdNonce
from .filesystem import File
from .tag import TagEvent, Tag, UserTags
from .types import ArtifactReference, ArtifactReferenceType

from .session import main_doc_session, main_orm_session
from .session import project_doc_session, project_orm_session
from .session import artifact_orm_session

from ming.orm.mapped_class import MappedClass
MappedClass.compile_all()
Python
0
@@ -382,16 +382,35 @@ ce, Feed +, Award, AwardGrant %0Afrom .a
804402572ca664b7a1bd3ed21de277210fe5c74e
Use importlib instead of django.utils.importlib
src/decorator_include/__init__.py
src/decorator_include/__init__.py
""" A replacement for ``django.conf.urls.include`` that takes a decorator, or an iterable of view decorators as the first argument and applies them, in reverse order, to all views in the included urlconf. """ from __future__ import unicode_literals from builtins import object, str from django.core.exceptions import ImproperlyConfigured from django.core.urlresolvers import RegexURLPattern, RegexURLResolver from django.utils.importlib import import_module class DecoratedPatterns(object): """ A wrapper for an urlconf that applies a decorator to all its views. """ def __init__(self, urlconf_name, decorators): self.urlconf_name = urlconf_name try: iter(decorators) except TypeError: decorators = [decorators] self.decorators = decorators if not isinstance(urlconf_name, str): self._urlconf_module = self.urlconf_name else: self._urlconf_module = None def decorate_pattern(self, pattern): if isinstance(pattern, RegexURLResolver): regex = pattern.regex.pattern urlconf_module = pattern.urlconf_name default_kwargs = pattern.default_kwargs namespace = pattern.namespace app_name = pattern.app_name urlconf = DecoratedPatterns(urlconf_module, self.decorators) decorated = RegexURLResolver( regex, urlconf, default_kwargs, app_name, namespace ) else: callback = pattern.callback for decorator in reversed(self.decorators): callback = decorator(callback) decorated = RegexURLPattern( pattern.regex.pattern, callback, pattern.default_args, pattern.name ) return decorated def _get_urlconf_module(self): if self._urlconf_module is None: self._urlconf_module = import_module(self.urlconf_name) return self._urlconf_module urlconf_module = property(_get_urlconf_module) def _get_urlpatterns(self): try: patterns = self.urlconf_module.urlpatterns except AttributeError: patterns = self.urlconf_module return [self.decorate_pattern(pattern) for pattern in patterns] urlpatterns = property(_get_urlpatterns) def __getattr__(self, name): return getattr(self.urlconf_module, name) def decorator_include(decorators, arg, namespace=None, app_name=None): """ Works like ``django.conf.urls.include`` but takes a view decorator or an iterable of view decorators as the first argument and applies them, in reverse order, to all views in the included urlconf. """ if isinstance(arg, tuple): if namespace: raise ImproperlyConfigured( 'Cannot override the namespace for a dynamic module that provides a namespace' ) urlconf, app_name, namespace = arg else: urlconf = arg decorated_urlconf = DecoratedPatterns(urlconf, decorators) return (decorated_urlconf, app_name, namespace)
Python
0.000001
@@ -202,16 +202,17 @@ nf.%0A%22%22%22%0A +%0A from __f @@ -403,16 +403,107 @@ esolver%0A +%0Atry:%0A from importlib import import_module%0Aexcept ImportError:%0A # For python 2.6%0A from dja
803368f1741a9558ea84092dc975c1a10f51fa79
Change url in dashboard administrador
administracion/urls.py
administracion/urls.py
from django.conf.urls import url

from .views import admin_main_dashboard, admin_users_dashboard, \
    admin_users_create, admin_users_edit, admin_users_edit_form, \
    admin_users_delete_modal, admin_users_delete, list_studies

app_name = 'administracion'

# Urls en espanol
urlpatterns = [
    url(r'^principal/', admin_main_dashboard, name='main'),
    url(r'^usuarios/nuevo/', admin_users_create, name='users_add'),
    url(r'^usuarios/editar/(\d+)/', admin_users_edit_form, name='users_edit_form'),
    url(r'^usuarios/editar/guardar/', admin_users_edit, name='users_edit'),
    url(r'^usuarios/borrar/(\d+)/', admin_users_delete_modal, name='users_delete_modal'),
    url(r'^usuarios/borrar/confirmar/', admin_users_delete, name='users_delete'),
    url(r'^usuarios/', admin_users_dashboard, name='users'),
    url(r'^principal/(?P<status_study>[\w\-]+)/$', list_studies, name='main_estudios'),
]
Python
0.000001
@@ -337,16 +337,17 @@ incipal/ +$ ', admin
b488b884cf802546a795b0084003a9200e0d646b
Implement update_cmd in cli
cibopath/cli.py
cibopath/cli.py
# -*- coding: utf-8 -*-

import logging

import click

from cibopath import __version__
from cibopath.user_config import UserConfig
from cibopath.log import create_logger


@click.group()
@click.pass_context
@click.option(
    '-v', '--verbose',
    is_flag=True, help='Print debug information'
)
@click.option(
    '-c', '--config-file',
    type=click.Path(), default='~/.cibopathrc',
    help='Config file to hold settings'
)
@click.version_option(__version__, u'-V', u'--version', prog_name='cibopath')
def cli(ctx, verbose, config_file):
    """Cibopath - Search Cookiecutters on GitHub."""
    ctx.obj = UserConfig(config_file)

    logger = create_logger()
    if verbose:
        logger.setLevel(logging.DEBUG)
        logger.debug('Logger initialized')
    else:
        logger.setLevel(logging.INFO)


@click.pass_obj
def _default_username(config):
    try:
        return config.get_value('github', 'username')
    except KeyError:
        return None


@click.pass_obj
def _default_token(config):
    try:
        return config.get_value('github', 'token')
    except KeyError:
        return None


@cli.command('update')
@click.option('-u', '--username', required=True, default=_default_username)
@click.option('-t', '--token', required=True, default=_default_token)
def update_cmd(username, token):
    logger = logging.getLogger('cibopath')
    logger.debug(
        'username:{username} token:{token}'
        ''.format(username=username, token=token)
    )


def _show_user_config(ctx, param, value):
    if not value or ctx.resilient_parsing:
        return
    click.echo(ctx.obj.text)
    ctx.exit()


def _validate_variable(ctx, param, value):
    try:
        section, key = value.split('.')
        return section, key
    except ValueError:
        raise click.BadParameter('variable needs to be in format section.key')


@cli.command('config')
@click.pass_obj
@click.option(
    '--list', 'show_config',
    is_flag=True, default=False, is_eager=True, expose_value=False,
    callback=_show_user_config
)
@click.argument('variable', callback=_validate_variable)
@click.argument('value')
def config_cmd(config, variable, value):
    config.set_value(*variable, value)

main = cli
Python
0.000001
@@ -164,16 +164,96 @@ _logger%0A +from cibopath.scraper import load_templates%0Afrom cibopath.templates import dump%0A %0A%0A@click @@ -1548,16 +1548,198 @@ )%0A )%0A + templates = load_templates(username, token)%0A%0A logger.debug('Found %7B%7D templates'.format(len(templates)))%0A dump(templates)%0A logger.debug('Successfully updated templates')%0A %0A%0Adef _s @@ -2445,16 +2445,17 @@ value)%0A%0A +%0A main = c
e80c6520c6b44327dbb880b4c4b3d4162744bef7
Set user's permissions when create him/her
administrator/admin.py
administrator/admin.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as Admin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _

from .filters import ChoiceDropdownFilter
from .models import DishCategory
from .models import Role
from .models import User
from restaurant.models import Restaurant


class RegistrationForm(UserCreationForm):
    """A form for users creation.

    Email, name, password and role are given.
    """

    email = forms.EmailField(required=True)

    class Meta:
        """Give some options (metadata) attached to the form."""

        model = User
        fields = ('role',)

    def save(self, commit=True):
        """Save a new user.

        Return a User object.
        """
        user = super(RegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        user.role = self.cleaned_data['role']
        user.set_is_staff(user.role)
        if commit:
            user.save()
        return user


class UserChangeForm(forms.ModelForm):
    """A form for users modification."""

    class Meta:
        """Give some options (metadata) attached to the form."""

        model = User
        fields = ('name', 'email', 'phone', 'role', 'status',)

    def save(self, commit=True):
        """Save the provided password in a hashed format
        and put is_active into an appropriate value
        (according to the user's status)

        Return a User object.
        """
        user = super(UserChangeForm, self).save(commit=False)
        user.set_is_active(user.status)
        user.set_is_staff(user.role)
        user.set_permissions(user.role)
        if commit:
            user.save()
        return user


def delete_selected_users(modeladmin, request, queryset):
    """Block selected users instead of dropping them."""
    for obj in queryset:
        obj.delete()

delete_selected_users.short_description = "Delete selested users"


class UserAdmin(Admin):
    """Represent a model in the admin interface."""

    """def queryset(self, request):
        qs = super(UserAdmin, self).queryset(request)

        # If super-user, show all comments
        if request.user.role == Role.objects.get(id=1):
            return qs

        return qs.filter(added_by=request.user)"""

    form = UserChangeForm
    add_form = RegistrationForm

    search_fields = ('name', 'email', 'phone')
    list_display = ('name', 'email', 'phone', 'role', 'status')
    ordering = ['name']
    list_per_page = 10
    list_filter = [('status', ChoiceDropdownFilter), ('role', ChoiceDropdownFilter)]

    fieldsets = (
        (None, {'fields': ('name', 'email',)}),
        (_('Personal info'), {'fields': ('phone',)}),
        (_('Status'), {'fields': ('status',)}),
        (_('Permissions'), {'fields': ('role',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'fields': ('email', 'name', 'password1', 'password2', 'role')}
         ),
    )
    actions = [delete_selected_users]


def soft_delete(modeladmin, request, queryset):
    """Soft delete function for QuerySet list."""
    for obj in queryset:
        obj.delete()

soft_delete.short_description = "Delete selected items"


class RestaurantForm(forms.ModelForm):
    """A form for restaurants modifications."""

    class Meta:
        """Give some options (metadata) attached to the form."""

        model = Restaurant
        fields = ('name', 'logo', 'location', 'type', 'tables_count',
                  'description', 'status', 'manager')

    def __init__(self, *args, **kwargs):
        super(RestaurantForm, self).__init__(*args, **kwargs)
        users = User.objects.all()
        self.fields['manager'].choices = [(user.pk, user.get_full_name())
                                          for user in users
                                          if user.status != 1 and
                                          user.role == Role.objects.get(id=2)]

    def save(self, commit=True):
        """Save the restaurant.

        Return a Restaurant object.
        """
        restaurant = super(RestaurantForm, self).save(commit=False)
        restaurant.set_manager(restaurant.manager)
        if commit:
            restaurant.save()
        return restaurant


class RestaurantAdmin(admin.ModelAdmin):
    """Custom display in restaurant's list."""

    def get_queryset(self, request):
        """
        Represent the objects.

        Return a QuerySet of all model instances
        that can be edited by the admin site.
        """
        qs = super(RestaurantAdmin, self).get_queryset(request)
        if request.user.role == Role.objects.get(id=1):
            return qs
        return qs.filter(manager=request.user.id)

    form = RestaurantForm
    list_display = ('name', 'type', 'status', 'tables_count', 'manager')
    list_per_page = 15
    actions = [soft_delete]
    admin.site.disable_action('delete_selected')
    list_filter = [('status', ChoiceDropdownFilter)]

    def _type_id(self, obj):
        return obj.type_id
    _type_id.short_description = 'restaurant type'


class DishCategoryAdmin(admin.ModelAdmin):
    """Custom display dishes categories list."""

    list_display = ('name', 'id', 'is_delete')
    list_per_page = 15


admin.site.register(User, UserAdmin)
admin.site.register(Restaurant, RestaurantAdmin)
admin.site.register(DishCategory, DishCategoryAdmin)
admin.site.unregister(Group)
Python
0.000001
@@ -1099,32 +1099,72 @@ taff(user.role)%0A + user.set_permissions(user.role)%0A if commi @@ -2245,273 +2245,8 @@ %22%22%0A%0A - %22%22%22def queryset(self, request):%0A qs = super(UserAdmin, self).queryset(request)%0A%0A # If super-user, show all comments%0A if request.user.role == Role.objects.get(id=1):%0A return qs%0A%0A return qs.filter(added_by=request.user)%22%22%22%0A%0A
225b49d12771dc76a21d1f7e2836c13e84a4868e
fix CronTrigger.get_next_fire_time() bug: calculate start_date problem
apscheduler/triggers/cron/__init__.py
apscheduler/triggers/cron/__init__.py
from datetime import datetime, timedelta

from tzlocal import get_localzone
import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import BaseField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES
from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints, similarly to how the UNIX cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
        (defaults to scheduler timezone)

    .. note:: The first weekday is always **monday**.
    """

    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second')
    FIELDS_MAP = {
        'year': BaseField,
        'month': BaseField,
        'week': WeekField,
        'day': DayOfMonthField,
        'day_of_week': DayOfWeekField,
        'hour': BaseField,
        'minute': BaseField,
        'second': BaseField
    }

    __slots__ = 'timezone', 'start_date', 'end_date', 'fields'

    def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
                 minute=None, second=None, start_date=None, end_date=None, timezone=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif end_date and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields to their minimum values.

        :type dateval: datetime
        :type fieldnum: int
        :return: a tuple containing the new date, and the number of the field that was actually incremented
        :rtype: tuple
        """

        values = {}
        i = 0
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return self.timezone.normalize(dateval + difference), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return self.timezone.normalize(dateval + difference)

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            start_date = max(now, previous_fire_time + timedelta(microseconds=1))
        else:
            start_date = max(now, self.start_date) if self.start_date else now

        fieldnum = 0
        next_date = datetime_ceil(start_date).astimezone(self.timezone)
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1)
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum, next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date, fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

            # Return if the date has rolled past the end date
            if self.end_date and next_date > self.end_date:
                return None

        if fieldnum >= 0:
            return next_date

    def __str__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        if self.start_date:
            options.append("start_date='%s'" % datetime_repr(self.start_date))
        return '<%s (%s)>' % (self.__class__.__name__, ', '.join(options))
Python
0.000001
@@ -5175,26 +5175,26 @@ art_date = m -ax +in (now, previo @@ -5275,18 +5275,18 @@ date = m -ax +in (now, se
487a94c844dc28cab0b9fe490ed28c749953e745
Improve error reporting of PluginRegistry
Cura/PluginRegistry.py
Cura/PluginRegistry.py
from Cura.PluginError import PluginError, PluginNotFoundError, InvalidMetaDataError

import imp
import os

##  A central object to dynamically load modules as plugins.
#
#   The PluginRegistry class can load modules dynamically and use
#   them as plugins. Each plugin module is expected to be a directory with
#   and `__init__` file defining a `getMetaData` and a `register` function.
#
#   `getMetaData` should return a dictionary of metadata, with the "name"
#   and "type" keys expected to be set. The register function is passed
#   the application object as parameter and is expected to register the
#   appropriate classes with the appropriate objects.
#
#   Plugins can be located in any location listed in the plugin locations.
#   The plugin locations are scanned recursively for plugins.
class PluginRegistry(object):
    def __init__(self):
        super(PluginRegistry,self).__init__() # Call super to make multiple inheritence work.
        self._plugins = {}
        self._meta_data = {}
        self._plugin_locations = []
        self._application = None

    ##  Load a single plugin by name
    #   \param name \type{string} The name of the plugin
    #   \exception PluginNotFoundError Raised when the plugin could not be found.
    def loadPlugin(self, name):
        if name in self._plugins:
            # Already loaded, do not load again
            if(self._application is not None):
                self._application.log('w', 'Plugin %s was already loaded',name)
            return

        plugin = self._findPlugin(name)
        if not plugin:
            raise PluginNotFoundError(name)

        if name not in self._meta_data:
            self._populateMetaData(name)

        try:
            plugin.register(self._application)
            self._application.log('i', 'Loaded plugin %s', name)
            self._plugins[name] = plugin
        except PluginError as e:
            self._application.log('e', e)
        except AttributeError as e:
            self._application.log('e', e)

    ##  Load all plugins matching a certain set of metadata
    #   \param metaData \type{dict} The metaData that needs to be matched.
    #   \sa loadPlugin
    def loadPlugins(self, meta_data):
        plugin_names = self._findAllPlugins()

        for name in plugin_names:
            plugin_data = self.getMetaData(name)
            if self._subsetInDict(plugin_data, meta_data):
                self.loadPlugin(name)

    ##  Get the metadata for a certain plugin
    #   \param name \type{string} The name of the plugin
    #   \return \type{dict} The metadata of the plugin. Can be an empty dict.
    #   \exception InvalidMetaDataError Raised when no metadata can be found or the metadata misses the right keys.
    def getMetaData(self, name):
        if name not in self._meta_data:
            if not self._populateMetaData(name):
                return {}

        return self._meta_data[name]

    ##  Get a list of all metadata matching a certain subset of metaData
    #   \param metaData \type{dict} The subset of metadata that should be matched.
    #   \sa getMetaData
    def getAllMetaData(self, metaData):
        pluginNames = self._findAllPlugins()

        returnVal = []
        for name in pluginNames:
            pluginData = self.getMetaData(name)
            if self._subsetInDict(pluginData, metaData):
                returnVal.append(pluginData)

        return returnVal

    ##  Get the list of plugin locations
    #   \return \type{list} The plugin locations
    def getPluginLocations(self):
        return self._plugin_locations

    ##  Add a plugin location to the list of locations to search
    #   \param location \type{string} The location to add to the list
    def addPluginLocation(self, location):
        #TODO: Add error checking!
        self._plugin_locations.append(location)

    ##  Set the central application object
    #   This is used by plugins as a central access point for other objects
    #   \param app \type{Application} The application object to use
    def setApplication(self, app):
        self._application = app

    ## private:

    #   Populate the list of metadata
    def _populateMetaData(self, name):
        plugin = self._findPlugin(name)
        if not plugin:
            self._application.log('e', 'Could not find plugin %s', name)
            return False

        meta_data = None
        try:
            meta_data = plugin.getMetaData()
        except AttributeError as e:
            print(e)
            raise InvalidMetaDataError(name)

        if not meta_data or (not "name" in meta_data and not "type" in meta_data):
            raise InvalidMetaDataError(name)

        self._meta_data[name] = meta_data
        return True

    #   Try to find a module implementing a plugin
    #   \param name The name of the plugin to find
    #   \returns module if it was found None otherwise
    def _findPlugin(self, name):
        location = None
        for folder in self._plugin_locations:
            location = self._locatePlugin(name, folder)

        if not location:
            return None

        try:
            file, path, desc = imp.find_module(name, [ location ])
        except ImportError as e:
            print(e)
            return False

        try:
            module = imp.load_module(name, file, path, desc)
        except ImportError as e:
            print(e)
            return False
        finally:
            if file:
                os.close(file)

        return module

    #   Returns a list of all possible plugin names in the plugin locations
    def _findAllPlugins(self, paths = None):
        names = []

        if not paths:
            paths = self._plugin_locations

        for folder in paths:
            for file in os.listdir(folder):
                filepath = os.path.join(folder, file)
                if os.path.isdir(filepath):
                    if os.path.isfile(os.path.join(filepath, '__init__.py')):
                        names.append(file)
                    else:
                        names += self._findAllPlugins([ filepath ])

        return names

    #   Try to find a directory we can use to load a plugin from
    #   \param name The name of the plugin to locate
    #   \param folder The base folder to look into
    def _locatePlugin(self, name, folder):
        for file in os.listdir(folder):
            filepath = os.path.join(folder, file)
            if os.path.isdir(filepath):
                if file == name:
                    return folder
                else:
                    filepath = self._locatePlugin(name, filepath)
                    if filepath:
                        return filepath

        return False

    #   Check if a certain dictionary contains a certain subset of key/value pairs
    #   \param dictionary The dictionary to search
    #   \param subset The subset to search for
    def _subsetInDict(self, dictionary, subset):
        for key in subset:
            if key not in dictionary:
                return False
            if dictionary[key] != subset[key]:
                return False
        return True
Python
0
@@ -5334,33 +5334,86 @@ print( -e +%22Import error when importing %7B0%7D: %7B1%7D%22.format(name, e) )%0A re @@ -5541,33 +5541,86 @@ print( -e +%22Import error loading module %7B0%7D: %7B1%7D%22.format(name, e) )%0A re
30230a29ea8ee40121a83bb5a3203f80a296cf0e
Bump to v0.5.0
setup.py
setup.py
""" ---------------- Flask-Mustache ---------------- `Mustache`__ integration for Flask. __ http://mustache.github.com/ Flask-Mustache adds template helpers and context processors to assist Flask developers with integrating the Mustache library into their development process. """ from setuptools import setup setup( name='Flask-MustacheJS', version='0.4.9', url='https://github.com/bradleywright/flask-mustachejs', license='BSD', author='Bradley Wright', author_email='brad@intranation.com', description='Mustache integration in Flask, with Jinja and client-side libraries.', long_description=__doc__, packages=['flask_mustache'], zip_safe=False, include_package_data=True, # include static assets package_data = { '': ['*.jinja', '*.js'] }, platforms='any', install_requires=[ 'Flask', 'pystache' ], classifiers=[ 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
Python
0
@@ -366,11 +366,11 @@ ='0. -4.9 +5.0 ',%0A
f61c82152042197d03d49b3ba9a141d025edf1c9
Add cp.get_url test
tests/integration/modules/cp.py
tests/integration/modules/cp.py
# Import python libs
import os

# Import salt libs
import integration


class CPModuleTest(integration.ModuleCase):
    '''
    Validate the test module
    '''
    def test_get_file(self):
        '''
        cp.get_file
        '''
        tgt = os.path.join(integration.TMP, 'scene33')
        self.run_function(
                'cp.get_file',
                [
                    'salt://grail/scene33',
                    tgt,
                ])
        with open(tgt, 'r') as scene:
            data = scene.read()
            self.assertIn('KNIGHT: They\'re nervous, sire.', data)
            self.assertNotIn('bacon', data)

    def test_get_template(self):
        '''
        cp.get_template
        '''
        tgt = os.path.join(integration.TMP, 'scene33')
        self.run_function(
                'cp.get_template',
                [
                    'salt://grail/scene33',
                    tgt,
                    'spam=bacon',
                ])
        with open(tgt, 'r') as scene:
            data = scene.read()
            self.assertIn('bacon', data)
            self.assertNotIn('spam', data)

    def test_get_dir(self):
        '''
        cp.get_dir
        '''
        tgt = os.path.join(integration.TMP, 'many')
        self.run_function(
                'cp.get_dir',
                [
                    'salt://grail',
                    tgt
                ])
        self.assertIn('grail', os.listdir(tgt))
        self.assertIn('36', os.listdir(os.path.join(tgt, 'grail')))
        self.assertIn('empty', os.listdir(os.path.join(tgt, 'grail')))
        self.assertIn('scene', os.listdir(os.path.join(tgt, 'grail', '36')))
Python
0.000001
@@ -1658,12 +1658,555 @@ l', '36')))%0A +%0A def test_get_url(self):%0A '''%0A cp.get_url%0A '''%0A # We should add a %22if the internet works download some files%22%0A tgt = os.path.join(integration.TMP, 'scene33')%0A self.run_function(%0A 'cp.get_url',%0A %5B%0A 'salt://grail/scene33',%0A tgt,%0A %5D)%0A with open(tgt, 'r') as scene:%0A data = scene.read()%0A self.assertIn('KNIGHT: They%5C're nervous, sire.', data)%0A self.assertNotIn('bacon', data)%0A%0A
5d9cea1b0cefaeb38dd86623e05144aa3b91ff9e
Update mangaChapterDownload.py
comic_dl/manga_eden/mangaChapterDownload.py
comic_dl/manga_eden/mangaChapterDownload.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cfscrape
import requests
import json
import sys
import os
import globalFunctions


class MangaChapterDownload():

    def __init__(self, page_id, download_directory, **kwargs):
        self.page_id = str(page_id).strip()
        self.manga_name = str(kwargs.get("manga_name"))
        self.chapter_number = str(kwargs.get("chapter_number"))
        self.logging = kwargs.get("log_flag")
        self.conversion = kwargs.get("conversion")
        self.keep_files = kwargs.get("keep_files")
        self.json_content = self.json_download(page_id=self.page_id)
        self.image_links = self.link_lookup(json_source=self.json_content)

        if self.manga_name == "" or self.chapter_number == "":
            try:
                self.manga_name = raw_input("Please Enter Manga Name : ")
                self.chapter_number = raw_input("Please Enter Chapter Number : ")
            except Exception as WrongInputType:
                # If python3, then raw_input() won't work.
                self.manga_name = input("Please Enter Manga Name : ")
                self.chapter_number = input("Please Enter Chapter Number : ")
        else:
            pass

        file_directory = globalFunctions.GlobalFunctions().create_file_directory(self.chapter_number, self.manga_name)

        directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))

        if not os.path.exists(directory_path):
            os.makedirs(directory_path)

        links = []
        file_names = []
        for image in self.image_links:
            link = self.image_links[image]
            file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(str(image), len(self.image_links))) + str(
                link[-4:])
            file_names.append(file_name)
            links.append(link)

        globalFunctions.GlobalFunctions().multithread_download(self.chapter_number, self.manga_name, None,
                                                               directory_path, file_names, links, self.logging)

        globalFunctions.GlobalFunctions().conversion(directory_path, self.conversion, self.keep_files,
                                                     self.manga_name, self.chapter_number)

    def json_download(self, page_id):
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate'
        }

        sess = requests.session()
        sess = cfscrape.create_scraper(sess)

        search_url = "http://www.mangaeden.com/api/chapter/{0}/".format(page_id)

        connection = sess.get(search_url, headers=headers)
        if connection.status_code != 200:
            print("Whoops! Seems like I can't connect to website.")
            print("It's showing : %s" % connection)
            print("Run this script with the --verbose argument and report the issue along with log file on Github.")
            sys.exit(1)
        else:
            json_data = connection.content
            return json_data

    def link_lookup(self, json_source):
        image_links = {}
        """ the images's urls and sizes of the chapter are received via this API """
        page_json = json.loads(json_source)
        # print(page_json["images"])
        list_of_pages = list(page_json["images"])
        for page in list_of_pages:
            # print(page)
            image_links[page[0]] = "https://cdn.mangaeden.com/mangasimg/" + str(page[1])

        # Let's sort this dictionary based on the chapter count (KEYS).
        sorted(image_links.items(), key=lambda s: s[0])

        if image_links:
            return image_links
        else:
            return None
Python
0
@@ -48,23 +48,27 @@ import c -f +loud scrape +r %0Aimport @@ -2598,15 +2598,19 @@ = c -f +loud scrape +r .cre
b617b778d2e442d8e2ab6a2098f7799bfb83bf85
Bump version to 0.8.1
setup.py
setup.py
#!/usr/bin/env python
"""Setup ACAPI package."""

import os
from setuptools import setup

with open(os.path.join(os.path.dirname(__name__), "README.md")) as f:
    long_description = f.read()

setup(
    name="acapi",
    version="0.8.0",
    description="Acquia Cloud API client.",
    long_description=long_description,
    author="Dave Hall",
    author_email="me@davehall.com.au",
    url="http://github.com/skwashd/python-acquia-cloud",
    install_requires=["requests==2.22.0", "requests-cache==0.5.2"],
    license="MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Internet",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["acapi", "acapi.resources"],
)
Python
0
@@ -229,17 +229,17 @@ on=%220.8. -0 +1 %22,%0A d
c7db3806a57935921c733609a265e505654ab601
Add Korean language
Cura/util/resources.py
Cura/util/resources.py
#coding:utf8
"""
Helper module to get easy access to the path where resources are stored.
This is because the resource location is depended on the packaging method and OS
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"

import os
import sys
import glob
import gettext

if sys.platform.startswith('darwin'):
	try:
		#Foundation import can crash on some MacOS installs
		from Foundation import *
	except:
		pass

if sys.platform.startswith('darwin'):
	if hasattr(sys, 'frozen'):
		try:
			resourceBasePath = NSBundle.mainBundle().resourcePath()
		except:
			resourceBasePath = os.path.join(os.path.dirname(__file__), "../../../../../")
	else:
		resourceBasePath = os.path.join(os.path.dirname(__file__), "../../resources")
else:
	resourceBasePath = os.path.join(os.path.dirname(__file__), "../../resources")

def getPathForResource(dir, subdir, resource_name):
	assert os.path.isdir(dir), "{p} is not a directory".format(p=dir)
	path = os.path.normpath(os.path.join(dir, subdir, resource_name))
	if not os.path.isfile(path):
		return None
	return path

def getPathForImage(name):
	return getPathForResource(resourceBasePath, 'images', name)

def getPathForMesh(name):
	return getPathForResource(resourceBasePath, 'meshes', name)

def getPathForFirmware(name):
	return getPathForResource(resourceBasePath, 'firmware', name)

def getDefaultMachineProfiles():
	path = os.path.normpath(os.path.join(resourceBasePath, 'machine_profiles', '*.ini'))
	return glob.glob(path)

def setupLocalization(selectedLanguage = None):
	#Default to english
	languages = ['en']

	if selectedLanguage is not None:
		for item in getLanguageOptions():
			if item[1] == selectedLanguage and item[0] is not None:
				languages = [item[0]]

	locale_path = os.path.normpath(os.path.join(resourceBasePath, 'locale'))
	translation = gettext.translation('Cura', locale_path, languages, fallback=True)
	#translation.ugettext = lambda message: u'#' + message
	translation.install(unicode=True)

def getLanguageOptions():
	return [
		['en', 'English'],
		['de', 'Deutsch'],
		['fr', 'French'],
		# ['zh', 'Chinese'],
		# ['nl', 'Nederlands'],
		# ['es', 'Spanish'],
		# ['po', 'Polish']
	]
Python
0.999998
@@ -2098,24 +2098,44 @@ 'French'%5D,%0A +%09%09%5B'ko', 'Korean'%5D,%0A %09%09# %5B'zh', '
d2b08f37fce58cd5fedabe17430dc172a8b0c7d7
fix broken setup :-(
setup.py
setup.py
# -*- coding: utf-8 -*-

from setuptools import setup, find_packages

from coherence import __version__

setup(
    name="Coherence",
    version=__version__,
    description="""Coherence - DLNA/UPnP framework for the digital living""",
    long_description="""Coherence is a framework written in Python, providing a variety of
UPnP MediaServer and UPnP MediaRenderer implementations for instant use.

Furthermore it enables your application to participate in digital living networks,
at the moment primarily the DLNA/UPnP universe.

Its objective and demand is to relieve your application from all the membership/the
UPnP related tasks as much as possible.

New in this 0.5.4 - Fools Garden - release

 * a DesktopApplet to easily start a Coherence instance from your desktops panel
   Thx to Erwan Velu, Helio Chissini de Castro and Nicolas Lécureuil!
 * more efforts to simplify the ordinary user experience
 * allow now the backend definition via commandline, to just start up a MediaServer
   or anything else, without bothering oneself with the config file
 * specify logfile location and daemonization on the commandline too
 * a bit more usable --help output
   Thx again Erwan Velu!
 * a MediaServer backend for Ampache - a Web-based Audio file manager (http://ampache.org)
   Thx to the awesome help of Karl Vollmer!
 * device implementations for BinaryLight and DimmableLight
 * a little helper to extract device and service xml files and send them to us
   - a beginning of our UPnP device fingerprint program
 * and the usual bugfixes and enhancements
""",
    author="Frank Scholz",
    author_email='coherence@beebits.net',
    license = "MIT",
    packages=['coherence','misc'],
    scripts = ['bin/coherence','misc/Desktop Applet/applet-coherence'],
    url = "http://coherence.beebits.net",
    download_url = 'https://coherence.beebits.net/download/Coherence-%s.tar.gz' % __version__,
    keywords=['UPnP', 'DLNA', 'multimedia', 'gstreamer'],
    classifiers = ['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'Environment :: Web Environment',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   ],
    entry_points="""
        [coherence.plugins.backend.media_server]
        FSStore = coherence.backends.fs_storage:FSStore
        MediaStore = coherence.backends.mediadb_storage:MediaStore
        ElisaMediaStore = coherence.backends.elisa_storage:ElisaMediaStore
        FlickrStore = coherence.backends.flickr_storage:FlickrStore
        AxisCamStore = coherence.backends.axiscam_storage:AxisCamStore
        BuzztardStore = coherence.backends.buzztard_control:BuzztardStore
        IRadioStore = coherence.backends.iradio_storage:IRadioStore
        LastFMStore = coherence.backends.lastfm_storage:LastFMStore
        AmpacheStore = coherence.backends.ampache_storage:AmpacheStore

        [coherence.plugins.backend.media_renderer]
        ElisaPlayer = coherence.backends.elisa_renderer:ElisaPlayer
        GStreamerPlayer = coherence.backends.gstreamer_audio_player:GStreamerPlayer
        BuzztardPlayer = coherence.backends.buzztard_control:BuzztardPlayer

        [coherence.plugins.backend.binary_light]
        SimpleLight = coherence.backends.light:SimpleLight

        [coherence.plugins.backend.dimmable_light]
        BetterLight = coherence.backends.light:BetterLight
    """,
    package_data = {
        'coherence': ['upnp/core/xml-service-descriptions/*.xml',
                      'web/static/*.css','web/static/*.js'],
        'misc': ['Desktop Applet/*.png'],
    },
    install_requires=[
        'Louie >= 1.1',
        'ConfigObj >= 4.3',
    ],
)
Python
0
@@ -98,16 +98,68 @@ sion__%0A%0A +packages = find_packages()%0Apackages.append('misc')%0A%0A setup(%0A @@ -1716,28 +1716,16 @@ ges= -%5B'coherence','misc'%5D +packages ,%0A
562f28a37d5187f1813545ca5a18f56b8a7d1e51
Linear interpolation
audio_to_img_to_audio/img_to_audio.py
audio_to_img_to_audio/img_to_audio.py
#!/usr/bin/env python

import struct
import sys
import wave

from PIL import Image

width = None
height = None

sampleRate = 48000
duration = 3
frequency = 440.0

with Image.open(sys.argv[1]) as img:
    # convert image to 1-bit B&W image
    bw_image = img.convert()
    (width, height) = bw_image.size

    min_max = []
    for w in range(width):
        max_h = 0
        min_h = height
        for h in range(height):
            (r, g, b, _) = bw_image.getpixel((w, h))
            if r + g + b != 0:
                max_h = max(max_h, h)
                min_h = min(min_h, h)
        if min_h > max_h:
            # w pixel column only has black pixels
            min_h = max_h = 0
        assert min_h <= max_h, f"{w} x {h}: {min_h} / {max_h}"
        min_max.append((min_h, max_h))

# each column of the image will be "stretched" to this many "frame" of the wave file
wave_frame = int(duration * sampleRate / len(min_max))

image_range = height

# range of a 16-bit wave
wave_min = -32768
wave_max = 32767
wave_range = wave_max - wave_min

scale_value = lambda v: int(v * wave_range / image_range) + wave_min

with wave.open("output-%s.wav" % sys.argv[1], 'w') as wave_file:
    wave_file.setnchannels(1)  # mono
    wave_file.setsampwidth(2)
    wave_file.setframerate(sampleRate)

    for idx, (min_val, max_val) in enumerate(min_max):
        value = min_val if idx % 2 == 0 else max_val
        scaled_value = scale_value(value)
        # we repeat that same value 'wave_frame' time in the file
        for _ in range(wave_frame):
            data = struct.pack('<h', scaled_value)
            wave_file.writeframesraw(data)
Python
0.999927
@@ -1291,52 +1291,83 @@ -value = min_val if idx %25 2 == 0 else +scaled_min = scale_value(min_val)%0A scaled_max = scale_value( max_val +) %0A @@ -1375,107 +1375,132 @@ -scaled_value = scale_value(value)%0A # we repeat that same value 'wave_frame' time in the file +per_frame_diff = int((scaled_max - scaled_min) / wave_frame)%0A # we gradually go from min to max in %22wave_frame%22 steps %0A @@ -1508,17 +1508,20 @@ for -_ +step in rang @@ -1579,21 +1579,45 @@ scaled_ -value +min + (step * per_frame_diff) )%0A
b5d679b7dd10f928d487a43ba5e233598eeb6dfa
Renamed daily_prepost_onset() to daily_rel2onset(). Housekeeping
utils.py
utils.py
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')

import numpy as np
import xray
import matplotlib.pyplot as plt

import atmos as atm
import merra

# ----------------------------------------------------------------------
def daily_prepost_onset(data, d_onset, npre, npost, daynm='Day', yearnm='Year'):
    """Return subset of daily data aligned relative to onset day.

    Parameters
    ----------
    data : xray.DataArray
        Daily data.
    d_onset : ndarray
        Array of onset date (day of year) for each year.
    npre, npost : int
        Number of days before and after onset to extract.
    daynm, yearnm : str, optional
        Name of day and year dimensions in data.

    Returns
    -------
    data_out : xray.DataArray
        Subset of N days of daily data for each year, where
        N = npre + npost + 1 and the day dimension is
        dayrel = day - d_onset.
    """

    name, attrs, coords, dimnames = atm.meta(data)
    years = atm.get_coord(data, coord_name=yearnm)

    if isinstance(d_onset, xray.DataArray):
        d_onset = d_onset.values

    dayrel = np.arange(-npre, npost + 1)
    relnm = daynm + 'rel'

    for y, year in enumerate(years):
        dmin, dmax = d_onset[y] - npre, d_onset[y] + npost
        sub = atm.subset(data, yearnm, year, None, daynm, dmin, dmax)
        sub = sub.rename({daynm : relnm})
        sub[relnm] = dayrel
        sub[relnm].attrs['long_name'] = 'Day of year relative to onset day'
        if y == 0:
            data_out = sub
        else:
            data_out = xray.concat([data_out, sub], dim=yearnm)

    data_out.attrs['d_onset'] = d_onset
    return data_out

# ----------------------------------------------------------------------
def comp_days_centered(ndays):
    """Return days for pre/onset/post composites centered on onset.

    Output days are day of year relative to onset day.
    """
    ndays = int(ndays)
    n1 = int(ndays // 2)
    n2 = ndays - n1
    reldays = {}
    reldays['pre'] = np.arange(-n1 - ndays, -n1)
    reldays['onset'] = np.arange(-n1, n2)
    reldays['post'] = np.arange(n2, n2 + ndays)
    return reldays
Python
0.999999
@@ -188,16 +188,35 @@ t as plt +%0Aimport collections %0A%0Aimport @@ -330,16 +330,12 @@ ily_ -p re -post_ +l2 onse @@ -1838,16 +1838,26 @@ ed(ndays +, offset=0 ):%0A %22 @@ -1928,30 +1928,348 @@ -Output days are day of +Parameters%0A ----------%0A ndays : int%0A Number of days to average in each composite.%0A offset : int, optional%0A Number of offset days between pre/onset and onset/post%0A day ranges.%0A%0A Returns%0A -------%0A reldays : dict of arrays%0A Components are 'pre', 'onset', and 'post', arrays of days%0A of the yea @@ -2291,16 +2291,36 @@ nset day +, for each composite .%0A %22%22 @@ -2409,10 +2409,33 @@ s = -%7B%7D +collections.OrderedDict() %0A @@ -2463,16 +2463,25 @@ arange(- +offset - n1 - nda @@ -2485,16 +2485,25 @@ ndays, - +offset - n1)%0A @@ -2576,11 +2576,29 @@ nge( -n2, +offset + n2, offset + n2
d9db045fda3607fb1d9ff6d5949f301d40491d9c
add timing steps for open / sync / close
commands/sync_benchmark/rps_detach_audit.py
commands/sync_benchmark/rps_detach_audit.py
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import OpenOptions, SynchronizeWithCentralOptions, DetachFromCentralOption, RelinquishOptions
from Autodesk.Revit.DB import TransactWithCentralOptions
from Autodesk.Revit.DB import FilePath
from Autodesk.Revit.DB import WorksetConfiguration, WorksetConfigurationOption

import System

import os
from datetime import datetime
from collections import defaultdict

iterations = 10
timing_map = defaultdict(float)
time_now = str(datetime.now())
info = ""
app = __revit__.Application

if "RVT_QC_PRJ" not in os.environ:
    print("no model specified")
else:
    active_nic = ""
    test_ip = "9.9.9.9"
    udp_conn = System.Net.Sockets.UdpClient(test_ip, 1)
    local_addr = udp_conn.Client.LocalEndPoint.Address

    for nic in System.Net.NetworkInformation.NetworkInterface.GetAllNetworkInterfaces():
        ip_props = nic.GetIPProperties()
        for addr_info in ip_props.UnicastAddresses:
            if local_addr.ToString() == addr_info.Address.ToString():
                active_nic = nic.Description

    project = os.environ["RVT_QC_PRJ"]
    model_path = os.environ["RVT_QC_PATH"]
    pc_stats = os.environ["pc_stats"]
    rvt_path = FilePath(model_path)

    ws_conf = WorksetConfiguration(WorksetConfigurationOption.CloseAllWorksets)
    open_opt = OpenOptions()
    open_opt.SetOpenWorksetsConfiguration(ws_conf)

    sync_opt = SynchronizeWithCentralOptions()
    relinquish_opt = RelinquishOptions(True)
    sync_opt.SetRelinquishOptions(relinquish_opt)
    sync_opt.SaveLocalAfter = True
    # sync_opt.Compact = True
    sync_opt.Comment = "syncing"
    trans_opt = TransactWithCentralOptions()

    print(time_now)
    print("machine stats:\n{}".format(pc_stats))
    print(active_nic)
    print("timing: {} {} times".format(model_path, iterations))

    for i in range(iterations):
        start = datetime.now()
        doc = app.OpenDocumentFile(rvt_path, open_opt)
        doc.SynchronizeWithCentral(trans_opt, sync_opt)
        doc.Close()
        end = datetime.now()
        timing_result = end - start
        timing_map[i] = timing_result.total_seconds()

    print(35*"=")
    print("iter:seconds")

    for iteration, timing in timing_map.items():
        print("{}: {}".format(str(iteration).zfill(4), timing))

    print(35*"=")
    print("average timing:")
    average = sum(timing_map.values()) / iterations
    print("{} seconds".format(average))

    log_info = "{};".format(time_now)
    log_info += "{}:{};".format(app.VersionNumber, app.VersionBuild)

    model_path = os.environ["RVT_QC_PATH"]
    file_size = str(int(os.path.getsize(model_path))/1000000)
    log_dir = os.environ.get("RVT_LOG_PATH")
    project = os.environ.get("RVT_QC_PRJ")
    pc_stats = os.environ.get("pc_stats")

    log_info += "{};".format(file_size)
    log_info += pc_stats
    log_info += "average seconds:{};".format(average)
    log_info += "iterations:{};".format(iterations)

    if log_dir:
        log_file = os.path.join(log_dir, project + "_benchmark_" + ".csv")
        with open(log_file, "a") as csv_file:
            csv_file.write(log_info + "\n")

    if log_dir:
        log_file = os.path.join(log_dir, project + "_benchmark_single_iteration_timing_" + ".csv")
        with open(log_file, "a") as csv_file:
            for iternum, timing in timing_map.items():
                csv_file.write("{};{};{}\n".format(time_now, iternum, timing))
Python
0.000001
@@ -1878,32 +1878,75 @@ datetime.now()%0A%0A + print(%22 start: %7B%7D%22.format(start))%0A doc = ap @@ -1996,74 +1996,249 @@ -doc.SynchronizeWithCentral(trans_opt, sync_opt)%0A doc.Close( +print(%22 openend: %7B%7D%22.format(str(datetime.now())))%0A doc.SynchronizeWithCentral(trans_opt, sync_opt)%0A print(%22 synced: %7B%7D%22.format(str(datetime.now())))%0A doc.Close()%0A print(%22 closed: %7B%7D%22.format(str(datetime.now())) )%0A%0A
a164a1afa5c39847b629b4acf5dddb0180e7480c
version bump
setup.py
setup.py
""" mipsy setup.py """ from distutils.core import setup setup( name='mipsy', version='0.1.0', author='Nick Miller', author_email='ngmiller@iastate.edu', packages=['mipsy'], scripts=['bin/mipsy'], url='https://github.com/ngmiller/mips-assembler', license='LICENSE', description='MIPS32 assembler.', install_requires=[ 'bitstring>=3.1.2', ], )
Python
0.000001
@@ -93,17 +93,17 @@ on='0.1. -0 +1 ',%0A a
966fd550508577d58e2002f2317d8190d1b31374
correct bug in writing static instead of host routes
autonetkit/compilers/device/ubuntu.py
autonetkit/compilers/device/ubuntu.py
from autonetkit.compilers.device.server_base import ServerCompiler
import autonetkit.log as log

class UbuntuCompiler(ServerCompiler):

    def compile(self, node):
        super(UbuntuCompiler, self).compile(node)
        # up route add -net ${route.network} gw ${router.gw} dev ${route.interface}
        self.static_routes(node)

    def static_routes(self, node):
        node.static_routes_v4 = [] # initialise for case of no routes -> simplifies template logic
        node.host_routes_v4 = [] # initialise for case of no routes -> simplifies template logic
        node.static_routes_v6 = [] # initialise for case of no routes -> simplifies template logic
        node.host_routes_v6 = [] # initialise for case of no routes -> simplifies template logic
        if not self.anm['phy'].data.enable_routing:
            log.info("Routing disabled, not configuring static routes for Ubuntu server %s" % node)
            return

        if self.anm['phy'].node(node).dont_configure_static_routing:
            log.info("Static routing disabled for server %s" % node)
            return

        l3_conn_node = self.anm['l3_conn'].node(node)
        phy_node = self.anm['phy'].node(node)
        gateway_list = [n for n in l3_conn_node.neighbors() if n.is_router]
        if not len(gateway_list):
            log.warning("Server %s is not directly connected to any routers" % node)
            return
        else:
            gateway = gateway_list[0] # choose first (and only gateway)
            if len(gateway_list) > 1:
                log.info("Server %s is multi-homed, using gateway %s" % (node, gateway))

        #TODO: warn if server has no neighbors in same ASN (either in design or verification steps)
        #TODO: need to check that servers don't have any direct ebgp connections

        gateway_edge_l3 = self.anm['l3_conn'].edge(node, gateway)
        server_interface = gateway_edge_l3.src_int
        server_interface_id = self.nidb.interface(server_interface).id
        gateway_interface = gateway_edge_l3.dst_int

        gateway_ipv4 = gateway_ipv6 = None
        if node.ip.use_ipv4:
            gateway_ipv4 = gateway_interface['ipv4'].ip_address
        if node.ip.use_ipv6:
            gateway_ipv6 = gateway_interface['ipv6'].ip_address

        #TODO: look at aggregation
        #TODO: catch case of ip addressing being disabled
        #TODO: handle both ipv4 and ipv6

        # IGP advertised infrastructure pool from same AS
        for infra_route in self.anm['ipv4'].data['infra_blocks'][phy_node.asn]:
            #host_routes_v4
            route_entry = {
                "network": infra_route,
                "prefix": infra_route.network,
                "gw": gateway_ipv4,
                "interface": server_interface_id,
                "description": "Route to infra subnet in local AS %s via %s" % (phy_node.asn, gateway)
            }
            if infra_route.prefixlen == 32:
                node.host_routes_v4.append(route_entry)
            else:
                node.static_routes_v4.append(route_entry)

        # eBGP advertised loopbacks in all (same + other) ASes
        for asn, asn_routes in self.anm['ipv4'].data['loopback_blocks'].items():
            for asn_route in asn_routes:
                route_entry = {
                    "network": asn_route,
                    "prefix": asn_route.network,
                    "gw": gateway_ipv4,
                    "interface": server_interface_id,
                    "description": "Route to loopback subnet in AS %s via %s" % (asn, gateway),
                }
                if asn_route.prefixlen == 32:
                    node.host_routes_v4.append(route_entry)
                else:
                    node.static_routes_v4.append(route_entry)

        #TODO: combine the above logic into single step rather than creating dict then formatting with it
        cloud_init_static_routes = []
        for entry in node.static_routes_v4:
            formatted =("route add -net %s gw %s dev %s" % (entry.network, entry.gw, entry.interface))
            cloud_init_static_routes.append(formatted)
        for entry in node.static_routes_v4:
            formatted =("route add -host %s gw %s dev %s" % (entry.prefix, entry.gw, entry.interface))
            cloud_init_static_routes.append(formatted)

        node.cloud_init.static_routes = cloud_init_static_routes

        # Render inline for packaging into yaml
        #TODO: no longer used, but keep as reference for later templates that require this format
        #import autonetkit.render
        #import os
        #lookup = autonetkit.render.initialise_lookup()
        #render_template = os.path.join("templates", "linux", "static_route.mako")
        #render_output = autonetkit.render.render_inline(node, render_template)
        #node.cloud_init.static_routes = render_output
Python
0.000001
@@ -4095,38 +4095,36 @@ r entry in node. +ho st -atic _routes_v4:%0A
7c0d9a7972f04ae9e6ff62be46030753fbc59c2e
remove useless comment
benchbuild/projects/polybench/polybench.py
benchbuild/projects/polybench/polybench.py
from os import path

from benchbuild.project import Project
from benchbuild.settings import CFG
from benchbuild.utils.compiler import lt_clang
from benchbuild.utils.downloader import Wget
from benchbuild.utils.run import run
from benchbuild.utils.cmd import tar, cp


class PolyBenchGroup(Project):
    DOMAIN = 'polybench'
    GROUP = 'polybench'
    VERSION = '4.2'

    path_dict = {
        "correlation": "datamining",
        "covariance": "datamining",
        "2mm": "linear-algebra/kernels",
        "3mm": "linear-algebra/kernels",
        "atax": "linear-algebra/kernels",
        "bicg": "linear-algebra/kernels",
        "doitgen": "linear-algebra/kernels",
        "mvt": "linear-algebra/kernels",
        "cholesky": "linear-algebra/solvers",
        "durbin": "linear-algebra/solvers",
        "lu": "linear-algebra/solvers",
        "ludcmp": "linear-algebra/solvers",
        "gramschmidt": "linear-algebra/solvers",
        "trisolv": "linear-algebra/solvers",
        "gemm": "linear-algebra/blas",
        "gemver": "linear-algebra/blas",
        "gesummv": "linear-algebra/blas",
        "symm": "linear-algebra/blas",
        "syr2k": "linear-algebra/blas",
        "syrk": "linear-algebra/blas",
        "trmm": "linear-algebra/blas",
        "adi": "stencils",
        "fdtd-2d": "stencils",
        "heat-3d": "stencils",
        "jacobi-1d": "stencils",
        "jacobi-2d": "stencils",
        "seidel-2d": "stencils",
        "nussinov": "medley",
        "deriche": "medley",
        "floyd-warshall": "medley",
    }

    def __init__(self, exp):
        super(PolyBenchGroup, self).__init__(exp, "polybench")
        self.sourcedir = path.join(
            str(CFG["src_dir"]), "polybench", self.path_dict[self.name],
            self.name)
        self.setup_derived_filenames()

    src_dir = "polybench-c-{0}".format(VERSION)
    SRC_FILE = src_dir + ".tar.gz"
    src_uri = "http://downloads.sourceforge.net/project/polybench/" + SRC_FILE

    def download(self):
        Wget(self.src_uri, self.src_file)
        tar('xfz', path.join(self.builddir, self.src_file))

    def configure(self):
        cp("-ar", path.join(self.src_dir, self.path_dict[self.name],
                            self.name), self.name + ".dir")
        cp("-ar", path.join(self.src_dir, "utilities"), ".")

    def build(self):
        src_file = path.join(self.name + ".dir", self.name + ".c")
        clang = lt_clang(self.cflags, self.ldflags, self.compiler_extension)
        run(clang["-I", "utilities", "-I", self.name,
                  "-DPOLYBENCH_USE_C99_PROTO", "-DEXTRALARGE_DATASET",
                  "-DPOLYBENCH_USE_RESTRICT", "utilities/polybench.c",
                  src_file, "-lm", "-o", self.run_f])

# Datamining


class Correlation(PolyBenchGroup):
    NAME = 'correlation'


class Covariance(PolyBenchGroup):
    NAME = 'covariance'


class TwoMM(PolyBenchGroup):
    NAME = '2mm'


class ThreeMM(PolyBenchGroup):
    NAME = '3mm'


class Atax(PolyBenchGroup):
    NAME = 'atax'


class BicG(PolyBenchGroup):
    NAME = 'bicg'


class Doitgen(PolyBenchGroup):
    NAME = 'doitgen'


class Mvt(PolyBenchGroup):
    NAME = 'mvt'


class Gemm(PolyBenchGroup):
    NAME = 'gemm'


class Gemver(PolyBenchGroup):
    NAME = 'gemver'


class Gesummv(PolyBenchGroup):
    NAME = 'gesummv'


class Symm(PolyBenchGroup):
    NAME = 'symm'


class Syr2k(PolyBenchGroup):
    NAME = 'syr2k'


class Syrk(PolyBenchGroup):
    NAME = 'syrk'


class Trmm(PolyBenchGroup):
    NAME = 'trmm'


class Cholesky(PolyBenchGroup):
    NAME = 'cholesky'


class Durbin(PolyBenchGroup):
    NAME = 'durbin'


class Gramschmidt(PolyBenchGroup):
    NAME = 'gramschmidt'


class Lu(PolyBenchGroup):
    NAME = 'lu'


class LuDCMP(PolyBenchGroup):
    NAME = 'ludcmp'


class Trisolv(PolyBenchGroup):
    NAME = 'trisolv'


class Deriche(PolyBenchGroup):
    NAME = 'deriche'


class FloydWarshall(PolyBenchGroup):
    NAME = 'floyd-warshall'


class Nussinov(PolyBenchGroup):
    NAME = 'nussinov'


class Adi(PolyBenchGroup):
    NAME = 'adi'


class FDTD2D(PolyBenchGroup):
    NAME = 'fdtd-2d'


class Jacobi1D(PolyBenchGroup):
    NAME = 'jacobi-1d'


class Jacobi2Dimper(PolyBenchGroup):
    NAME = 'jacobi-2d'


class Seidel2D(PolyBenchGroup):
    NAME = 'seidel-2d'


class Heat3D(PolyBenchGroup):
    NAME = 'heat-3d'
Python
0.000003
@@ -2748,22 +2748,8 @@ %5D)%0A%0A -# Datamining%0A%0A %0Acla
697ac96146aa1996e9e50aeb89703a2bb8eb5219
update compact-couchdb management command
mygpo/maintenance/management/commands/compact-couchdb.py
mygpo/maintenance/management/commands/compact-couchdb.py
import sys
from datetime import datetime
from time import sleep

from couchdbkit import Database
from django.core.management.base import BaseCommand
from django.conf import settings

from mygpo.decorators import repeat_on_conflict
from mygpo.core.models import SanitizingRule
from mygpo.utils import progress


class Command(BaseCommand):
    """
    Compacts the database and all views, and measures the required time
    """

    def handle(self, *args, **options):
        db_url = settings.COUCHDB_DATABASES[0][1]
        db = Database(db_url)

        for name, compact, is_compacting, get_size in self.get_compacters(db):
            duration, size_before, size_after = self.compact_wait(compact, is_compacting, get_size)
            print '%-30s %17s %10s %10s' % (name, duration, self.prettySize(size_before), self.prettySize(size_after))


    def get_compacters(self, db):
        """ Returns tuples containing compaction tasks """

        compact_db = lambda: db.compact()
        db_is_compacting = lambda: db.info()['compact_running']
        get_db_size = lambda: db.info()['disk_size']
        yield ('database', compact_db, db_is_compacting, get_db_size)

        for design_doc in self.get_design_docs(db):
            compact_view = lambda: db.compact('%s' % design_doc)
            view_is_compacting = lambda: db.res.get('/_design/%s/_info' % design_doc).json_body['view_index']['compact_running']
            get_view_size = lambda: db.res.get('/_design/%s/_info' % design_doc).json_body['view_index']['disk_size']
            yield (design_doc, compact_view, view_is_compacting, get_view_size)


    @staticmethod
    def get_all_design_docs(db):
        """ Returns all design documents in the database """

        prefix = '_design/'
        prefix_len = len(prefix)
        return (ddoc['key'][prefix_len:] for ddoc in db.view('_all_docs', startkey='_design/', endkey='_design0'))


    def get_design_docs(self, db):
        """ Return one design doc for each index file """

        ddocs = {}
        for ddoc in self.get_all_design_docs(db):
            sig = db.res.get('/_design/%s/_info' % ddoc).json_body['view_index']['signature']
            ddocs[sig] = ddoc

        return ddocs.values()


    @staticmethod
    def compact_wait(compact, is_compacting, get_size, sleep_time=300, inc_factor = 1):
        """ Compacts the view and waits for the compaction to finish

        Reports elapsed time and the view size, before and after the
        compaction """

        start = datetime.utcnow()
        size_before = get_size()

        while True:
            try:
                compact()
                break
            except Exception, e:
                sleep(100)
                print >> sys.stderr, e

        while True:
            try:
                is_comp = is_compacting()
                if is_comp:
                    sleep(sleep_time)
                    sleep_time *= inc_factor
                else:
                    break
            except Exception, e:
                sleep(100)
                print >> sys.stderr, e

        end = datetime.utcnow()
        size_after = get_size()

        return end - start, size_before, size_after


    @staticmethod
    def prettySize(size):
        # http://snippets.dzone.com/posts/show/5434
        suffixes = [("B",2**10), ("K",2**20), ("M",2**30), ("G",2**40), ("T",2**50)]
        for suf, lim in suffixes:
            if size > lim:
                continue
            else:
                return round(size/float(lim/2**10),2).__str__()+suf
Python
0
@@ -56,16 +56,46 @@ rt sleep +%0Afrom urlparse import urlparse %0A%0Afrom c @@ -120,16 +120,46 @@ atabase%0A +from restkit import BasicAuth%0A from dja @@ -536,18 +536,39 @@ db_url +s = + set(db%5B1%5D for db in setting @@ -590,15 +590,266 @@ ASES -%5B0%5D%5B1%5D%0A +)%0A%0A filters = %5B%5D%0A%0A couchdb_admins = getattr(settings, 'COUCHDB_ADMINS', ())%0A if couchdb_admins:%0A username, passwd = couchdb_admins%5B0%5D%0A filters.append(BasicAuth(username, passwd))%0A%0A for db_url in db_urls:%0A @@ -872,27 +872,45 @@ e(db_url -)%0A%0A +, filters=filters)%0A for name @@ -901,19 +901,32 @@ + for + view_hash, name, c @@ -981,24 +981,28 @@ acters(db):%0A + @@ -1089,16 +1089,20 @@ t_size)%0A + @@ -1114,17 +1114,17 @@ rint '%25- -3 +4 0s %2517s @@ -1132,16 +1132,20 @@ 10s %2510s + %257s ' %25 (nam @@ -1214,16 +1214,31 @@ e_after) +, view_hash%5B:5%5D )%0A%0A%0A @@ -1518,17 +1518,20 @@ d (' -database' +', db.dbname , co @@ -1583,16 +1583,27 @@ for + view_hash, design_ @@ -1977,16 +1977,27 @@ yield ( +view_hash, design_d @@ -2670,13 +2670,12 @@ ocs. -value +item s()%0A @@ -3127,35 +3127,8 @@ e:%0A - sleep(100)%0A @@ -3153,32 +3153,59 @@ %3E%3E sys.stderr, e +%0A sleep(100) %0A%0A while @@ -3297,16 +3297,61 @@ s_comp:%0A + size_before = get_size()%0A @@ -3527,57 +3527,57 @@ -sleep(100)%0A print %3E%3E sys.stderr, e +print %3E%3E sys.stderr, e%0A sleep(100) %0A%0A
ff8b7ddbea7980c8464957880a1ae07afed49c64
Update antibody_lot.py
src/encoded/audit/antibody_lot.py
src/encoded/audit/antibody_lot.py
from ..auditor import (
    AuditFailure,
    audit_checker,
)


@audit_checker('antibody_lot')
def audit_antibody_lot_target(value, system):
    '''
    Antibody lots should not have associated characterizations
    for different target labels
    '''
    if value['status'] in ['not pursued', 'deleted']:
        return

    if not value['characterizations']:
        return

    for char in value['characterizations']:
        if char['target']['@id'] not in value['targets']:
            detail = 'The antibody_lot {} has a characterization {} with target {}, which is not in the targets list'.format(
                value['accession'],
                char['target']['label'],
                char['uuid'])
            yield AuditFailure('target mismatch', detail, level='ERROR')
Python
0
@@ -631,24 +631,54 @@ ccession'%5D,%0A + char%5B'uuid'%5D,%0A @@ -704,17 +704,16 @@ 'label'%5D -, %0A @@ -721,28 +721,16 @@ -char%5B'uuid'%5D )%0A
9cd0098b4e353cfaaa0723ac28a797d7400188b4
Use jasperpath in brain.py
client/brain.py
client/brain.py
# -*- coding: utf-8-*-
import logging
import os
import pkgutil
import importlib


def logError():
    logger = logging.getLogger('jasper')
    fh = logging.FileHandler('jasper.log')
    fh.setLevel(logging.WARNING)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.error('Failed to execute module', exc_info=True)


class Brain(object):

    def __init__(self, mic, profile):
        """
        Instantiates a new Brain object, which cross-references user
        input with a list of modules. Note that the order of brain.modules
        matters, as the Brain will cease execution on the first module
        that accepts a given input.

        Arguments:
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone number)
        """
        self.mic = mic
        self.profile = profile
        self.modules = self.get_modules()

    @classmethod
    def get_modules(cls):
        """
        Dynamically loads all the modules in the modules folder and sorts
        them by the PRIORITY key. If no PRIORITY is defined for a given
        module, a priority of 0 is assumed.
        """
        module_locations = [os.path.join(os.path.dirname(__file__), 'modules')]
        module_names = [name for loader, name, ispkg in pkgutil.iter_modules(module_locations)]
        modules = []
        for name in module_names:
            mod = importlib.import_module("modules.%s" % name)
            if hasattr(mod, 'WORDS'):
                modules.append(mod)
        modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY') else 0, reverse=True)
        return modules

    def query(self, text):
        """
        Passes user input to the appropriate module, testing it against
        each candidate module's isValid function.

        Arguments:
        text -- user input, typically speech, to be parsed by a module
        """
        for module in self.modules:
            if module.isValid(text):
                try:
                    module.handle(text, self.mic, self.profile)
                    break
                except:
                    logError()
                    self.mic.say(
                        "I'm sorry. I had some trouble with that operation. Please try again later.")
                    break
Python
0.000003
@@ -73,16 +73,34 @@ portlib%0A +import jasperpath%0A %0A%0Adef lo @@ -1318,58 +1318,30 @@ = %5B -os.path.join(os.path.dirname(__file__), 'modules') +jasperpath.PLUGIN_PATH %5D%0A
b0dd91b60c95b6a22f12b785e6cd686d97c2c689
Clean log configuration after cache_gc tests.
tests/cache_gc_unittest.py
tests/cache_gc_unittest.py
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake.  If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.

import mock
import unittest

from bin import cache_gc
from openquake import kvs
from openquake.kvs import tokens


class CacheGCTestCase(unittest.TestCase):
    """
    Tests for the various functions in the bin/cache_gc.py script.
    """

    @classmethod
    def setUpClass(cls):
        cls.client = kvs.get_client()
        cls.client.delete(tokens.CURRENT_JOBS)

    @classmethod
    def tearDownClass(cls):
        cls.client.delete(tokens.CURRENT_JOBS)

    def test_get_current_job_ids(self):
        """
        Given the test data, make sure that
        :py:function:`bin.cache_gc._get_current_job_ids` returns the correct
        IDs.
        """
        # create 3 jobs
        # this will add job keys to CURRENT_JOBS
        for job_id in range(1, 4):
            tokens.mark_job_as_current(job_id)

        job_ids = cache_gc._get_current_job_ids()
        self.assertEqual([1, 2, 3], job_ids)

    def test_clear_job_data_raises(self):
        """
        Test that :py:function:`bin.cache_gc.clear_job_data` raises
        a ValueError on invalid input.
        """
        self.assertRaises(ValueError, cache_gc.clear_job_data, '1234bad')

    def test_clear_job_data(self):
        """
        Verify that :py:function:`openquake.kvs.cache_gc` is called.

        :py:function:`openquake.kvs.cache_gc` will be mocked in this test
        since the actual code is exercised in a separate.
        """
        with mock.patch('openquake.kvs.cache_gc') as gc_mock:
            # we don't really care what the return val is
            gc_mock.return_value = 3

            # make sure cache_gc was called and the args are correct
            cache_gc.clear_job_data(1)
            self.assertEqual(1, gc_mock.call_count)
            self.assertEqual(
                ((kvs.JOB_KEY_FMT % 1, ), {}), gc_mock.call_args)

            # same thing, but this time with a str for the ID
            cache_gc.clear_job_data('2')
            self.assertEqual(2, gc_mock.call_count)
            self.assertEqual(
                ((kvs.JOB_KEY_FMT % 2, ), {}), gc_mock.call_args)
Python
0.000056
@@ -948,16 +948,65 @@ tokens%0A%0A +from tests.utils.helpers import cleanup_loggers%0A%0A %0Aclass C @@ -1342,24 +1342,123 @@ RENT_JOBS)%0A%0A + def setUp(self):%0A cleanup_loggers()%0A%0A def tearDown(self):%0A cleanup_loggers()%0A%0A def test
763689a72d9453813a8be25f568cce11ba119a2a
Remove old settings from tests module
aldryn_search/tests.py
aldryn_search/tests.py
from django.template import Template
from django.test import TestCase
from django.test.utils import override_settings
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.placeholdermodel import Placeholder
from cms.models import CMSPlugin
from aldryn_search.search_indexes import TitleIndex
from .helpers import get_plugin_index_data, get_request


test_settings = {
    'ALLOWED_HOSTS': ['localhost'],
    'CMS_LANGUAGES': {1: [{'code': 'en', 'name': 'English'}]},
    'CMS_TEMPLATES': (("whee.html", "Whee Template"),),
    'LANGUAGES': (('en', 'English'),),
    'LANGUAGE_CODE': 'en',
    'TEMPLATE_LOADERS': ('aldryn_search.tests.FakeTemplateLoader',),
}


class FakeTemplateLoader(object):
    is_usable = True

    def __init__(self, name, dirs):
        pass

    def __iter__(self):
        yield self.__class__
        yield "{{baz}}"


class NotIndexedPlugin(CMSPluginBase):
    model = CMSPlugin
    plugin_content = 'rendered plugin content'
    render_template = Template(plugin_content)

    def render(self, context, instance, placeholder):
        return context

plugin_pool.register_plugin(NotIndexedPlugin)


@override_settings(**test_settings)
class PluginIndexingTests(TestCase):

    def setUp(self):
        self.index = TitleIndex()
        self.request = get_request(language='en')

    def get_plugin(self):
        instance = CMSPlugin(
            language='en',
            plugin_type="NotIndexedPlugin",
            placeholder=Placeholder(id=1235)
        )
        instance.cmsplugin_ptr = instance
        instance.pk = 1234 # otherwise plugin_meta_context_processor() crashes
        return instance

    def test_plugin_indexing_is_enabled_by_default(self):
        cms_plugin = self.get_plugin()
        indexed_content = self.index.get_plugin_search_text(cms_plugin, self.request)
        self.assertEqual(NotIndexedPlugin.plugin_content, indexed_content)

    def test_plugin_indexing_can_be_disabled_on_model(self):
        cms_plugin = self.get_plugin()
        cms_plugin.search_fulltext = False
        indexed_content = self.index.get_plugin_search_text(cms_plugin, self.request)
        self.assertEqual('', indexed_content)

    def test_plugin_indexing_can_be_disabled_on_plugin(self):
        NotIndexedPlugin.search_fulltext = False
        try:
            self.assertEqual('', self.index.get_plugin_search_text(self.get_plugin(), self.request))
        finally:
            del NotIndexedPlugin.search_fulltext

    def test_page_title_is_indexed_using_prepare(self):
        """This tests the indexing path way used by update_index mgmt command"""
        from cms.api import create_page
        page = create_page(title="Whoopee", template="whee.html", language="en")

        from haystack import connections
        from haystack.constants import DEFAULT_ALIAS
        search_conn = connections[DEFAULT_ALIAS]
        unified_index = search_conn.get_unified_index()

        from cms.models import Title
        index = unified_index.get_index(Title)

        title = Title.objects.get(pk=page.title_set.all()[0].pk)
        index.index_queryset(DEFAULT_ALIAS)  # initialises index._backend_alias
        indexed = index.prepare(title)
        self.assertEqual('Whoopee', indexed['title'])
        self.assertEqual('Whoopee', indexed['text'])

    def test_page_title_is_indexed_using_update_object(self):
        """This tests the indexing path way used by the RealTimeSignalProcessor"""
        from cms.api import create_page
        page = create_page(title="Whoopee", template="whee.html", language="en")

        from haystack import connections
        from haystack.constants import DEFAULT_ALIAS
        search_conn = connections[DEFAULT_ALIAS]
        unified_index = search_conn.get_unified_index()

        from cms.models import Title
        index = unified_index.get_index(Title)

        title = Title.objects.get(pk=page.title_set.all()[0].pk)
        index.update_object(title, using=DEFAULT_ALIAS)
        indexed = index.prepared_data
        self.assertEqual('Whoopee', indexed['title'])
        self.assertEqual('Whoopee', indexed['text'])
Python
0.000001
@@ -67,56 +67,52 @@ ase%0A +%0A from -django.test.utils import override_settings%0A +cms.api import create_page, add_plugin %0Afro @@ -390,319 +390,8 @@ t%0A%0A%0A -test_settings = %7B%0A 'ALLOWED_HOSTS': %5B'localhost'%5D,%0A 'CMS_LANGUAGES': %7B1: %5B%7B'code': 'en', 'name': 'English'%7D%5D%7D,%0A 'CMS_TEMPLATES': ((%22whee.html%22, %22Whee Template%22),),%0A 'LANGUAGES': (('en', 'English'),),%0A 'LANGUAGE_CODE': 'en',%0A 'TEMPLATE_LOADERS': ('aldryn_search.tests.FakeTemplateLoader',),%0A%7D%0A%0A %0Acla @@ -858,44 +858,8 @@ )%0A%0A%0A -@override_settings(**test_settings)%0A clas @@ -2295,48 +2295,8 @@ %22%22%22%0A - from cms.api import create_page%0A @@ -2317,38 +2317,35 @@ ate_page(title=%22 -W ho -ope +m e%22, template=%22wh @@ -2334,35 +2334,35 @@ ome%22, template=%22 -whe +pag e.html%22, languag @@ -2856,38 +2856,35 @@ lf.assertEqual(' -W ho -ope +m e', indexed%5B'tit @@ -2907,38 +2907,35 @@ lf.assertEqual(' -W ho -ope +m e', indexed%5B'tex @@ -3089,48 +3089,8 @@ %22%22%22%0A - from cms.api import create_page%0A @@ -3119,22 +3119,19 @@ (title=%22 -W ho -ope +m e%22, temp @@ -3140,11 +3140,11 @@ te=%22 -whe +pag e.ht @@ -3155,33 +3155,32 @@ language=%22en%22)%0A -%0A from hay @@ -3625,38 +3625,35 @@ lf.assertEqual(' -W ho -ope +m e', indexed%5B'tit @@ -3688,14 +3688,11 @@ al(' -W ho -ope +m e',
bff7cf3d3cc2b636fb46a37b1edb60f7935e646e
Update consultants model
radar/radar/models/consultants.py
radar/radar/models/consultants.py
from sqlalchemy import Integer, Column, String
from sqlalchemy.orm import relationship

from radar.database import db


class Consultant(db.Model):
    __tablename__ = 'consultants'

    id = Column(Integer, primary_key=True)

    first_name = Column(String, nullable=False)
    last_name = Column(String, nullable=False)

    organisation_consultants = relationship('OrganisationConsultant')

    @property
    def organisations(self):
        return [x.organisation for x in self.organisation_consultants]
Python
0
@@ -215,24 +215,67 @@ _key=True)%0A%0A + title = Column(String, nullable=False)%0A first_na @@ -356,16 +356,114 @@ e=False) +%0A email = Column(String)%0A telephone_number = Column(String)%0A gmc_number = Column(Integer) %0A%0A or
d650b8798f3f6cb02038206bf41d8e56d4cff420
Use the option index instead of 'Yes' or 'No'.
src/epiweb/apps/survey/example.py
src/epiweb/apps/survey/example.py
# -*- coding: utf-8 -*-

from epiweb.apps.survey import definitions as d

_ = lambda x: x


class RepQ01(d.Question):
    question = _('Did you have one or more of the following symptoms since your last visit?')
    type = 'option-multiple'
    blank = True
    options = (
        _('Runny nose'),
        _('Stuffy nose'),
        _('Hacking cough'),
        _('Dry cough'),
        _('Sneezing'),
        _('Sore throat'),
        _('Muscle pain'),
        _('Headache'),
        _('Chest pain'),
        _('Feeling exhausted'),
        _('Feeling tired'),
        _('Loss of appetite'),
        _('Nausea'),
        _('Vomiting'),
        _('Diarrhoea'),
        _('Watery, bloodshot eyes'),
        _('Chills and feverish feeling'),
        _('Coloured sputum'),
    )

class RepQ02(d.Question):
    question = _('When did these symptoms started?')
    type = 'date'

class RepQ03(d.Question):
    question = _('Did you have fever? If yes, what was the highest temperature measured? Please estimate if you had fever, but did not measure.')
    type = 'option-single'
    options = (
        _('No'),
        _('Less than 37°C'),
        _('37°C'),
        _('37° - 37.5°C'),
        _('37.5° - 38°C'),
        _('38°'),
        _('38.5°C'),
        _('38.5° - 39°C'),
        _('39° - 39.5°C'),
        _('39.5° - 40°C'),
        _('More than 40°C'),
    )

class RepQ04(d.Question):
    question = _('When was your temperature for the first time above 38°C?')
    type = 'date'

class RepQ05(d.Question):
    question = _('Did these symptoms develop abruptly with sudden high fever or chills?')
    type = 'option-single'
    options = (
        _('No'),
        _('Yes'),
        _("Don't know"),
    )

class RepQ06(d.Question):
    question = _('Did you consult a medical doctor for these symptoms?')
    type = 'option-single'
    options = ('No', 'Yes')

class RepQ07(d.Question):
    question = _('Did you take medication for these symptoms?')
    type = 'option-single'
    options = (
        _('Tamiflu, Relenza, or another anti viral drug'),
        _('Antibiotics'),
        _('Antipyretics'),
        _('Anti-inflammatory drugs'),
        _('Vitamins'),
        _('Other'),
    )

class RepQ08(d.Question):
    question = _('Did you change your occupations due to these symptoms?')
    type = 'option-single'
    options = (
        _('No'),
        _('Yes, I staid at home'),
        _('Yes, but went to work/school as usual'),
        _('I staid at home, but was able to work'),
    )

class RepQ09(d.Question):
    question = _('How long did you staid at home?')
    type = 'option-single'
    options = (
        _('1 day'),
        _('2 days'),
        _('3 days'),
        _('4 days'),
        _('5 days'),
        _('6 days'),
        _('1 week'),
        _('Less than 2 weeks'),
        _('Less than 3 weeks'),
        _('More than 3 weeks'),
    )

class RepQ10(d.Question):
    question = _('Do other people from your family/home have/had comparable symptoms?')
    type = 'option-single'
    options = ('No', 'Yes')

class RepQ11(d.Question):
    question = _('According to our data you did not receive a seasonal flu vaccination?')
    type = 'option-single'
    options = (
        _('Yes'),
        _('No, meanwhile I have received a seasonal flu vaccination'),
    )

class RepQ12(d.Question):
    question = _('According to our data you did not receive a Mexican flu vaccination?')
    type = 'option-single'
    options = (
        _('Yes'),
        _('No, meanwhile I have received a Mexican flu vaccination'),
    )

class Survey(d.Survey):
    rules = (
        RepQ01,
        { (RepQ01, 'is-not', d.Empty) : (
            RepQ02, RepQ03, RepQ04, RepQ05, RepQ06, RepQ07, RepQ08,
            { (RepQ08, 'is-in', d.Items(1, 3)) : (
                RepQ09
            ) },
            RepQ10,
            { (d.Profile('seasonal-flu-vaccine'), 'is-not', 'Yes') : (
                RepQ11
            ) },
            { (d.Profile('mexican-flu-vaccine'), 'is-not', 'Yes') : (
                RepQ12
            ) },
        ) }
    )

survey = Survey
Python
0.001779
@@ -3944,37 +3944,33 @@ ne'), 'is-not', -'Yes' +1 ) : (%0A @@ -4058,21 +4058,17 @@ s-not', -'Yes' +1 ) : (%0A
c26dc22512e9b36a497930df544891cd0d96310b
Remove useless function
utils.py
utils.py
import string
import random
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app as app, url_for, redirect, session
from functools import wraps

def hash_password(s):
    return generate_password_hash(s)

def check_password(hashed_password, try_password):
    return check_password_hash(hashed_password, try_password)

def generate_string(length):
    return "".join([random.choice(string.letters + string.digits) for x in range(length)])

def admins_only(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if "admin" not in session or not session['admin']:
            session.clear()
            return redirect(url_for("index"))
        return f(*args, **kwargs)
    return decorated_function

def redirect_if_not_logged_in(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if "tid" not in session or "logged_in" not in session or not session["logged_in"]:
            return redirect(url_for("index"))
        return f(*args, **kwargs)
    return decorated_function

def convert(dictionary):
    """Recursively converts dictionary keys to strings."""
    if not isinstance(dictionary, dict):
        return dictionary
    return dict((str(k), convert(v))
                for k, v in dictionary.items())
Python
0.000904
@@ -1059,237 +1059,4 @@ ion%0A -%0Adef convert(dictionary):%0A %22%22%22Recursively converts dictionary keys to strings.%22%22%22%0A if not isinstance(dictionary, dict):%0A return dictionary%0A return dict((str(k), convert(v))%0A for k, v in dictionary.items())%0A
8833c91a0292a9fe241d7f47d3a8aa387d23709f
Version up to 3.2.0
setup.py
setup.py
#!/usr/bin/env python
"""
Sentry
======

Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.

Sentry is a Server
------------------

The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven
<https://github.com/dcramer/raven>`_) and all of the logic behind storage
and aggregation.

That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.

:copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""

from setuptools import setup, find_packages

tests_require = [
    'nose==1.1.2',
    'django-nose==0.1.3',
]

install_requires = [
    'cssutils>=0.9.9',
    'BeautifulSoup>=3.2.1',
    'Django>=1.2,<1.4',
    'django-indexer>=0.3.0',
    'django-paging>=0.2.4',
    'django-templatetag-sugar>=0.1.0',
    'eventlet>=0.9.15',
    'kombu>=2.1.0,<3.0',
    'gunicorn>=0.13.4',
    'logan>=0.2.1',
    'pynliner>=0.4.0',
    'pytz>=2011n',
    'raven>=1.4.4',
    'South>=0.7',
]

setup(
    name='sentry',
    version='3.1.4',
    author='David Cramer',
    author_email='dcramer@gmail.com',
    url='http://github.com/dcramer/sentry',
    description='A realtime logging an aggregation server.',
    long_description=__doc__,
    packages=find_packages(exclude=['tests']),
    zip_safe=False,
    install_requires=install_requires,
    tests_require=tests_require,
    extras_require={'test': tests_require},
    test_suite='runtests.runtests',
    license='BSD',
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'sentry = sentry.utils.runner:main',
        ],
    },
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
Python
0.000001
@@ -1334,19 +1334,19 @@ sion='3. -1.4 +2.0 ',%0A a
c077fb38cc01e38418adf3bf1281074ed6fa80cd
allow empty error messages
utils.py
utils.py
import datetime
import pickle

# extjs special encoder

from django.http import Http404, HttpResponse, HttpResponseRedirect

def set_cookie(response, key, value, days_expire = 7):
    if days_expire is None:
        max_age = 365*24*60*60 #one year
    else:
        max_age = days_expire*24*60*60
    expires = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
    response.set_cookie(key, value, max_age=max_age, expires=expires)
    return response

def set_pickle_cookie(response, key, value, days_expire = 7):
    if days_expire is None:
        max_age = 365*24*60*60 #one year
    else:
        max_age = days_expire*24*60*60
    value = pickle.dumps(value)
    expires = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
    response.set_cookie(key, value, max_age=max_age, expires=expires)
    return response
    return pickle.loads(value)

def get_pickle_cookie(request, key):
    value = request.COOKIES.get(key)
    if value:
        try:
            value = pickle.loads(value)
        except:
            print ' * ERROR unpickling cookie %s' % key
            value = None
    return value

def get_cookie(request, key):
    return request.COOKIES.get(key)

def DateFormatConverter(to_extjs = None, to_python = None):
    """ convert date formats between ext and python """
    f = {}
    f['a'] = 'D'
    f['A'] = 'l'
    f['b'] = 'M'
    f['B'] = 'F'
    #f['c'] =
    f['d'] = 'd'
    f['H'] = 'H'
    f['I'] = 'h'
    f['j'] = 'z'
    f['m'] = 'm'
    f['M'] = 'i'
    f['p'] = 'A'
    f['S'] = 's'
    f['U'] = 'W'
    #f['w'] =
    f['W'] = 'W'
    #f['x'] =
    #f['X'] =
    f['y'] = 'y'
    f['Y'] = 'Y'
    f['Z'] = 'T'

    out = ''
    if to_extjs:
        for char in to_extjs.replace('%',''):
            out += f.get(char, char)
    elif to_python:
        for char in to_python:
            if char in f.values():
                key = [key for key, val in f.items() if f[key] == char][0]
                out += '%%%s' % key
            else:
                out += char
    return out

def JsonResponse(contents, status=200):
    return HttpResponse(contents, mimetype='text/javascript', status=status)

def JsonSuccess(params = {}):
    d = {"success":True}
    d.update(params)
    return JsonResponse(JSONserialise(d))

def JsonError(error):
    return JsonResponse('{"success":false, "msg":"%s"}' % JsonCleanstr(error))

def JSONserialise(obj, sep = '"', escapeStrings = True):
    import decimal
    from django.db import models
    if type(obj)==type({}):
        return JSONserialise_dict(obj)
    elif type(obj)==type(True):
        return obj and "true" or "false"
    elif type(obj)==type([]):
        # if len(obj) > 50:
        #     print '*********', 'list', len(obj), type(obj)
        return "[%s]" % ','.join(map(JSONserialise, obj))
        # data = []
        # for item in obj:
        #     data.append(JSONserialise(item))
        # return "[%s]" % ",".join(data)
    elif type(obj) in [type(0), type(0.0), long, decimal.Decimal]:
        return '%s' % obj
    elif type(obj) in [datetime.datetime , datetime.date]:
        return u'%s%s%s' % (sep, obj, sep)
    elif type(obj) in [type(''), type(u'')] or isinstance(obj, models.Model):
        #print obj, isinstance(obj, str), isinstance(obj, unicode)
        if obj == "False":
            return "false"
        elif obj == "True":
            return "true"
        else:
            if escapeStrings:
                return u'%s%s%s' % (sep, JsonCleanstr(obj), sep)
            else:
                return u'%s%s%s' % (sep, obj, sep)
    elif not obj:
        return u'%s%s' % (sep, sep)
    else:
        print 'JSONserialise unknown type', obj, type(obj), obj.__class__.__name__, isinstance(obj, models.Model)
        return u'%s' % obj
    return None

def JSONserialise_dict_item(key, value, sep = '"'):
    # quote the value except for ExtJs keywords
    if key in ['renderer', 'editor', 'hidden', 'sortable', 'sortInfo', 'listeners', 'view', 'failure', 'success','scope', 'fn','store','handler']:
        if u'%s' % value in ['True', 'False']:
            value = str(value).lower()
        else:
            # doint escape strings inside these special values (eg; store data)
            value = JSONserialise(value, sep='', escapeStrings = False)
        return '"%s":%s' % (key, value)
    else:
        value = JSONserialise(value, sep)
        return '"%s":%s' % (key, value)

def JSONserialise_dict(inDict):
    data=[]
    for key in inDict.keys():
        # skip quotes for ExtJs reserved names
        data.append(JSONserialise_dict_item(key, inDict[key]))
        #if key in ['store', 'listeners', 'fn', 'handler', 'failure', 'success', 'scope']:
        #    val = inDict[key]
        #    if u'%s' % val in ['True', 'False']:
        #        val = str(val).lower()
        #else:
        #    val = JSONserialise(inDict[key])
        #data.append('%s:%s' % (key,val))
    data = ",".join(data)
    return "{%s}" % data

def JsonCleanstr(inval):
    try:
        inval = u'%s' % inval
    except:
        print "ERROR nunicoding %s" % inval
        pass
    return inval.replace('"','\\"').replace('\n','\\n') #.replace('\r','-')
Python
0.000003
@@ -2470,16 +2470,21 @@ or(error + = '' ):%0A r
3450712ec629c1720b6a6af28835d95a91b8fce7
Use classifiers to specify the license.
setup.py
setup.py
#!/usr/bin/python
import os
import re
from setuptools import setup

from m2r import parse_from_file
import restructuredtext_lint

# Parser README.md into reStructuredText format
rst_readme = parse_from_file('README.md')

# Validate the README, checking for errors
errors = restructuredtext_lint.lint(rst_readme)

# Raise an exception for any errors found
if errors:
    print(rst_readme)
    raise ValueError('README.md contains errors: ', ', '.join([e.message for e in errors]))

# Attempt to get version number from TravisCI environment variable
version = os.environ.get('TRAVIS_TAG', default='0.0.0')

# Remove leading 'v'
version = re.sub('^v', '', version)

setup(
    name='anybadge',
    description='Simple, flexible badge generator for project badges.',
    long_description=rst_readme,
    version=version,
    author='Jon Grace-Cox',
    author_email='jongracecox@gmail.com',
    py_modules=['anybadge', 'anybadge_server'],
    setup_requires=['setuptools', 'wheel'],
    tests_require=['unittest'],
    install_requires=[],
    data_files=[],
    options={
        'bdist_wheel': {'universal': True}
    },
    url='https://github.com/jongracecox/anybadge',
    entry_points={
        'console_scripts': ['anybadge=anybadge:main', 'anybadge-server=anybadge_server:main'],
    }
)
Python
0
@@ -1330,11 +1330,85 @@ %5D,%0A %7D +,%0A classifiers=%5B%0A 'License :: OSI Approved :: MIT License'%0A %5D %0A)%0A
22cb94902f5bbe32d636009c2599eae7aa66282c
fix extraction(closes #4319)
youtube_dl/extractor/stretchinternet.py
youtube_dl/extractor/stretchinternet.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import int_or_none


class StretchInternetIE(InfoExtractor):
    _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/portal\.htm\?.*?\beventId=(?P<id>\d+)'
    _TEST = {
        'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=313900&streamType=video',
        'info_dict': {
            'id': '313900',
            'ext': 'mp4',
            'title': 'Augustana (S.D.) Baseball vs University of Mary',
            'description': 'md5:7578478614aae3bdd4a90f578f787438',
            'timestamp': 1490468400,
            'upload_date': '20170325',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        stream = self._download_json(
            'https://neo-client.stretchinternet.com/streamservice/v1/media/stream/v%s'
            % video_id, video_id)
        video_url = 'https://%s' % stream['source']

        event = self._download_json(
            'https://neo-client.stretchinternet.com/portal-ws/getEvent.json',
            video_id, query={
                'clientID': 99997,
                'eventID': video_id,
                'token': 'asdf',
            })['event']
        title = event.get('title') or event['mobileTitle']
        description = event.get('customText')
        timestamp = int_or_none(event.get('longtime'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'url': video_url,
        }
Python
0
@@ -205,22 +205,31 @@ m/%5B%5E/%5D+/ +(?: portal +%7Cfull) %5C.htm%5C?. @@ -343,22 +343,22 @@ eventId= -313900 +573272 &streamT @@ -415,14 +415,14 @@ ': ' -313900 +573272 ',%0A @@ -472,122 +472,51 @@ ': ' -Augustana (S.D.) Baseball vs University of Mary',%0A 'description': 'md5:7578478614aae3bdd4a90f578f787438 +University of Mary Wrestling vs. Upper Iowa ',%0A @@ -544,17 +544,17 @@ ': 1 -490468400 +575668361 ,%0A @@ -586,13 +586,13 @@ '201 -70325 +91206 ',%0A @@ -689,22 +689,21 @@ -stream +event = self. @@ -735,34 +735,27 @@ 'https:// -neo-client +api .stretchinte @@ -767,274 +767,81 @@ com/ -s tr -eamservice/v1/media/stream/v%25s'%0A %25 video_id, video_id)%0A%0A video_url = 'https://%25s' %25 stream%5B'source'%5D%0A%0A event = self._download_json(%0A 'https://neo-client.stretchinternet.com/portal-ws/getEvent.json',%0A video_id, query= +inity/event/tcg/' + video_id,%0A video_id)%5B0%5D%0A%0A return %7B%0A @@ -854,55 +854,11 @@ - - 'clientID': 99997,%0A 'eventID +'id ': v @@ -882,99 +882,24 @@ - - 't -oken': 'asdf',%0A %7D)%5B'event'%5D%0A%0A title = event.get('title') or +itle': event%5B' mobi @@ -898,21 +898,16 @@ nt%5B' -mobileT +t itle'%5D +, %0A @@ -915,54 +915,13 @@ -description = event.get('customText')%0A +' time @@ -925,18 +925,18 @@ imestamp - = +': int_or_ @@ -955,198 +955,86 @@ et(' -longtime'))%0A%0A return %7B%0A 'id': video_id,%0A 'title': title,%0A 'description': description,%0A 'timestamp': timestamp,%0A 'url': video_url +dateCreated'), 1000),%0A 'url': 'https://' + event%5B'media'%5D%5B0%5D%5B'url'%5D ,%0A
8c9ea5084d85ee26ce7ce04cb75c96edda1979e3
fix gust being a peice of json
alexBot/cogs/flight.py
alexBot/cogs/flight.py
from datetime import datetime, timezone
import logging

import aiohttp
import discord
import humanize
from discord.ext import commands

log = logging.getLogger(__name__)

from alexBot.tools import Cog, get_json, get_xml


class Flight(Cog):
    @commands.command()
    async def metar(self, ctx: commands.Context, *, stations):
        """
        returns the METAR for a given station or set of stations. also works with cord pairs (40.38,-73.46)
        """
        await ctx.trigger_typing()
        station = stations.upper()
        try:
            data = await get_json(self.bot.session,
                                  f'https://avwx.rest/api/metar/{station}'
                                  f'?options=info,speech,translate'
                                  f'&onfail=cache'
                                  f'&token={self.bot.config.avwx_token}')
            if data is None:
                raise commands.BadArgument('It Appears that station doesnt have METAR data available.')
        except aiohttp.ClientResponseError:
            return await ctx.send(f"something happened. try again?")
        if 'note' in data or 'Note' in data:
            try:
                await self.bot.get_channel(384087096735956995).send(data['note'])
            except:
                pass
        if 'error' in data or 'Error' in data:
            try:
                e = data['help']
            except KeyError:
                try:
                    e = data['Help']
                except KeyError:
                    try:
                        e = data['error']
                    except KeyError:
                        e = data['Error']
            raise commands.errors.BadArgument(e)
        embed = discord.Embed()
        now = datetime.utcnow()
        try:
            report_time = datetime.strptime(data['time']['dt'], "%Y-%m-%dT%H:%M:%SZ")
        except ValueError:
            report_time = datetime.strptime(data['time']['dt'].replace(':00Z', '00Z'), "%Y-%m-%dT%H:%M:%S%zZ") # '2019-10-13T20:51:00+00:00Z'
            now = datetime.now(tz=timezone.utc)
        # report_time = report_time.replace(year=now.year, month=now.month) # this will fail around end of month/year
        embed.set_footer(text=f"report {humanize.naturaldelta(report_time - now)} old, "
                              f"please only use this data for planning purposes.")
        info = data['info']
        magdec = ""
        if data['wind_direction'] not in ['VRB', '',] and self.bot.config.government_is_working:
            magdec = await get_xml(ctx.bot.session,
                                   f"https://www.ngdc.noaa.gov/geomag-web/calculators/calculateDeclination"
                                   f"?lat1={info['latitude']}&lon1={info['longitude']}&resultFormat=xml")
            magdec = float(magdec['maggridresult']['result']['declination']['#text'])

            magdec = magdec + int(data['wind_direction']['value']) # add the magdec to the direction of the wind
            if magdec > 360: # if the declaration ends up being more than 360, subtract the extra.
                magdec = magdec - 360
            elif magdec < 0: # same as above, but for less than 0 condition.
                magdec = magdec + 360
            log.debug('magdec fail')
            # magdec = ""
        color = data['flight_rules']
        if color == "VFR":
            color = discord.Color.green()
        elif color == "MVFR":
            color = discord.Color.blue()
        elif color == "IFR":
            color = discord.Color.red()
        elif color == "LIFR":
            color = discord.Color.magenta()
        else:
            color = discord.Color.default()
        embed.colour = color
        try:
            if info['city'] == '':
                city = None
            else:
                city = info['city']
            if info['state'] == '':
                state = None
            else:
                state = info['state']
            if info['country'] == '':
                country = None
            else:
                country = info['country']
        except KeyError:
            city = None
            state = None
            country = None
        try:
            if info['Name'] == '':
                embed.title = station
            else:
                embed.title = info['name']
        except KeyError:
            embed.title = station
        if city is not None:
            embed.title += f", {city}"
        if state is not None:
            embed.title += f", {state}"
        if country is not None:
            embed.title += f", {country}"
        embed.title = f"{embed.title} ({station.split()[0]})"
        embed.add_field(name="Raw", value=data['raw'], inline=False)
        embed.add_field(name="Readable", value=data['speech'], inline=False)
        translations = data['translate']
        translations['clouds'] = translations['clouds'].replace(', ', '\n')
        if translations['clouds'] != "":
            embed.add_field(name="Clouds", value=translations['clouds'], inline=False)
        if translations['wind'] != "":
            if magdec != "":
                if data['wind_gust'] is not None:
                    embed.add_field(name="Wind",
                                    value=f"{data['wind_direction']['repr']}@{data['wind_speed']['repr']}"
                                    f"G{data['wind_gust']}(True)\n"
                                    f"{magdec:0f}@{data['wind_speed']['repr']}G{data['wind_gust']}"
                                    f" (with Variation")
                else:
                    embed.add_field(name="Wind",
                                    value=f"{data['wind_direction']['repr']}@{data['wind_speed']['repr']} (True)\n "
                                    f"{magdec:.0f}@{data['wind_speed']['repr']} (with variation)")
            else:
                embed.add_field(name="Wind", value=translations['wind'], inline=False)
        if translations['altimeter'] != "":
            embed.add_field(name="Altimeter", value=translations['altimeter'], inline=False)
        if translations['temperature'] != "":
            embed.add_field(name="Temperature", value=translations['temperature'], inline=False)
        if data['flight_rules'] != "":
            embed.add_field(name="Flight Rule", value=data['flight_rules'], inline=False)
        if translations['visibility'] != "":
            embed.add_field(name="Visibility", value=translations['visibility'], inline=False)
        embed.timestamp = report_time
        if color == discord.Color.red() or color == discord.Color.magenta():
            await ctx.send('you might want to reconsider flying.', embed=embed)
        else:
            await ctx.send(embed=embed)


def setup(bot):
    bot.add_cog(Flight(bot))
Python
0.000004
@@ -5280,16 +5280,24 @@ d_gust'%5D +%5B'repr'%5D %7D(True)%5C @@ -5380,16 +5380,24 @@ d_gust'%5D +%5B'repr'%5D %7D%22%0A
2b8535c34d92089fe84203f1f06e82472397eaea
Update version number
core/context_processors.py
core/context_processors.py
from django.conf import settings


def common(request=None):
    return {'logo_url': settings.LOGO_URL,
        'parent_site_url': settings.PARENT_SITE_URL,
        'version': '1.3',
        'GOOGLE_API_KEY': settings.GOOGLE_API_KEY,
        'demo_mode': settings.DEMO}
Python
0.000002
@@ -183,9 +183,9 @@ '1. -3 +4 ',%0A
62ccee03efd3fb5d53139f89ae974708d3a82e32
Add switches for cProfiling and verbosity output
tests/example_peninsula.py
tests/example_peninsula.py
from parcels import NEMOGrid, Particle, ParticleSet
from argparse import ArgumentParser


def pensinsula_example(filename, npart, degree=3):
    """Example configuration of particle flow around an idealised Peninsula

    :arg filename: Basename of the input grid file set
    :arg npart: Number of particles to intialise"""
    # Open grid file set
    grid = NEMOGrid(filename, degree=degree)

    # Initialise particles
    pset = ParticleSet(npart, grid)
    for p in range(npart):
        lat = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
        pset.add_particle(Particle(lon=3 / 1.852 / 60., lat=lat))

    print "Initial particle positions:"
    for p in pset._particles:
        print p

    # Advect the particles for 24h
    time = 86400.
    dt = 36.
    timesteps = int(time / dt)
    pset.advect(timesteps=timesteps, dt=dt)

    print "Final particle positions:"
    for p in pset._particles:
        print p


if __name__ == "__main__":
    p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
    p.add_argument('-p', '--particles', type=int, default=20,
                   help='Number of particles to advect')
    p.add_argument('-d', '--degree', type=int, default=3,
                   help='Degree of spatial interpolation')
    args = p.parse_args()
    pensinsula_example('peninsula', args.particles, degree=args.degree)
Python
0
@@ -131,16 +131,31 @@ degree=3 +, verbose=False ):%0A %22 @@ -630,16 +630,36 @@ =lat))%0A%0A + if verbose:%0A prin @@ -682,32 +682,36 @@ cle positions:%22%0A + for p in pse @@ -716,32 +716,36 @@ set._particles:%0A + print p%0A @@ -887,16 +887,36 @@ dt=dt)%0A%0A + if verbose:%0A prin @@ -945,24 +945,28 @@ tions:%22%0A + + for p in pse @@ -971,32 +971,36 @@ set._particles:%0A + print p%0A @@ -1366,16 +1366,306 @@ ation')%0A + p.add_argument('-v', '--verbose', action='store_true', default=False,%0A help='Print particle information before and after execution')%0A p.add_argument('--profiling', action='store_true', default=False,%0A help='Print profiling information after run')%0A args @@ -1686,76 +1686,470 @@ s()%0A +%0A -pensinsula_example('peninsula', args.particles, degree=args.degre +if args.profiling:%0A from cProfile import runctx%0A from pstats import Stats%0A runctx(%22pensinsula_example('peninsula', args.particles, degree=args.degree, verbose=args.verbose)%22,%0A globals(), locals(), %22Profile.prof%22)%0A Stats(%22Profile.prof%22).strip_dirs().sort_stats(%22time%22).print_stats(10)%0A else:%0A pensinsula_example('peninsula', args.particles, degree=args.degree,%0A verbose=args.verbos e)%0A
2de9eab0a0ed390b6c13d63ba4beaaa70fe8c195
Normalize matrix log command docstring quotation mark type
Discord/cogs/matrix.py
Discord/cogs/matrix.py
from discord.ext import commands

import ast

import numpy
import scipy

from utilities import checks


async def setup(bot):
	await bot.add_cog(Matrix())


class Matrix(commands.Cog):

	# TODO: move to converters file
	class Matrix(commands.Converter):
		async def convert(self, ctx, argument):
			try:
				return ast.literal_eval(argument)
			except SyntaxError:
				raise commands.BadArgument("Syntax Error")
			# TODO: check matrix

	async def cog_check(self, ctx):
		return await checks.not_forbidden().predicate(ctx)

	@commands.group(
		aliases = ["matrices"], case_insensitive = True, invoke_without_command = True
	)
	async def matrix(self, ctx):
		"""
		Matrix operations
		Input matrices as a list of lists (array of arrays)
		e.g.: [[1,2],[3,4]]
		"""
		await ctx.send_help(ctx.command)

	@matrix.command(aliases = ["addition", "plus", '+'])
	async def add(self, ctx, matrix_a: Matrix, matrix_b: Matrix):
		"""Add two matrices"""
		# TODO: unlimited number?
		await ctx.embed_reply(
			str(numpy.matrix(matrix_a) + numpy.matrix(matrix_b))
		)

	@matrix.group(
		aliases = ["cosine"], case_insensitive = True, invoke_without_command = True
	)
	async def cos(self, ctx, *, matrix: Matrix):
		"""Cosine of a matrix"""
		await ctx.embed_reply(str(scipy.linalg.cosm(matrix)))

	@cos.command(name = "hyperbolic", aliases = ['h'])
	async def cos_hyperbolic(self, ctx, *, matrix: Matrix):
		"""Hyperbolic cosine of a matrix"""
		await ctx.embed_reply(str(scipy.linalg.coshm(matrix)))

	@matrix.command()
	async def determinant(self, ctx, *, matrix: Matrix):
		"""Determinant of a matrix"""
		await ctx.embed_reply(scipy.linalg.det(matrix))

	@matrix.command(aliases = ["division", '/'])
	async def divide(self, ctx, matrix_a: Matrix, matrix_b: Matrix):
		"""Divide two matrices"""
		await ctx.embed_reply(
			str(numpy.matrix(matrix_a) / numpy.matrix(matrix_b))
		)

	@matrix.command(naliases = ["exponential"])
	async def exp(self, ctx, matrix: Matrix):
		"""Compute the matrix exponential using Pade approximation"""
		await ctx.embed_reply(str(scipy.linalg.expm(matrix)))

	@matrix.command()
	async def inverse(self, ctx, *, matrix: Matrix):
		"""Inverse of a matrix"""
		await ctx.embed_reply(str(numpy.matrix(matrix).I))

	@matrix.command(aliases = ["logarithm"])
	async def log(self, ctx, *, matrix: Matrix):
		'''Compute matrix logarithm'''
		await ctx.embed_reply(str(scipy.linalg.logm(matrix)))

	@matrix.command()
	async def lu(self, ctx, *, matrix: Matrix):
		'''LU decomposition of a matrix'''
		p, l, u = scipy.linalg.lu(matrix)
		await ctx.embed_reply(fields = (("P", p), ("L", l), ("U", u)))

	@matrix.group(aliases = ["times", '*'], invoke_without_command = True, case_insensitive = True)
	async def multiply(self, ctx, matrix_a: Matrix, matrix_b: Matrix):
		'''Multiply two matrices'''
		await ctx.embed_reply(str(numpy.matrix(matrix_a) * numpy.matrix(matrix_b)))

	@multiply.command(name = "scalar")
	async def multiply_scalar(self, ctx, matrix: Matrix, scalar: float):
		'''Multiply a matrix by a scalar'''
		await ctx.embed_reply(str(numpy.matrix(matrix) * scalar))

	@matrix.command(aliases = ['^', "**"])
	async def power(self, ctx, matrix: Matrix, power: int):
		'''Raise a matrix to a power'''
		try:
			await ctx.embed_reply(str(numpy.matrix(matrix) ** power))
		except ValueError as e: # not square matrix
			await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}")

	@matrix.command()
	async def rank(self, ctx, matrix: Matrix):
		'''Rank of a matrix'''
		await ctx.embed_reply(numpy.linalg.matrix_rank(matrix))

	@matrix.command()
	async def sign(self, ctx, matrix: Matrix):
		'''Matrix sign function'''
		await ctx.embed_reply(str(scipy.linalg.signm(matrix)))

	@matrix.group(aliases = ["sine"],
		invoke_without_command = True, case_insensitive = True)
	async def sin(self, ctx, *, matrix: Matrix):
		'''Sine of a matrix'''
		await ctx.embed_reply(str(scipy.linalg.sinm(matrix)))

	@sin.command(name = "hyperbolic", aliases = ['h'])
	async def sin_hyperbolic(self, ctx, *, matrix: Matrix):
		'''Hyperbolic sine of a matrix'''
		await ctx.embed_reply(str(scipy.linalg.sinhm(matrix)))

	@matrix.command(aliases = ["squareroot", "square_root", '√'])
	async def sqrt(self, ctx, *, matrix: Matrix):
		'''Square root of a matrix'''
		await ctx.embed_reply(str(scipy.linalg.sqrtm(matrix)))

	@matrix.command(aliases = ["subtraction", "minus", '-'])
	async def subtract(self, ctx, matrix_a: Matrix, matrix_b: Matrix):
		'''Subtract two matrices'''
		await ctx.embed_reply(str(numpy.matrix(matrix_a) - numpy.matrix(matrix_b)))

	@matrix.group(aliases = ["tangent"], invoke_without_command = True, case_insensitive = True)
	async def tan(self, ctx, *, matrix: Matrix):
		'''Tangent of a matrix'''
		await ctx.embed_reply(str(scipy.linalg.tanm(matrix)))

	@tan.command(name = "hyperbolic", aliases = ['h'])
	async def tan_hyperbolic(self, ctx, *, matrix: Matrix):
		'''Hyperbolic tangent of a matrix'''
		await ctx.embed_reply(str(scipy.linalg.tanhm(matrix)))

	@matrix.group(aliases = ["transposition"], invoke_without_command = True, case_insensitive = True)
	async def transpose(self, ctx, *, matrix: Matrix):
		'''Transpose of a matrix'''
		await ctx.embed_reply(str(numpy.matrix(matrix).T))

	@transpose.command(name = "conjugate")
	async def transpose_conjugate(self, ctx, *, matrix: Matrix):
		'''Conjugate trasponse of a matrix'''
		await ctx.embed_reply(str(numpy.matrix(matrix).H))
Python
0.000001
@@ -2327,19 +2327,19 @@ rix):%0A%09%09 -''' +%22%22%22 Compute @@ -2354,19 +2354,19 @@ ogarithm -''' +%22%22%22 %0A%09%09await
d5ed783c7dc691d7d0b847aa243989b626d90e9b
Add return None
alg_decimal_to_base.py
alg_decimal_to_base.py
from __future__ import print_function

from ds_stack import Stack

def convert_decimal_to_base2(dec_num):
    """Convert decimal number to binary number."""
    rem_stack = Stack()

    while dec_num > 0:
        rem = dec_num % 2
        rem_stack.push(rem)
        dec_num = dec_num // 2

    bin_str = ''
    while not rem_stack.is_empty():
        bin_str = bin_str + str(rem_stack.pop())

    return bin_str

def convert_decimal_to_base(dec_num, base):
    """Convert decimal number to any base."""
    rem_stack = Stack()
    digits = '0123456789ABCDEF'

    while dec_num > 0:
        rem = dec_num % base
        rem_stack.push(rem)
        dec_num = dec_num // base

    bin_str = ''
    while not rem_stack.is_empty():
        bin_str = bin_str + digits[rem_stack.pop()]

    return bin_str

def _recur_decimal_to_base(dec_num, base, rem_stack):
    digits = '0123456789ABCDEF'

    if dec_num < base:
        rem_stack.push(digits[dec_num])
    else:
        rem_stack.push(digits[dec_num % base])
        _recur_decimal_to_base(
            dec_num // base, base, rem_stack)

def convert_decimal_to_base_by_recur(dec_num, base):
    """Convert decimal number to any base by recussion with Stack."""
    rem_stack = Stack()
    _recur_decimal_to_base(dec_num, base, rem_stack)

    bin_str = ''
    while not rem_stack.is_empty():
        bin_str = bin_str + rem_stack.pop()

    return bin_str

def main():
    dec_num = 1024
    print('Convert {} to base 2: {}'
          .format(dec_num, convert_decimal_to_base2(dec_num)))

    dec_num = 233
    print('Convert {} to base 2: {}'
          .format(dec_num, convert_decimal_to_base2(dec_num)))
    print('Convert {} to base 8: {}'
          .format(dec_num, convert_decimal_to_base(dec_num, 8)))
    print('Convert {} to base 16: {}'
          .format(dec_num, convert_decimal_to_base(dec_num, 16)))
    print('Convert {} to base 16: {}'
          .format(dec_num, convert_decimal_to_base_by_recur(dec_num, 16)))

if __name__ == '__main__':
    main()
Python
0.999999
@@ -1086,16 +1086,37 @@ m_stack) +%0A %0A return None %0A%0Adef co @@ -1309,17 +1309,16 @@ _stack)%0A -%0A bin_ @@ -1406,17 +1406,16 @@ k.pop()%0A -%0A retu
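Decoded, the first hunk appends an explicit return None to the recursive helper (preceded by a blank line), and the other two hunks drop stray blank lines in convert_decimal_to_base_by_recur. A sketch of the helper as it would read after the commit, reconstructed from the old contents plus the diff:

```python
def _recur_decimal_to_base(dec_num, base, rem_stack):
    digits = '0123456789ABCDEF'
    if dec_num < base:
        rem_stack.push(digits[dec_num])
    else:
        rem_stack.push(digits[dec_num % base])
        _recur_decimal_to_base(
            dec_num // base, base, rem_stack)

    return None  # added by this commit: the implicit None return made explicit
```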
0205e519c2662bf33b59e20668f90a17a50c29e1
Add github URL to setup.py
setup.py
setup.py
# Copyright 2020 The ML Collections Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python 3 """Setup for pip package.""" from setuptools import find_namespace_packages from setuptools import setup def _parse_requirements(requirements_txt_path): with open(requirements_txt_path) as fp: return fp.read().splitlines() _VERSION = '0.1.0' setup( name='ml_collections', version=_VERSION, author='ML Collections Authors', author_email='ml-collections@google.com', description='ML Collections is a library of Python collections designed for ML usecases.', long_description=open('README.md').read(), long_description_content_type='text/markdown', # TODO(mohitreddy): Uncomment once private repo is created. # url='https://github.com/google/ml_collections', license='Apache 2.0', # Contained modules and scripts. packages=find_namespace_packages(exclude=['*_test.py']), install_requires=_parse_requirements('requirements.txt'), tests_require=_parse_requirements('requirements-test.txt'), # TODO(mohitreddy): Double check python versions supported. python_requires='>=2.6', include_package_data=True, zip_safe=False, # PyPI package information. classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python' 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
Python
0.000001
@@ -1199,74 +1199,8 @@ %0A - # TODO(mohitreddy): Uncomment once private repo is created.%0A # url @@ -1498,72 +1498,8 @@ '),%0A - # TODO(mohitreddy): Double check python versions supported.%0A
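Both hunks are pure deletions: the TODO(mohitreddy) comment lines go away, which uncomments the url argument named in the commit subject and leaves python_requires untouched. The affected setup() arguments after the change would read roughly (surrounding arguments elided):

```python
setup(
    name='ml_collections',
    # ...
    url='https://github.com/google/ml_collections',
    license='Apache 2.0',
    # ...
    python_requires='>=2.6',
)
```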
3419b45d481be416d30abfecdadb26e144bcbcb7
Correct spelling of template
aiohttp_admin/admin.py
aiohttp_admin/admin.py
from aiohttp_jinja2 import render_template from aiohttp_security import remember, forget from yarl import URL from .consts import TEMPLATE_APP_KEY from .exceptions import JsonValidaitonError from .security import authorize from .utils import json_response, validate_payload, LoginForm __all__ = ['AdminHandler', 'setup_admin_handlers'] class AdminHandler: def __init__(self, admin, *, resources, name=None, template=None, loop): self._admin = admin self._loop = loop self._name = name or 'aiohttp_admin' self._temalate = template or 'admin.html' self._login_template = 'login.html' for r in resources: r.setup(self._admin, URL('/')) self._resources = tuple(resources) @property def template(self): return self._temalate @property def name(self): return self._name @property def resources(self): return self._resources async def index_page(self, request): t = self._temalate context = {'name': self._name} return render_template(t, request, context, app_key=TEMPLATE_APP_KEY) async def login_page(self, request): t = self._login_template context = {} return render_template(t, request, context, app_key=TEMPLATE_APP_KEY) async def token(self, request): raw_payload = await request.read() data = validate_payload(raw_payload, LoginForm) await authorize(request, data['username'], data['password']) router = request.app.router location = router["admin.index"].url() payload = {"location": location} response = json_response(payload) await remember(request, response, data['username']) return response async def logout(self, request): if "Authorization" not in request.headers: msg = "Auth header is not present, can not destroy token" raise JsonValidaitonError(msg) router = request.app.router location = router["admin.login"].url() payload = {"location": location} response = json_response(payload) await forget(request, response) return response def setup_admin_handlers(admin, admin_handler, static_folder, admin_conf_path): add_route = admin.router.add_route add_static = admin.router.add_static a = admin_handler add_route('GET', '', a.index_page, name='admin.index') add_route('GET', '/login', a.login_page, name='admin.login') add_route('POST', '/token', a.token, name='admin.token') add_route('DELETE', '/logout', a.logout, name='admin.logout') add_static('/static', path=static_folder, name='admin.static') add_static('/config', path=admin_conf_path, name='admin.config')
Python
0.000085
@@ -544,25 +544,25 @@ self._tem -a +p late = templ @@ -799,25 +799,25 @@ rn self._tem -a +p late%0A%0A @p @@ -1004,17 +1004,17 @@ elf._tem -a +p late%0A
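All three hunks apply the same one-letter fix at each use site: the misspelled attribute self._temalate becomes self._template. The touched lines after the commit, with class indentation trimmed for display:

```python
self._template = template or 'admin.html'

@property
def template(self):
    return self._template

# inside index_page():
t = self._template
```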
f2b796b94ea1cd9c71500521404ef39d10ca091d
improve to_big_endian_binary function
utils.py
utils.py
from binascii import unhexlify def to_big_endian_binary(val): # one (1) hex digit per four (4) bits width = val.bit_length() # unhexlify wants an even multiple of eight (8) bits, but we don't # want more digits than we need (hence the ternary-ish 'or') width += 8 - ((width % 8) or 8) # format width specifier: four (4) bits per hex digit fmt = '%%0%dx' % (width // 4) # prepend zero (0) to the width, to zero-pad the output return unhexlify(fmt % val)
Python
0.998572
@@ -1,36 +1,4 @@ -from binascii import unhexlify%0A%0A def @@ -32,430 +32,85 @@ -# one (1) hex digit per four (4) bits%0A width = val.bit_length()%0A%0A # unhexlify wants an even multiple of eight (8) bits, but we don't%0A # want more digits than we need (hence the ternary-ish 'or')%0A width += 8 - ((width %25 8) or 8)%0A%0A # format width specifier: four (4) bits per hex digit%0A fmt = '%25%250%25dx' %25 (width // 4)%0A%0A # prepend zero (0) to the width, to zero-pad the output%0A return unhexlify(fmt %25 val +s = '%25x' %25 val%0A if len(s) & 1:%0A s = '0' + s%0A return s.decode('hex' )%0A
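Decoded, the diff drops the unhexlify import and all of the bit-width arithmetic in favour of plain '%x' formatting, zero-padded to an even digit count. The whole file after the commit, as the hunks describe it; note this is Python 2 code, since str.decode('hex') is gone in Python 3 (bytes.fromhex(s) is the rough equivalent there):

```python
def to_big_endian_binary(val):
    s = '%x' % val
    if len(s) & 1:          # odd number of hex digits:
        s = '0' + s         # pad so the hex codec sees whole bytes
    return s.decode('hex')  # Python 2 only; bytes.fromhex(s) on Python 3
```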
4d85b334298bcfc58c9bfd2bdfae123302caa48e
Bump coveralls from 2.1.0 to 2.1.1 (#18)
setup.py
setup.py
#!/usr/bin/env python3 from os import path from setuptools import setup, find_packages from ogn.client.settings import PACKAGE_VERSION here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='ogn-client', version=PACKAGE_VERSION, description='A python module for the Open Glider Network', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/glidernet/python-ogn-client', author='Konstantin Gründger aka Meisterschueler, Fabian P. Schmidt aka kerel', author_email='kerel-fs@gmx.de', license='AGPLv3', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: GIS', 'License :: OSI Approved :: GNU Affero General Public License v3', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9' ], keywords='gliding ogn', packages=['ogn.{}'.format(package) for package in find_packages(where='ogn')], python_requires='>=3', install_requires=[], extras_require={ 'dev': [ 'nose==1.3.7', 'coveralls==2.1.0', 'flake8==3.8.3' ] }, zip_safe=False )
Python
0
@@ -1576,9 +1576,9 @@ 2.1. -0 +1 ',%0A
8fc2e0ebf9fe8f753f7e9cdc6ad67ed22604e022
support img src attr too
interlinks/interlinks.py
interlinks/interlinks.py
# -*- coding: utf-8 -*- """ Interlinks ========================= This plugin allows you to include "interwiki" or shortcuts links into the blog, as keyword>rest_of_url """ from bs4 import BeautifulSoup from pelican import signals import re interlinks = {} def getSettings (generator): global interlinks interlinks = {'this': generator.settings['SITEURL']+"/"} if 'INTERLINKS' in generator.settings: for key, value in generator.settings['INTERLINKS'].items(): interlinks[key] = value def content_object_init(instance): if instance._content is not None: content = instance._content # use Python's built-in parser so no duplicated html & body tags appear, or use tag.unwrap() text = BeautifulSoup(content, "html.parser") if 'a' in content: for link in text.find_all(href=re.compile("(.+?)>")): url = link.get('href') m = re.search(r"(.+?)>", url).groups() name = m[0] if name in interlinks: hi = url.replace(name+">",interlinks[name]) link['href'] = hi instance._content = text.decode() def register(): signals.generator_init.connect(getSettings) signals.content_object_init.connect(content_object_init)
Python
0
@@ -1002,16 +1002,282 @@ f'%5D = hi +%0A%09%09if 'img' in content:%0A%09%09%09for img in text.find_all('img', src=re.compile(%22(.+?)%3E%22)):%0A%09%09%09%09url = img.get('src')%0A%09%09%09%09m = re.search(r%22(.+?)%3E%22, url).groups()%0A%09%09%09%09name = m%5B0%5D%0A%09%09%09%09if name in interlinks:%0A%09%09%09%09%09hi = url.replace(name+%22%3E%22,interlinks%5Bname%5D)%0A%09%09%09%09%09img%5B'src'%5D = hi %0A%0A%09%09inst
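Decoded (%09 tab, %22 quote, %3E the > character, %5B/%5D square brackets), the inserted block mirrors the existing anchor-tag loop for <img> tags, rewriting src attributes of the keyword>rest_of_url form. content_object_init gains, right after the href loop (tab indentation as in the source file):

```python
		if 'img' in content:
			for img in text.find_all('img', src=re.compile("(.+?)>")):
				url = img.get('src')
				m = re.search(r"(.+?)>", url).groups()
				name = m[0]
				if name in interlinks:
					hi = url.replace(name+">",interlinks[name])
					img['src'] = hi
```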
24f93c560c2fa19c512d2d88b8e1219690e2db68
Bump the version up to 0.8 for release
setup.py
setup.py
#!/usr/bin/env python ## Copyright 2014 Cognitect. All Rights Reserved. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS-IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. from setuptools import setup, find_packages import subprocess revision = subprocess.check_output("./bin/revision") setup(name="transit-python", version="0.1."+revision, description="Transit marshalling for Python", author="Cognitect", url="https://github.com/cognitect/transit-python", packages=find_packages(), install_requires=["python-dateutil", "msgpack-python"])
Python
0.000084
@@ -788,17 +788,17 @@ sion=%220. -1 +8 .%22+revis
f6a2a21d91e06d417da8cd93fb2a28f28385ed19
fix test
tests/test_coding/test_algos.py
tests/test_coding/test_algos.py
# coding=UTF-8 from __future__ import print_function, absolute_import, division import six import unittest from satella.coding import merge_dicts class TestMergeDicts(unittest.TestCase): def test_merge_dicts(self): tak = merge_dicts({'kupujemy': 'tak'}, {'kupujemy': 'nie'}) nie = merge_dicts({'kupujemy': 'nie'}, {'kupujemy': 'tak'}) self.assertEquals(tak['kupujemy'], 'nie') self.assertEquals(nie['kupujemy'], 'tak') def test_merge_lists(self): tak = merge_dicts({'kupujemy': ['tak']}, {'kupujemy': ['nie']}) self.assertEqual(set(tak), set(['tak', 'nie']))
Python
0.000002
@@ -599,16 +599,28 @@ (set(tak +%5B'kupujemy'%5D ), set(%5B
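The fix inserts ['kupujemy'] before the set() conversion: merge_dicts returns a dict, so set(tak) was a set of keys, and the merged list has to be pulled out by key first. The assertion after the commit:

```python
def test_merge_lists(self):
    tak = merge_dicts({'kupujemy': ['tak']}, {'kupujemy': ['nie']})
    self.assertEqual(set(tak['kupujemy']), set(['tak', 'nie']))
```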
b09197a38ebbf32abe45a787c475ae6706beaa53
set ignore property
pyamg/relaxation/info.py
pyamg/relaxation/info.py
""" Relaxation methods ------------------ The multigrid cycle is formed by two complementary procedures: relaxation and coarse-grid correction. The role of relaxation is to rapidly damp oscillatory (high-frequency) errors out of the approximate solution. When the error is smooth, it can then be accurately represented on the coarser grid, where a solution, or approximate solution, can be computed. Iterative methods for linear systems that have an error smoothing property are valid relaxation methods. Since the purpose of a relaxation method is to smooth oscillatory errors, its effectiveness on non-oscillatory errors is not important. This point explains why simple iterative methods like Gauss-Seidel iteration are effective relaxation methods while being very slow to converge to the solution of Ax=b. PyAMG implements relaxation methods of the following varieties: 1. Jacobi iteration 2. Gauss-Seidel iteration 3. Successive Over-Relaxation 4. Polynomial smoothing (e.g. Chebyshev) 5. Kaczmarz iteration Refer to the docstrings of the individual methods for additional information. """ #TODO: explain separation of basic methods from interface methods. postpone_import = 1
Python
0.000002
@@ -1189,16 +1189,105 @@ methods. +%0A#TODO: explain why each class of methods exist (parallel vs. serial, SPD vs. indefinite) %0A%0Apostpo
be8625d983f147385956079c1c1b4bbc2b3ccb17
fix flake8
aioresponses/compat.py
aioresponses/compat.py
# -*- coding: utf-8 -*- import asyncio # noqa: F401 import sys from typing import Dict, Optional, Tuple, Union # noqa from urllib.parse import parse_qsl, urlencode from aiohttp import __version__ as aiohttp_version, StreamReader from multidict import MultiDict from pkg_resources import parse_version from yarl import URL if sys.version_info < (3, 7): from re import _pattern_type as Pattern else: from re import Pattern AIOHTTP_VERSION = parse_version(aiohttp_version) if AIOHTTP_VERSION >= parse_version('3.0.0'): from aiohttp.client_proto import ResponseHandler def stream_reader_factory( # noqa loop: 'Optional[asyncio.AbstractEventLoop]' = None ): protocol = ResponseHandler(loop=loop) return StreamReader(protocol, limit=2 ** 16, loop=loop) else: def stream_reader_factory(loop=None): return StreamReader() def merge_params(url: 'Union[URL, str]', params: 'Dict' = None) -> 'URL': url = URL(url) if params: query_params = MultiDict(url.query) query_params.extend(url.with_query(params).query) return url.with_query(query_params) return url def normalize_url(url: 'Union[URL, str]') -> 'URL': """Normalize url to make comparisons.""" url = URL(url) return url.with_query(urlencode(sorted(parse_qsl(url.query_string)))) try: from aiohttp import RequestInfo except ImportError: class RequestInfo(object): __slots__ = ('url', 'method', 'headers', 'real_url') def __init__(self, url: URL, method: str, headers: Dict, real_url: str): self.url = url self.method = method self.headers = headers self.real_url = real_url __all__ = [ 'URL', 'Pattern', 'RequestInfo', 'AIOHTTP_VERSION', 'merge_params', 'stream_reader_factory', 'normalize_url', ]
Python
0
@@ -1513,16 +1513,29 @@ _init__( +%0A self, ur @@ -1583,16 +1583,25 @@ url: str +%0A ):%0A
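Both hunks only insert line breaks: the long __init__ signature of the RequestInfo fallback class is wrapped so it passes flake8's line-length check. After the commit it reads roughly as below (the exact continuation indentation is not recoverable from the collapsed whitespace in this dump):

```python
def __init__(
        self, url: URL, method: str, headers: Dict, real_url: str
):
    self.url = url
    self.method = method
    self.headers = headers
    self.real_url = real_url
```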
5eabe658d3c20f25fa78d1fc4fe2d2d692390e75
Make requests.get(...) a bit more robust
PowerToThePeople.py
PowerToThePeople.py
#!/usr/bin/env python import serial from requests import get from time import time, strftime, asctime from sys import stdout from subprocess import check_output try: from config import * except ImportError: from defaults import * print 'Warning! copy defaults.py to config.py and edit that file!' PVOUTPUT_INTERVAL = 300 #5 minutes between sending updates def main(): usbDevice = check_output('ls /dev/ttyACM*', shell=True).strip() ser = serial.Serial(usbDevice, 115200) ser.flushInput() ser.readline() #Skip first led flash to get a proper duration after this lastPvOutputTime = lastLedFlashTime = time() #first impression duration will be inaccurate nLedFlashes = 0 while True: s = ser.readline() #print 'Arduino: ', s, now = time() watt = 3600 / (now - lastLedFlashTime) lastLedFlashTime = now nLedFlashes += 1 print '%s : %4d Watt' % (asctime(), watt) r = get('http://localhost:8083/watt/%d Watt' % watt) #update webcache if now >= lastPvOutputTime + PVOUTPUT_INTERVAL: #XXX should post average power consumption watt_average = nLedFlashes * 3600 / (now - lastPvOutputTime) #print 'Watt Average %d' % watt_average payload = { 'key' : pvoutput_key, 'sid' : pvoutput_sid, 'd' : strftime('%Y%m%d'), 't' : strftime('%H:%M'), 'v4' : watt_average } r = get('http://pvoutput.org/service/r2/addstatus.jsp', params=payload) lastPvOutputTime = now nLedFlashes = 0 stdout.flush() if __name__ == '__main__': main()
Python
0
@@ -55,16 +55,73 @@ ort get%0A +from requests.exceptions import Timeout, ConnectionError%0A from tim @@ -940,16 +940,24 @@ , watt)%0A +%09%09try:%0A%09 %09%09r = ge @@ -970,17 +970,17 @@ p:// -localhost +127.0.0.1 :808 @@ -1001,16 +1001,29 @@ ' %25 watt +, timeout=1.0 )%09#updat @@ -1032,16 +1032,77 @@ webcache +%0A%09%09except Timeout:%0A%09%09%09print 'Warning: webcache update failed' %0A%0A%09%09if n @@ -1457,16 +1457,25 @@ e%0A%09%09%09%09%7D%0A +%09%09%09try:%0A%09 %09%09%09r = g @@ -1539,17 +1539,101 @@ =payload -) +, timeout=5.0)%0A%09%09%09except ConnectionError:%0A%09%09%09%09print 'Warning: pvoutput update failed' %0A%09%09%09last
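Decoded (%09 tab), the hunks import Timeout and ConnectionError, swap localhost for 127.0.0.1, and wrap both get() calls in try/except with explicit timeouts, so a slow or dead endpoint can no longer stall the metering loop. The two call sites after the commit, keeping the file's Python 2 print statements and tab indentation:

```python
from requests.exceptions import Timeout, ConnectionError
# ...
		try:
			r = get('http://127.0.0.1:8083/watt/%d Watt' % watt, timeout=1.0)	#update webcache
		except Timeout:
			print 'Warning: webcache update failed'
# ...
			try:
				r = get('http://pvoutput.org/service/r2/addstatus.jsp', params=payload, timeout=5.0)
			except ConnectionError:
				print 'Warning: pvoutput update failed'
```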
20db5eb25162665e817bef993ea84bbd1b9e3a45
Update setup.py
setup.py
setup.py
# -*- coding: utf-8 -*- """ @author: uwe """ import sys import os from setuptools import setup setup(name='feedinlib', version='0.0.12', description='Creating time series from pv or wind power plants.', url='http://github.com/oemof/feedinlib', author='oemof developer group', author_email='birgit.schachler@rl-institut.de', license='GPL3', packages=['feedinlib'], zip_safe=False, install_requires=['numpy >= 1.7.0', 'pandas >= 0.13.1', 'pvlib[optional] >= 0.5.0', 'windpowerlib >= 0.0.6', 'scipy', 'shapely'])
Python
0.000001
@@ -1,50 +1,4 @@ -# -*- coding: utf-8 -*-%0A%22%22%22%0A@author: uwe%0A%22%22%22%0A%0A impo @@ -513,9 +513,9 @@ = 0. -5 +6 .0', @@ -558,19 +558,19 @@ ib %3E= 0. -0.6 +2.0 ',%0A
888f2ee4c423e18a40cbcaec3eb9f4f29f993e44
add mock payment as default for OrderPaymentFactory
bluebottle/test/factory_models/payments.py
bluebottle/test/factory_models/payments.py
import factory from bluebottle.payments.models import Payment, OrderPayment from bluebottle.payments_logger.models import PaymentLogEntry from .orders import OrderFactory class OrderPaymentFactory(factory.DjangoModelFactory): FACTORY_FOR = OrderPayment amount = 100 order = factory.SubFactory(OrderFactory) class PaymentFactory(factory.DjangoModelFactory): FACTORY_FOR = Payment order_payment = factory.SubFactory(OrderPaymentFactory)
Python
0
@@ -250,24 +250,52 @@ derPayment%0A%0A + payment_method = 'mock'%0A amount =
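A single inserted line: the factory now defaults payment_method to 'mock', so order payments built in tests go through the mock provider unless a test overrides it. The factory after the commit:

```python
class OrderPaymentFactory(factory.DjangoModelFactory):
    FACTORY_FOR = OrderPayment

    payment_method = 'mock'
    amount = 100
    order = factory.SubFactory(OrderFactory)
```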
ed13a4d6ea21842568d1ef63797d50169b6dd040
Add rpath
recipes/py2app/fix_macos_rpath.py
recipes/py2app/fix_macos_rpath.py
""" Tool for initial rpath fix for prebuilt binaries """ from __future__ import absolute_import, division, print_function import os import glob from subprocess import CalledProcessError, check_output # ============================================================================= if __name__ == '__main__': main_files = glob.glob('py2app/apptemplate/prebuilt/main*') secondary_files = glob.glob('py2app/apptemplate/prebuilt/secondary*') for bin_file in main_files + secondary_files: if os.path.isfile(bin_file): print(bin_file) libraries = list() try: libraries = check_output(['otool', '-L', bin_file]).decode('utf8').split('\n') except CalledProcessError: pass for line in libraries[1:]: lib = line.strip().split() if len(lib) > 0: lib = lib[0] new_lib = None if 'libgcc_s' in lib: new_lib = os.path.join('@rpath', lib.split('/')[-1]) if new_lib is not None: print('Changing {lib} to {new_lib}'.format(lib=lib, new_lib=new_lib)) cmd = ['install_name_tool', '-change', lib, new_lib, bin_file] print(' '.join(cmd)) output = check_output(cmd)
Python
0.000002
@@ -926,16 +926,23 @@ @rpath', + 'lib', lib.spl @@ -1217,8 +1217,165 @@ ut(cmd)%0A + cmd = %5B'install_name_tool', '-add_rpath', os.getenv('PREFIX'), bin_file%5D%0A print(' '.join(cmd))%0A output = check_output(cmd)%0A
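Decoded, the first hunk inserts 'lib' into the join so libgcc_s gets rewritten to @rpath/lib/<name>, and the second appends an install_name_tool -add_rpath call pointing at $PREFIX, presumably the conda build prefix given the recipes/ path (an inference, not stated in the record). The tail of the loop body after the commit:

```python
new_lib = os.path.join('@rpath', 'lib', lib.split('/')[-1])
# ...
cmd = ['install_name_tool', '-change', lib, new_lib, bin_file]
print(' '.join(cmd))
output = check_output(cmd)
cmd = ['install_name_tool', '-add_rpath', os.getenv('PREFIX'), bin_file]
print(' '.join(cmd))
output = check_output(cmd)
```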
8ae98e37fbd558f87428255456cc1fa4b5e98a90
fix for #5
flaskext/wtf/__init__.py
flaskext/wtf/__init__.py
# -*- coding: utf-8 -*- """ flaskext.wtf ~~~~~~~~~~~~ Flask-WTF extension :copyright: (c) 2010 by Dan Jacob. :license: BSD, see LICENSE for more details. """ import warnings import uuid from wtforms.fields import BooleanField, DecimalField, DateField, \ DateTimeField, FieldList, FloatField, FileField, FormField, \ HiddenField, IntegerField, PasswordField, RadioField, SelectField, \ SelectMultipleField, SubmitField, TextField, TextAreaField from wtforms.validators import Email, email, EqualTo, equal_to, \ IPAddress, ip_address, Length, length, NumberRange, number_range, \ Optional, optional, Required, required, Regexp, regexp, \ URL, url, AnyOf, any_of, NoneOf, none_of from wtforms.widgets import CheckboxInput, FileInput, HiddenInput, \ ListWidget, PasswordInput, RadioInput, Select, SubmitInput, \ TableWidget, TextArea, TextInput try: import sqlalchemy _is_sqlalchemy = True except ImportError: _is_sqlalchemy = False from wtforms import Form as BaseForm from wtforms import fields, widgets, validators, ValidationError from flask import request, session, current_app from jinja2 import Markup from flaskext.wtf import recaptcha from flaskext.wtf.recaptcha.fields import RecaptchaField from flaskext.wtf.recaptcha.widgets import RecaptchaWidget from flaskext.wtf.recaptcha.validators import Recaptcha fields.RecaptchaField = RecaptchaField widgets.RecaptchaWidget = RecaptchaWidget validators.Recaptcha = Recaptcha __all__ = ['Form', 'ValidationForm', 'fields', 'validators', 'widgets'] __all__ += fields.__all__ __all__ += validators.__all__ __all__ += widgets.__all__ __all__ += recaptcha.__all__ if _is_sqlalchemy: from wtforms.ext.sqlalchemy.fields import QuerySelectField, \ QuerySelectMultipleField, ModelSelectField __all__ += ['QuerySelectField', 'QuerySelectMultipleField', 'ModelSelectField'] for field in (QuerySelectField, QuerySelectMultipleField, ModelSelectField): setattr(fields, field.__name__, field) def _generate_csrf_token(): return str(uuid.uuid4()) class Form(BaseForm): """ Subclass of WTForms **Form** class. The main difference is that **request.form** is passed as `formdata` argument to constructor so can handle request data implicitly. In addition this **Form** implementation has automatic CSRF handling. """ csrf = fields.HiddenField() def __init__(self, formdata=None, *args, **kwargs): csrf_enabled = kwargs.pop('csrf_enabled', None) if csrf_enabled is None: csrf_enabled = current_app.config.get('CSRF_ENABLED', True) self.csrf_enabled = csrf_enabled self.csrf_session_key = kwargs.pop('csrf_session_key', None) if self.csrf_session_key is None: self.csrf_session_key = \ current_app.config.get('CSRF_SESSION_KEY', '_csrf_token') csrf_token = session.get(self.csrf_session_key, None) if csrf_token is None: csrf_token = self.reset_csrf() super(Form, self).__init__(formdata, csrf=csrf_token, *args, **kwargs) def is_submitted(self): """ Checks if form has been submitted. The default case is if the HTTP method is **PUT** or **POST**. """ return request and request.method in ("PUT", "POST") def process(self, formdata=None, obj=None, **kwargs): if self.is_submitted(): if formdata is None: formdata = request.form # ensure csrf validation occurs ONLY when formdata is passed # in case "csrf" is the only field in the form if not formdata: self.csrf_is_valid = False else: self.csrf_is_valid = None if request.files: for name, field in self._fields.iteritems(): if isinstance(field, FileField) and name in request.files: field.file = request.files[name] super(Form, self).process(formdata, obj, **kwargs) @property def csrf_token(self): """ Renders CSRF field inside a hidden DIV. :deprecated: Use **hidden_tag** instead. """ warnings.warn("csrf_token is deprecated. Use hidden_tag instead", DeprecationWarning) return self.hidden_tag('csrf') def reset_csrf(self): """ Resets the CSRF token in the session. If you are reusing the form in the same view (i.e. you are not redirecting somewhere else) it's recommended you call this before rendering the form. """ csrf_token = _generate_csrf_token() session[self.csrf_session_key] = csrf_token return csrf_token def validate_csrf(self, field): if not self.csrf_enabled or request.is_xhr: return csrf_token = session.pop(self.csrf_session_key, None) is_valid = field.data and \ field.data == csrf_token and \ self.csrf_is_valid is not False # reset this field, otherwise stale token is displayed field.data = self.reset_csrf() # we set this flag to ensure consistent behaviour when # calling validate() more than once self.csrf_is_valid = bool(is_valid) if not is_valid: raise ValidationError, "Missing or invalid CSRF token" def hidden_tag(self, *fields): """ Wraps hidden fields in a hidden DIV tag, in order to keep XHTML compliance. :versionadded: 0.3 :param fields: list of hidden field names. If not provided will render all hidden fields, including the CSRF field. """ if not fields: fields = [f.name for f in self if isinstance(f, HiddenField)] rv = [u'<div style="display:none;">'] rv += [unicode(getattr(self, field)) for field in fields] rv.append(u"</div>") return Markup(u"".join(rv)) def validate_on_submit(self): """ Checks if form has been submitted and if so runs validate. This is a shortcut, equivalent to ``form.is_submitted() and form.validate()`` """ return self.is_submitted() and self.validate()
Python
0
@@ -1529,20 +1529,21 @@ lidation -F +Err or -m ',%0A
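Decoded, the hunk edits the __all__ list in place: Validation, then F replaced by Err, then or kept and the trailing m dropped, turning 'ValidationForm' into 'ValidationError'. The module imports ValidationError from wtforms and defines no ValidationForm, so issue #5 was presumably a broken export (an inference from the code, not stated in the record). The line after the commit:

```python
__all__ = ['Form', 'ValidationError', 'fields', 'validators', 'widgets']
```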