Dataset columns (each record below lists its fields in this order):

  commit         stringlengths   40 .. 40
  subject        stringlengths   1 .. 3.25k
  old_file       stringlengths   4 .. 311
  new_file       stringlengths   4 .. 311
  old_contents   stringlengths   0 .. 26.3k
  lang           stringclasses   3 values
  proba          float64         0 .. 1
  diff           stringlengths   0 .. 7.82k

commit: 17157a4eb9d4aa934da89892f4f42f851902b44c
subject: fix missing return in celery SiteRouter :rage1:
old_file: frappe/celery_app.py
new_file: frappe/celery_app.py
old_contents:

# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals, absolute_import
from celery import Celery

# initiate logger
from celery.utils.log import get_task_logger
task_logger = get_task_logger(__name__)

from datetime import timedelta
import frappe
import json
import os

SITES_PATH = os.environ.get('SITES_PATH', '.')

# defaults
DEFAULT_CELERY_BROKER = "redis://localhost"
DEFAULT_CELERY_BACKEND = None
DEFAULT_SCHEDULER_INTERVAL = 300
LONGJOBS_PREFIX = "longjobs@"

_app = None
def get_celery():
    global _app
    if not _app:
        conf = frappe.get_site_config(sites_path=SITES_PATH)
        _app = Celery('frappe',
            broker=conf.celery_broker or DEFAULT_CELERY_BROKER,
            backend=conf.celery_result_backend or DEFAULT_CELERY_BACKEND)
        setup_celery(_app, conf)
    return _app

def setup_celery(app, conf):
    app.autodiscover_tasks(frappe.get_all_apps(with_frappe=True,
        with_internal_apps=False, sites_path=SITES_PATH))
    app.conf.CELERY_TASK_SERIALIZER = 'json'
    app.conf.CELERY_ACCEPT_CONTENT = ['json']
    app.conf.CELERY_TIMEZONE = 'UTC'

    if conf.celery_queue_per_site:
        app.conf.CELERY_ROUTES = (SiteRouter(),)

    app.conf.CELERYBEAT_SCHEDULE = get_beat_schedule(conf)

class SiteRouter(object):
    def route_for_task(self, task, args=None, kwargs=None):
        if hasattr(frappe.local, 'site'):
            if kwargs and kwargs.get("event", "").endswith("_long"):
                get_queue(frappe.local.site, LONGJOBS_PREFIX)
            else:
                get_queue(frappe.local.site)
        return None

def get_queue(site, prefix=None):
    return {'queue': "{}{}".format(prefix or "", site)}

def get_beat_schedule(conf):
    schedule = {
        'scheduler': {
            'task': 'frappe.tasks.enqueue_scheduler_events',
            'schedule': timedelta(seconds=conf.scheduler_interval or DEFAULT_SCHEDULER_INTERVAL)
        },
    }

    if conf.celery_queue_per_site:
        schedule['sync_queues'] = {
            'task': 'frappe.tasks.sync_queues',
            'schedule': timedelta(seconds=conf.scheduler_interval or DEFAULT_SCHEDULER_INTERVAL)
        }

    return schedule

def celery_task(*args, **kwargs):
    return get_celery().task(*args, **kwargs)

if __name__ == '__main__':
    get_celery().start()

lang: Python
proba: 0
diff:
@@ -1437,24 +1437,31 @@ long%22):%0A%09%09%09%09 +return get_queue(fr @@ -1507,16 +1507,23 @@ se:%0A%09%09%09%09 +return get_queu
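
The diff hunks in this dataset are character-offset patches with URL-encoded whitespace (%0A is a newline, %09 a tab, %22 a double quote). Decoded, this one inserts the missing `return` before both `get_queue(...)` calls, so the patched `route_for_task` would read roughly as below — a reconstruction from the hunks, not a field stored in the record (the original file indents with tabs, shown here as spaces):

class SiteRouter(object):
    def route_for_task(self, task, args=None, kwargs=None):
        if hasattr(frappe.local, 'site'):
            if kwargs and kwargs.get("event", "").endswith("_long"):
                # route long-running events to the longjobs queue
                return get_queue(frappe.local.site, LONGJOBS_PREFIX)
            else:
                return get_queue(frappe.local.site)
        return None
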

commit: 619b51a579801b8ae211cb062b00149c19af11a7
subject: Make manage task more general
old_file: admin/tasks.py
new_file: admin/tasks.py
old_contents:

# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os

from invoke import task, run

from tasks.utils import pip_install
from website import settings

HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')


@task()
def manage(cmd_str, target=''):
    """Take command string and target (-t) for manage commands

    :param args: ex. runserver, migrate
    """
    manage_cmd = os.path.join(HERE, '..', 'manage.py')
    env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
    cmd = '{} python {} {} {}'.format(env, manage_cmd, cmd_str, target)
    run(cmd, echo=True, pty=True)


@task()
def assets(dev=False, watch=False):
    """Install and build static assets for admin."""
    if os.getcwd() != HERE:
        os.chdir(HERE)
    npm = 'npm install'
    if not dev:
        npm += ' --production'
    run(npm, echo=True)
    bower_install()
    # Always set clean=False to prevent possible mistakes
    # on prod
    webpack(clean=False, watch=watch, dev=dev)


@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
    """Build static assets with webpack."""
    if clean:
        clean_assets()
    if os.getcwd() != HERE:
        os.chdir(HERE)
    webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
    args = [webpack_bin]
    if settings.DEBUG_MODE and dev:
        args += ['--colors']
    else:
        args += ['--progress']
    if watch:
        args += ['--watch']
    config_file = 'webpack.admin.config.js' if dev else 'webpack.prod.config.js'
    args += ['--config {0}'.format(config_file)]
    command = ' '.join(args)
    run(command, echo=True)


@task
def clean_assets():
    """Remove built JS files."""
    public_path = os.path.join(HERE, 'static', 'public')
    js_path = os.path.join(public_path, 'js')
    run('rm -rf {0}'.format(js_path), echo=True)


@task(aliases=['bower'])
def bower_install():
    if os.getcwd() != HERE:
        os.chdir(HERE)
    bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
    run('{} prune'.format(bower_bin), echo=True)
    run('{} install'.format(bower_bin), echo=True)


@task(aliases=['req'])
def requirements():
    req_file = os.path.join(HERE, '..', 'requirements', 'admin.txt')
    run(pip_install(req_file), echo=True)

lang: Python
proba: 0.000954
diff:
@@ -292,19 +292,8 @@ _str -, target='' ):%0A @@ -538,19 +538,16 @@ ython %7B%7D - %7B%7D %7B%7D'.for @@ -578,16 +578,8 @@ _str -, target )%0A
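
Decoded, this patch drops the `target` parameter and its format slot, so `manage` passes the whole command string through; a sketch of the patched task (layout approximate):

@task()
def manage(cmd_str):
    """Take command string and target (-t) for manage commands

    :param args: ex. runserver, migrate
    """
    manage_cmd = os.path.join(HERE, '..', 'manage.py')
    env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
    # one format slot fewer: cmd_str now carries everything
    cmd = '{} python {} {}'.format(env, manage_cmd, cmd_str)
    run(cmd, echo=True, pty=True)
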

commit: 453b53cf3a72e1e982a0b4bf7d21fd12fc143a93
subject: Refactor binary search sols to better search
old_file: alg_peak_1d.py
new_file: alg_peak_1d.py
old_contents:

"""Find a peak position in 1D array.

Support nums is an array of length n. In general, nums[k] is a peak iff
nums[k] > nums[k - 1] and nums[k] > nums[k + 1].
If nums[0] > nums[1], then nums[0] is a peak.
If nums[n - 2] < nums[n - 1], then nums[n - 1] is a peak.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def peak_1d_brute_force(nums):
    """Find peak by naive iteration.

    Time complexity: O(n).
    Space complexity: O(1).
    """
    # Iterate to check element is greater than its neighbors.
    for i in range(len(nums)):
        if ((i == 0 or nums[i] >= nums[i - 1]) and
            (i == len(nums) - 1 or nums[i] >= nums[i + 1])):
            return i


def _binary_search_recur(nums, left, right):
    """Helper function for peak_1d_binary_search_recur()."""
    if right - left == 0:
        return left
    else:
        mid = left + (right - left) // 2
        if nums[mid] < nums[mid + 1]:
            # If mid < mid's right, search right part.
            return _binary_search_recur(nums, mid + 1, right)
        else:
            # Otherwise, search left part.
            return _binary_search_recur(nums, left, mid)


def peak_1d_binary_search_recur(nums):
    """Find peak by recursive binary search algorithm.

    Time complexity: O(logn).
    Space complexity: O(1).
    """
    return _binary_search_recur(nums, 0, len(nums) - 1)


def peak_1d_binary_search_iter(nums):
    """Find peak by iterative binary search algorithm.

    Time complexity: O(logn).
    Space complexity: O(1).
    """
    left, right = 0, len(nums) - 1
    while left < right:
        mid = left + (right - left) // 2
        if nums[mid] < nums[mid + 1]:
            # If mid < mid's right, search right part.
            left = mid + 1
        else:
            # Otherwise, search left part.
            right = mid
    # For left = right.
    return left


def main():
    import time
    import numpy as np

    # numsay of length 5 with peak at 3.
    nums = [0, 1, 2, 4, 3]
    print('nums', nums)

    start_time = time.time()
    print('By brute force: {}'.format(peak_1d_brute_force(nums)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('By recur binary search: {}'
          .format(peak_1d_binary_search_recur(nums)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('By iter binary search: {}'
          .format(peak_1d_binary_search_iter(nums)))
    print('Time: {}'.format(time.time() - start_time))

    np.random.seed(71)
    nums = np.random.permutation(10)
    print('nums', nums)

    start_time = time.time()
    print('By brute force: {}'.format(peak_1d_brute_force(nums)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('By recur binary search: {}'
          .format(peak_1d_binary_search_recur(nums)))
    print('Time: {}'.format(time.time() - start_time))

    start_time = time.time()
    print('By iter binary search: {}'
          .format(peak_1d_binary_search_iter(nums)))
    print('Time: {}'.format(time.time() - start_time))


if __name__ == '__main__':
    main()

lang: Python
proba: 0.000008
diff:
@@ -853,16 +853,8 @@ if - right - lef @@ -858,17 +858,21 @@ left == -0 +right :%0A @@ -885,34 +885,29 @@ rn left%0A -else:%0A +%0A mid = le @@ -928,36 +928,32 @@ left) // 2%0A%0A - - if nums%5Bmid%5D %3C n @@ -966,36 +966,32 @@ d + 1%5D:%0A - # If mid %3C mid's @@ -977,36 +977,34 @@ # If mid %3C -mid' +it s right, search @@ -1007,36 +1007,32 @@ rch right part.%0A - return _ @@ -1081,41 +1081,67 @@ - else:%0A # Otherwise +elif nums%5Bmid%5D %3C nums%5Bmid - 1%5D:%0A # If mid %3C its left , se @@ -1148,36 +1148,32 @@ arch left part.%0A - return _ @@ -1207,17 +1207,78 @@ eft, mid -) + - 1)%0A else:%0A # Else, found peak.%0A return mid %0A%0A%0Adef p @@ -1818,12 +1818,10 @@ d %3C -mid' +it s ri @@ -1885,74 +1885,181 @@ el -se:%0A # Otherwise, search left part.%0A right = +if nums%5Bmid%5D %3C nums%5Bmid - 1%5D:%0A # If mid %3C its left, search left part.%0A right = mid - 1%0A else:%0A # Else, found peak.%0A return mid @@ -2164,18 +2164,16 @@ # nums -ay of leng @@ -2187,16 +2187,18 @@ th peak +4 at 3.%0A
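
Decoded, the hunks turn both search helpers into a three-way test (and fix the data comment to read "peak 4 at 3"); the patched recursive helper would read roughly as follows. The iterative version gets the same elif/else split, assigning right = mid - 1 or returning mid directly.

def _binary_search_recur(nums, left, right):
    """Helper function for peak_1d_binary_search_recur()."""
    if left == right:
        return left

    mid = left + (right - left) // 2

    if nums[mid] < nums[mid + 1]:
        # If mid < its right, search right part.
        return _binary_search_recur(nums, mid + 1, right)
    elif nums[mid] < nums[mid - 1]:
        # If mid < its left, search left part.
        return _binary_search_recur(nums, left, mid - 1)
    else:
        # Else, found peak.
        return mid
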

commit: 50a2ac8e5aeee2c4435707a72c22092e28ecf5ba
subject: Update zeromq_server.py
old_file: examples/zeromq_server.py
new_file: examples/zeromq_server.py
old_contents:

import zmq
from jsonrpcserver import Success, method, dispatch

socket = zmq.Context().socket(zmq.REP)


@method
def ping():
    return Success("pong")


if __name__ == "__main__":
    socket.bind("tcp://*:5000")
    while True:
        request = socket.recv().decode()
        socket.send_string(dispatch(request))

lang: Python
proba: 0.000001
diff:
@@ -1,15 +1,4 @@ -import zmq%0A from @@ -44,16 +44,27 @@ dispatch +%0Aimport zmq %0A%0Asocket

commit: 80a5609b35baecfe8d208b9abec36b25c10bb609
subject: Revert "minor date fix"
old_file: generic/reporting/reports.py
new_file: generic/reporting/reports.py
old_contents:

import datetime, time

from generic.reporting.forms import DateRangeForm


class Column(object):
    report = None
    order = 0
    title = None
    chart_title = None
    chart_subtitle = None
    chart_yaxis = None

    def get_title(self):
        return self.title

    def set_report(self, report):
        self.report = report

    def add_to_report(self, report, key, dictionary):
        pass

    def get_chart(self):
        return None

    def get_redirect(self):
        None

    def get_view_function(self):
        return None

    def get_order(self):
        return self.order

    def __init__(self, order=0, title=None, chart_title=None,
                 chart_subtitle=None, chart_yaxis=None):
        self.order = order
        self.title = title
        self.chart_title = chart_title
        self.chart_subtitle = chart_subtitle
        self.chart_yaxis = chart_yaxis


class BasicDateGetter(object):
    def get_dates(self, request):
        to_ret = {}
        if self.request.POST:
            form = DateRangeForm(self.request.POST)
            if form.is_valid():
                to_ret['start'] = form.cleaned_data['start']
                to_ret['end'] = form.cleaned_data['end']
        return to_ret

    def set_default_dates(self, request, context):
        dates = self.get_dates(request)
        max_date = dates.setdefault('max', datetime.datetime.now())
        min_date = dates.setdefault('min', max_date - datetime.timedelta(days=365))
        min_date = datetime.datetime(min_date.year, min_date.month, 1)
        max_date = datetime.datetime(max_date.year, max_date.month, 1) + datetime.timedelta(days=30)
        start_date = dates.setdefault('start', min_date)
        start_date = datetime.datetime(start_date.year, start_date.month, start_date.day)
        end_date = dates.setdefault('end', max_date)
        end_date = datetime.datetime(end_date.year, end_date.month, end_date.day)
        max_ts = time.mktime(max_date.timetuple())
        min_ts = time.mktime(min_date.timetuple())
        start_ts = time.mktime(start_date.timetuple())
        end_ts = time.mktime(end_date.timetuple())
        context.update({
            'max_ts':max_ts, \
            'min_ts':min_ts, \
            'selected_ts':[(start_ts, 'start',), (end_ts, 'end',)],
            'start_ts':start_ts,
            'end_ts':end_ts,
            'start_date':start_date,
            'end_date':end_date,
            'ts_range':range(long(min_ts), long(max_ts) + 1, 86400), \
        })

    def add_dates_to_context(self, request, context):
        self.set_default_dates(request, context)

lang: Python
proba: 0
diff:
@@ -1593,14 +1593,18 @@ onth + + 1 , 1) -+ +- dat @@ -1628,10 +1628,9 @@ ays= -30 +1 )%0A
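
Decoded, the revert changes the max_date rounding in set_default_dates from "first of the month plus 30 days" back to "last day of the month"; the patched line, roughly:

        # last day of max_date's month: first of next month minus one day
        max_date = datetime.datetime(max_date.year, max_date.month + 1, 1) - datetime.timedelta(days=1)
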

commit: 709e9dddf94e8afa43d908e92f7c7d6bc0817e46
subject: remove commented line
old_file: humid_temp.py
new_file: humid_temp.py
old_contents:

# import Adafruit_Python_DHT
import weather_utils
import Adafruit_DHT


def get_data():
    humidity, temp_c = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)
    temp_f = weather_utils.c_to_f(temp_c)
    return {
        'temp_f': temp_f,
        'temp_c': temp_c,
        'humidity': humidity,
        'dewptf': weather_utils.dewpoint_f(temp_f, humidity)
    }

lang: Python
proba: 0
diff:
@@ -1,33 +1,4 @@ -# import Adafruit_Python_DHT%0A impo

commit: b359d2418210499573705015f057a79d71899da7
subject: Fix for empty raw-data option (#109)
old_file: alertaclient/commands/cmd_send.py
new_file: alertaclient/commands/cmd_send.py
old_contents:

import sys

import click


@click.command('send', short_help='Send an alert')
@click.option('--resource', '-r', metavar='RESOURCE', required=True, help='Resource under alarm')
@click.option('--event', '-e', metavar='EVENT', required=True, help='Event name')
@click.option('--environment', '-E', metavar='ENVIRONMENT', help='Environment eg. Production, Development')
@click.option('--severity', '-s', metavar='SEVERITY', help='Severity eg. critical, major, minor, warning')
@click.option('--correlate', '-C', metavar='EVENT', multiple=True, help='List of related events eg. node_up, node_down')
@click.option('--service', '-S', metavar='SERVICE', multiple=True, help='List of affected services eg. app name, Web, Network, Storage, Database, Security')
@click.option('--group', '-g', metavar='GROUP', help='Group event by type eg. OS, Performance')
@click.option('--value', '-v', metavar='VALUE', help='Event value')
@click.option('--text', '-t', metavar='DESCRIPTION', help='Description of alert')
@click.option('--tag', '-T', 'tags', multiple=True, metavar='TAG', help='List of tags eg. London, os:linux, AWS/EC2')
@click.option('--attributes', '-A', multiple=True, metavar='KEY=VALUE', help='List of attributes eg. priority=high')
@click.option('--origin', '-O', metavar='ORIGIN', help='Origin of alert in form app/host')
@click.option('--type', metavar='EVENT_TYPE', help='Event type eg. exceptionAlert, performanceAlert, nagiosAlert')
@click.option('--timeout', metavar='EXPIRES', type=int, help='Seconds before an open alert will be expired')
@click.option('--raw-data', metavar='STRING', help='Raw data of orignal alert eg. SNMP trap PDU. \'@\' to read from file, \'-\' to read from stdin')
@click.option('--customer', metavar='STRING', help='Customer (Admin only)')
@click.pass_obj
def cli(obj, resource, event, environment, severity, correlate, service, group,
        value, text, tags, attributes, origin, type, timeout, raw_data, customer):
    """Send an alert."""
    client = obj['client']

    # read raw data from file or stdin
    if raw_data.startswith('@'):
        raw_data_file = raw_data.lstrip('@')
        with open(raw_data_file, 'r') as f:
            raw_data = f.read()
    elif raw_data == '-':
        raw_data = sys.stdin.read()

    try:
        id, alert, message = client.send_alert(
            resource=resource,
            event=event,
            environment=environment,
            severity=severity,
            correlate=correlate,
            service=service,
            group=group,
            value=value,
            text=text,
            tags=tags,
            attributes=dict(a.split('=') for a in attributes),
            origin=origin,
            type=type,
            timeout=timeout,
            raw_data=raw_data,
            customer=customer
        )
    except Exception as e:
        click.echo('ERROR: {}'.format(e))
        sys.exit(1)

    if alert:
        if alert.repeat:
            message = '{} duplicates'.format(alert.duplicate_count)
        else:
            message = '{} -> {}'.format(alert.previous_severity, alert.severity)
    click.echo('{} ({})'.format(id, message))

lang: Python
proba: 0.000001
diff:
@@ -2036,16 +2036,29 @@ %0A if +raw_data and raw_data
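
Decoded, the patch guards against a missing --raw-data value (previously raw_data.startswith('@') would raise on None); the patched block:

    # read raw data from file or stdin
    if raw_data and raw_data.startswith('@'):
        raw_data_file = raw_data.lstrip('@')
        with open(raw_data_file, 'r') as f:
            raw_data = f.read()
    elif raw_data == '-':
        raw_data = sys.stdin.read()
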

commit: c06e904588503768331a580d8766bf4a47f83574
subject: add option "-k" to limit tests in test-install command (#2635)
old_file: allennlp/commands/test_install.py
new_file: allennlp/commands/test_install.py
old_contents:

"""
The ``test-install`` subcommand verifies
an installation by running the unit tests.

.. code-block:: bash

    $ allennlp test-install --help
    usage: allennlp test-install [-h] [--run-all]
                                 [--include-package INCLUDE_PACKAGE]

    Test that installation works by running the unit tests.

    optional arguments:
      -h, --help            show this help message and exit
      --run-all             By default, we skip tests that are slow or download
                            large files. This flag will run all tests.
      --include-package INCLUDE_PACKAGE
                            additional packages to include
"""

import argparse
import logging
import os
import pathlib

import pytest

import allennlp
from allennlp.commands.subcommand import Subcommand

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name


class TestInstall(Subcommand):
    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        # pylint: disable=protected-access
        description = '''Test that installation works by running the unit tests.'''
        subparser = parser.add_parser(
                name, description=description, help='Run the unit tests.')

        subparser.add_argument('--run-all', action="store_true",
                               help="By default, we skip tests that are slow "
                                    "or download large files. This flag will run all tests.")

        subparser.set_defaults(func=_run_test)

        return subparser


def _get_module_root():
    return pathlib.Path(allennlp.__file__).parent


def _run_test(args: argparse.Namespace):
    initial_working_dir = os.getcwd()
    module_parent = _get_module_root().parent
    logger.info("Changing directory to %s", module_parent)
    os.chdir(module_parent)
    test_dir = os.path.join(module_parent, "allennlp")
    logger.info("Running tests at %s", test_dir)
    if args.run_all:
        # TODO(nfliu): remove this when notebooks have been rewritten as markdown.
        exit_code = pytest.main([test_dir, '--color=no', '-k', 'not notebooks_test'])
    else:
        exit_code = pytest.main([test_dir, '--color=no',
                                 '-k', 'not sniff_test and not notebooks_test',
                                 '-m', 'not java'])
    # Change back to original working directory after running tests
    os.chdir(initial_working_dir)
    exit(exit_code)

lang: Python
proba: 0
diff:
@@ -1478,16 +1478,158 @@ tests.%22) +%0A subparser.add_argument('-k', type=str, default=None,%0A help=%22Limit tests by setting pytest -k argument%22) %0A%0A @@ -2093,169 +2093,290 @@ ir)%0A - if args.run_all:%0A # TODO(nfliu): remove this when notebooks have been rewritten as markdown.%0A exit_code = pytest.main(%5Btest_dir, '--color=no', +%0A if args.k:%0A pytest_k = %5B'-k', args.k%5D%0A pytest_m = %5B'-m', 'not java'%5D%0A if args.run_all:%0A logger.warning(%22the argument '-k' overwrites '--run-all'.%22)%0A elif args.run_all:%0A pytest_k = %5B%5D%0A pytest_m = %5B%5D%0A else:%0A pytest_k = %5B '-k' @@ -2386,40 +2386,60 @@ not -notebooks +sniff _test'%5D -) %0A + -else:%0A + pytest_m = %5B'-m', 'not java'%5D%0A%0A @@ -2489,108 +2489,33 @@ =no' -, '-k', 'not sniff_test and not notebooks_test',%0A '-m', 'not java'%5D) +%5D + pytest_k + pytest_m)%0A %0A
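
Decoded, the patch registers a pass-through -k flag and rebuilds the pytest invocation from argument lists (the notebooks_test exclusions disappear in the same change); roughly, in add_subparser:

    subparser.add_argument('-k', type=str, default=None,
                           help="Limit tests by setting pytest -k argument")

and in _run_test:

    if args.k:
        pytest_k = ['-k', args.k]
        pytest_m = ['-m', 'not java']
        if args.run_all:
            logger.warning("the argument '-k' overwrites '--run-all'.")
    elif args.run_all:
        pytest_k = []
        pytest_m = []
    else:
        pytest_k = ['-k', 'not sniff_test']
        pytest_m = ['-m', 'not java']

    exit_code = pytest.main([test_dir, '--color=no'] + pytest_k + pytest_m)
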

commit: c7c39f9c32ca6f52ba60335ebbaa2b960e74fa8f
subject: handle conversion better
old_file: twtoolbox/streaming.py
new_file: twtoolbox/streaming.py
old_contents:

# Twitter Toolbox for Python
# Copyright 2016 Hugo Hromic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Twitter Public Streaming API module."""

import logging
import json
from tweepy import StreamListener, Stream
from .helpers import init_logger, read_config, get_oauth_api
from .helpers import ensure_at_least_one

# module logging
LOGGER = logging.getLogger(__name__)
init_logger(LOGGER)


class PassThroughStreamListener(StreamListener):
    """Stream Listener that passes incoming messages directly to a writer."""

    def __init__(self, writer, limit=0, **kwargs):
        super(self.__class__, self).__init__(**kwargs)
        self.writer = writer
        self.limit = limit
        self.num_written = 0

    def on_status(self, status):
        """Write an incoming Tweet to the writer."""
        self.writer.write("%s\n" % json.dumps(status._json, separators=(",", ":")))  # pylint: disable=protected-access
        self.num_written += 1
        if self.num_written == self.limit:
            return False
        return True

    def on_error(self, status_code):
        """Handle stream errors."""
        if status_code == 420:
            LOGGER.error("too many connection attempts, stopping stream")
            return False


def _get_stream(writer, config, limit=0):
    api = get_oauth_api(config)
    listener = PassThroughStreamListener(writer, limit=limit)
    return Stream(auth=api.auth, listener=listener)


def get_sample(writer):
    """Get hydrated Tweet-objects from the sample Streaming API endpoint."""
    LOGGER.info("get_sample() starting")

    # initialize a Streaming API object and run the endpoint
    config = read_config()
    limit = config.getint("sample", "limit")
    stream = _get_stream(writer, config, limit=limit)
    stream.sample()

    # finished
    LOGGER.info("get_sample() finished")


def get_filter(writer, follow=None, track=None, locations=None):
    """Get hydrated Tweet-objects from the filter Streaming API endpoint."""
    LOGGER.info("get_filter() starting")
    ensure_at_least_one(follow=follow, track=track, locations=locations)

    # initialize a Streaming API object and run the endpoint
    config = read_config()
    limit = config.getint("filter", "limit")
    stream = _get_stream(writer, config, limit=limit)
    stream.filter(follow=[str(f) for f in follow], track=track, locations=locations)

    # finished
    LOGGER.info("get_filter() finished")


def get_firehose(writer):
    """Get hydrated Tweet-objects from the firehose Streaming API endpoint."""
    LOGGER.info("get_firehose() starting")

    # initialize a Streaming API object and run the endpoint
    config = read_config()
    limit = config.getint("firehose", "limit")
    stream = _get_stream(writer, config, limit=limit)
    stream.firehose()

    # finished
    LOGGER.info("get_firehose() finished")

lang: Python
proba: 0
diff:
@@ -2582,32 +2582,90 @@ tions=locations) +%0A follow = %5Bstr(f) for f in follow%5D if follow else None %0A%0A # initiali @@ -2866,32 +2866,14 @@ low= -%5Bstr(f) for f in follow -%5D , tr
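
Decoded, the patch normalizes follow once, right after the argument check, so stream.filter no longer crashes when follow is None; the patched section of get_filter:

    ensure_at_least_one(follow=follow, track=track, locations=locations)
    # convert follow ids to strings up front; keep None if not given
    follow = [str(f) for f in follow] if follow else None

    # initialize a Streaming API object and run the endpoint
    config = read_config()
    limit = config.getint("filter", "limit")
    stream = _get_stream(writer, config, limit=limit)
    stream.filter(follow=follow, track=track, locations=locations)
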

commit: 71769c444ef911cc0303762e23e385bea865b4ef
subject: Add docs for server.py
old_file: typedjsonrpc/server.py
new_file: typedjsonrpc/server.py
old_contents:

"""Contains the Werkzeug server for debugging and WSGI compatibility."""
from __future__ import absolute_import, print_function

import json

from werkzeug.debug import DebuggedApplication
from werkzeug.debug.tbtools import get_current_traceback
from werkzeug.exceptions import abort
from werkzeug.serving import run_simple
from werkzeug.routing import Map, Rule
from werkzeug.wrappers import Request, Response

__all__ = ["Server", "DebuggedJsonRpcApplication"]

DEFAULT_API_ENDPOINT_NAME = "/api"


class Server(object):
    """A basic WSGI-compatible server for typedjsonrpc endpoints."""

    def __init__(self, registry, endpoint=DEFAULT_API_ENDPOINT_NAME):
        """
        :param registry: The jsonrpc registry to use
        :type registry: typedjsonrpc.registry.Registry
        :param endpoint: (optional) The endpoint to publish jsonrpc endpoints. Default "/api".
        :type endpoint: str
        """
        self._registry = registry
        self.endpoint = endpoint
        self._url_map = Map([Rule(endpoint, endpoint=self.endpoint)])

    def _dispatch_request(self, request):
        adapter = self._url_map.bind_to_environ(request.environ)
        endpoint, _ = adapter.match()
        if endpoint == self.endpoint:
            json_output = self._registry.dispatch(request)
            return Response(json_output, mimetype="application/json")
        else:
            abort(500)

    def wsgi_app(self, environ, start_response):
        """A basic WSGI app"""
        request = Request(environ)
        response = self._dispatch_request(request)
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)

    def run(self, host, port, **options):
        """For debugging purposes, you can run this as a standalone server"""
        debugged = DebuggedJsonRpcApplication(self, evalex=True)
        run_simple(host, port, debugged, use_reloader=True, **options)

    @staticmethod
    def handle_json_error(environ, start_response, traceback):
        """Handles a json error specially by returning the id which links to the failure"""
        response = Response(json.dumps({"traceback_id": traceback.id}), mimetype="application/json")
        return response(environ, start_response)


class DebuggedJsonRpcApplication(DebuggedApplication):
    """A jsonrpc-specific debugged application.

    This differs from DebuggedApplication since the normal debugger assumes you
    are hitting the endpoint from a web browser.

    A returned response will be JSON of the form: {"traceback_id": <id>} which
    you can use to hit the endpoint http://<host>:<port>/debug/<traceback_id>.

    NOTE: This should never be used in production because the user gets shell
    access in debug mode.
    """

    def __init__(self, app, **kwargs):
        """
        :param app: The wsgi application to be debugged
        :type app: object
        :param **kwargs:The arguments to pass to the DebuggedApplication
        """
        super(DebuggedJsonRpcApplication, self).__init__(app, **kwargs)
        self._debug_map = Map([Rule("/debug/<int:traceback_id>", endpoint="debug")])

    def debug_application(self, environ, start_response):
        """Run the application and preserve the traceback frames.

        :type environ: dict[str, object]
        :type start_response: (str, list[(str, str)]) -> None
        :rtype: generator[str]
        """
        app_iter = None
        adapter = self._debug_map.bind_to_environ(environ)
        if adapter.test():
            _, args = adapter.match()
            yield self.handle_debug(environ, start_response, args["traceback_id"])
        else:
            try:
                app_iter = self.app(environ, start_response)
                for item in app_iter:
                    yield item
                if hasattr(app_iter, 'close'):
                    app_iter.close()
            except Exception:  # pylint: disable=broad-except
                if hasattr(app_iter, 'close'):
                    app_iter.close()
                traceback = get_current_traceback(skip=1,
                                                  show_hidden_frames=self.show_hidden_frames,
                                                  ignore_system_exceptions=True)
                for frame in traceback.frames:
                    self.frames[frame.id] = frame
                self.tracebacks[traceback.id] = traceback
                error_iter = self.app.handle_json_error(environ, start_response, traceback)
                for item in error_iter:
                    yield item
                traceback.log(environ['wsgi.errors'])

    def handle_debug(self, environ, start_response, traceback_id):
        """Handles the debug endpoint for inspecting previous errors.

        :type environ: dict[str, object]
        :type start_response: (str, list[(str, str)]) -> NoneType
        :param traceback_id: The id of the traceback to inspect
        :type traceback_id: int
        """
        if traceback_id not in self.tracebacks:
            abort(404)
        traceback = self.tracebacks[traceback_id]
        try:
            start_response('500 INTERNAL SERVER ERROR', [
                ('Content-Type', 'text/html; charset=utf-8'),
                # Disable Chrome's XSS protection, the debug
                # output can cause false-positives.
                ('X-XSS-Protection', '0'),
            ])
        except Exception:  # pylint: disable=broad-except
            # if we end up here there has been output but an error
            # occurred. in that situation we can do nothing fancy any
            # more, better log something into the error log and fall
            # back gracefully.
            environ['wsgi.errors'].write(
                'Debugging middleware caught exception in streamed '
                'response at a point where response headers were already '
                'sent.\n')
        else:
            rendered = traceback.render_full(evalex=self.evalex, secret=self.secret)
            return rendered.encode('utf-8', 'replace')

lang: Python
proba: 0
diff:
@@ -3322,39 +3322,204 @@ : -type environ: dict%5Bstr, object%5D +param environ: The environment which is passed into the wsgi application%0A :type environ: dict%5Bstr, object%5D%0A :param start_response: The start_response function of the wsgi application %0A @@ -4999,39 +4999,204 @@ : -type environ: dict%5Bstr, object%5D +param environ: The environment which is passed into the wsgi application%0A :type environ: dict%5Bstr, object%5D%0A :param start_response: The start_response function of the wsgi application %0A

commit: 411b95793ff5e73e14949aabc3466a9f39336267
subject: Figure out what's in that resp
old_file: micro.py
new_file: micro.py
old_contents:

#!/usr/bin/env python

from __future__ import division
from flask import Flask, render_template, url_for, session, request, g, \
    redirect, flash
from flaskext.oauth import OAuth
import os

#setup code
app = Flask(__name__)
app.secret_key = 'pagerduty'
oauth = OAuth()

facebook = oauth.remote_app('facebook',
    base_url='https://graph.facebook.com/',
    request_token_url=None,
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth',
    consumer_key='187344811378870',
    consumer_secret='7f75c822a976c064f479e4fe93c68d9d',
    request_token_params={'scope': 'read_stream'}
)

#Ok, let's see... We need to integrate some Graph API in here. I may
#not get around to Twitter by Tuesday. I need to look into a pythonic
#replacement for PHP's strtotime(), then use that to request the
#correct wall posts/news feed for the date entered.

##Conventions I will be using:
#g.user is the oauth access token
#access_token is the key (for dicts) that I will be using to store the
#access token under.

####Load from cookie
@app.before_request
def before_request():
    g.user = None
    if 'access_token' in session:
        g.user = session['access_token']

####Views
@app.route('/')
def index():
    name = None
    if g.user is not None:
        resp = facebook.get('/me')
        if resp.status == 200:
            name = resp.data['name']
        else:
            flash('Unable to get news feed data.')
    return render_template('index.html', name=name)

@app.route('/login')
def login():
    if g.user is None:
        return facebook.authorize(callback=url_for('authorized',
            next=request.args.get('next') or request.referrer or None,
            _external=True))
    else:
        return redirect(url_for('index'))

@app.route('/logout')
def logout():
    session.pop('access_token', None)
    g.user = None
    return redirect(url_for('index'))

@app.route('/authorized')
@facebook.authorized_handler
def authorized(resp):
    next_url = request.args.get('next') or url_for('index')

    if resp is None:
        flash('You need to allow us to pull your data!')
        return redirect(next_url)

    g.user = resp['access_token']
    session['access_token'] = g.user
    return redirect(next_url)

@app.route('/<int:month>/<int:day>/<int:year>')
def display(month, day, year):
    return "Stella is estudpido!"

@app.errorhandler(404)
def not_found(error):
    return render_template('404.html'), 404

####Non-view handlers
@facebook.tokengetter
def get_fb_token():
    return g.user, ''

if __name__=='__main__':
    #Set up for Heroku
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port, debug=True)

lang: Python
proba: 0.99993
diff:
@@ -1956,16 +1956,83 @@ 'index') +%0A%0A%09#Figure out what resp is (i.e. print str(resp))%0A%09print str(resp) %0A%09%0A%09if r

commit: 5a0c6904c23a84d1ee931e3ef82f297197141222
subject: add twitter and flags
old_file: light.py
new_file: light.py
old_contents:

#!/usr/bin/python
"""
light.py
Read analog values from the photoresistor
=======
run with: sudo ./light.py

Copyright 2014 David P. Bradway (dpb6@duke.edu)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "David Bradway"
__email__ = "dpb6@duke.edu"
__license__ = "Apache v2.0"


def main():
    import Adafruit_BBIO.ADC as ADC
    import time
    import datetime
    import numpy as np
    import matplotlib.pyplot as plt

    sensor_pin = 'P9_40'
    ADC.setup()

    #plt.axis([0, 1000, 0, 1])
    plt.ion()
    plt.show()

    print('Time\tReading\t\tVolts')

    i=0
    while True:
        now = datetime.datetime.now()
        reading = ADC.read(sensor_pin)
        volts = reading * 1.800
        print('%s\t%f\t%f' % (now.second, reading, volts))
        plt.scatter(i, volts)
        plt.draw()
        time.sleep(0.1)
        i=i+1


if __name__ == "__main__":
    main()

lang: Python
proba: 0
diff:
@@ -911,16 +911,158 @@ as plt%0A + import collections%0A import twitter%0A %0A # Set Flags%0A plotit = 0%0A debug = 0%0A printit = 1%0A tweetit = 1%0A %0A # Set ADC %0A sen @@ -1079,21 +1079,16 @@ 'P9_40'%0A - %0A ADC. @@ -1092,24 +1092,172 @@ DC.setup()%0A%0A + # Set data collection buffer%0A buflength = 15%0A circbuf=collections.deque(maxlen=buflength)%0A %0A # Plot the raw data%0A if plotit:%0A #plt.axi @@ -1272,15 +1272,23 @@ 00, -0 +-.1 , 1 +.7 %5D)%0A + @@ -1297,24 +1297,28 @@ t.ion()%0A + + plt.show()%0A%0A @@ -1320,242 +1320,1541 @@ w()%0A + %0A -print('Time%5CtReading%5Ct%5CtVolts')%0A%0A i=0 %0A while True:%0A now = datetime.datetime.now()%0A reading = ADC.read(sensor_pin)%0A volts = reading * 1.800%0A print('%25s%5Ct%25f%5Ct%25f' %25 (now.second, reading, volts))%0A +# Print table of data%0A if debug:%0A print('Time%5CtVolts%5Ct%5CtMedian%5Ct%5CtVariance%5Ct%5CtStable%5Ct%5Ct%5Ctnewstateflag')%0A %0A if tweetit:%0A api = twitter.Api(consumer_key='',%0A consumer_secret='',%0A access_token_key='',%0A access_token_secret='')%0A %0A if debug:%0A print api.VerifyCredentials()%0A %0A i = 0%0A med = 0%0A stable = 0%0A variance = 0%0A newstateflag = False%0A%0A while True:%0A now = datetime.datetime.now()%0A reading = ADC.read(sensor_pin)%0A volts = reading * 1.800%0A circbuf.append(volts)%0A med = np.median(circbuf)%0A variance = np.var(circbuf)%0A %0A if variance %3C 0.001:%0A stable = med%0A newstateflag = False%0A %0A if variance %3E 0.01 and newstateflag == False:%0A if med %3E stable:%0A update = 'Lights on %25s' %25 (str(now))%0A if printit:%0A print(update)%0A if tweetit:%0A status = api.PostUpdate(update)%0A %0A newstateflag = True%0A elif med %3C stable:%0A update = 'Lights off %25s' %25 (str(now))%0A if printit:%0A print(update)%0A if tweetit:%0A status = api.PostUpdate(update)%0A %0A newstateflag = True%0A %0A if debug:%0A print('%25s%5Ct%25f%5Ct%25f%5Ct%25f%5Ct%25f%5Ct%25f' %25 (now.second, volts, med, variance, stable, newstateflag))%0A %0A if plotit:%0A @@ -2872,23 +2872,25 @@ tter(i, -volts)%0A +med)%0A @@ -2896,24 +2896,33 @@ plt.draw()%0A + %0A time @@ -2932,11 +2932,11 @@ eep( -0.1 +.25 )%0A

commit: e6b3a02376f832ee1bf79905ececfc76933197fc
subject: Fix client
old_file: client/client.py
new_file: client/client.py
old_contents:

import socket
import os
import json
import time


class Client():
    def __init__(self):
        abs_prefix = os.path.join(os.path.dirname(__file__), "../data")
        with open(abs_prefix + '/config.json','r') as f:
            conf = json.load(f)
            self.port = conf["port"]
            self.host = conf["host"]
            self.password = conf["password"]
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock = socket.socket()
        self.sock.connect((self.host, self.port))
        self.sock.send(self.password)
        assert self.sock.recv(1024) == "welcome"

    def Send(self, msg):
        self.sock.send(msg)
        if msg == "quit":
            self.sock.Close()
            return False
        data = self.sock.recv(1024)
        print data
        return True

    def Prompt(self):
        return self.Send(raw_input("boom> "))

    def Close(self):
        self.sock.close()


if __name__ == "__main__":
    c = Client()
    print "You are now connected to a TiaraBoom server."
    print "This server enforces NO RATE LIMITS!!!"
    print "Please respect that Tiara does not wish to respond to more than a few requests per hour!"
    print "If you receive an error other than a syntax error, or the server does not respond, please DO NOT TRY AGAIN"
    print "Instead, contact support at 513-284-5321"
    print "Enter a command, or try \"help\""
    print "to exit, type quit"
    while True:
        if not c.Prompt():
            break
    print "good bye"

lang: Python
proba: 0.000001
diff:
@@ -147,20 +147,22 @@ _), %22../ -data +client %22)%0A @@ -315,24 +315,64 @@ onf%5B%22host%22%5D%0A + print (self.host,self.port)%0A @@ -408,79 +408,8 @@ d%22%5D%0A - server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A @@ -657,16 +657,16 @@ %22quit%22:%0A + @@ -674,21 +674,16 @@ self. -sock. Close()%0A
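
Decoded, the patch points the config path at ../client, prints the host/port pair, drops the unused server socket, and calls the class's own Close() instead of the nonexistent socket method; the patched methods, roughly (the exact placement of the added print inside the with block is inferred from the hunk offsets):

    def __init__(self):
        abs_prefix = os.path.join(os.path.dirname(__file__), "../client")
        with open(abs_prefix + '/config.json','r') as f:
            conf = json.load(f)
            self.port = conf["port"]
            self.host = conf["host"]
            print (self.host,self.port)
            self.password = conf["password"]
        self.sock = socket.socket()
        self.sock.connect((self.host, self.port))
        self.sock.send(self.password)
        assert self.sock.recv(1024) == "welcome"

    def Send(self, msg):
        self.sock.send(msg)
        if msg == "quit":
            self.Close()
            return False
        data = self.sock.recv(1024)
        print data
        return True
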

commit: d4d4a065323d61073eb80a4f7f04d2902a33e4fe
subject: fix ntb publish service get_filename for events
old_file: server/ntb/publish/ntb_publish_service.py
new_file: server/ntb/publish/ntb_publish_service.py
old_contents:

# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2016 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

from superdesk.publish.publish_service import PublishService, set_publish_service
from xml.etree import ElementTree as ET
import logging

logger = logging.getLogger(__name__)


class NTBPublishService(PublishService):
    DEFAULT_EXT = "xml"

    @classmethod
    def get_filename(cls, item):
        # we reparse formatted item to get filename from <meta name="filename"> element
        # this way we are sure that we have the exact same filename
        try:
            xml = ET.fromstring(item['formatted_item'])
        except (KeyError, ET.ParseError) as e:
            filename = None
            logger.error("Error on parsing, can't get filename: {}".format(e))
        else:
            filename = xml.find('head/meta[@name="filename"]').attrib['content']
        if not filename:
            return super(NTBPublishService, cls).get_filename(item)
        return filename


set_publish_service(NTBPublishService)

lang: Python
proba: 0
diff:
@@ -986,16 +986,37 @@ else:%0A + try:%0A @@ -1088,16 +1088,83 @@ ntent'%5D%0A + except AttributeError:%0A filename = None%0A
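
Decoded, the patch wraps the xml.find(...) lookup so a document without a <meta name="filename"> element (where find returns None) falls back to the default filename instead of raising; the patched else branch:

        else:
            try:
                filename = xml.find('head/meta[@name="filename"]').attrib['content']
            except AttributeError:
                # no filename meta element: fall through to the superclass default
                filename = None
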

commit: 936e6427c48ebbf16a12fb3afa0f48ad6c397046
subject: fix feature input to hb-view
old_file: Lib/diffenator/utils.py
new_file: Lib/diffenator/utils.py
old_contents:

import subprocess
from PIL import Image
from fontTools.varLib.mutator import instantiateVariableFont
try:
    from StringIO import StringIO
except ImportError:  # py3 workaround
    from io import BytesIO as StringIO


def render_string(font, string, features=None, pt_size=128):
    """Use Harfbuzz to render a string"""
    cmd = ['hb-view', '--font-size=%d' % pt_size]
    if font.instance_coordinates:
        location = ''
        for axis, val in font.instance_coordinates.items():
            location += '{}={}, '.format(axis, val)
        cmd += ['--variations=%s' % location]
    if features:
        cmd += ['--features=%s' % features]
    cmd += [font.path, u'{}'.format(string)]
    try:
        img = StringIO(subprocess.check_output(cmd))
        return Image.open(img)
    except FileNotFoundError:
        raise OSError(
            "hb-view was not found. Check if Harbuzz is installed."
        )

lang: Python
proba: 0
diff:
@@ -609,42 +609,295 @@ -cmd += %5B'--features=%25s' %25 features +# ignore aalt tag. This feat is used so users can access glyphs%0A # via a glyph pallette.%0A # https://typedrawers.com/discussion/1319/opentype-aalt-feature%0A # glyphsapp will autogen this feature%0A cmd += %5B'--features=%25s' %25 ','.join(features).replace(%22aalt,%22, %22%22) %5D%0A
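
Decoded, the patch joins the features sequence into hb-view's comma-separated syntax with the aalt tag stripped out; the patched branch (comments are part of the patch itself):

    if features:
        # ignore aalt tag. This feat is used so users can access glyphs
        # via a glyph pallette.
        # https://typedrawers.com/discussion/1319/opentype-aalt-feature
        # glyphsapp will autogen this feature
        cmd += ['--features=%s' % ','.join(features).replace("aalt,", "")]
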

commit: 1d51a59a6e1406852a081585e252afab892e9756
subject: Handle cases where the states don't exist when getting dialogue config's states
old_file: go/apps/dialogue/vumi_app.py
new_file: go/apps/dialogue/vumi_app.py
old_contents:

# -*- test-case-name: go.apps.dialogue.tests.test_vumi_app -*-
import pkg_resources
import json

from vumi.application.sandbox import SandboxResource

from go.apps.jsbox.vumi_app import JsBoxConfig, JsBoxApplication


def determine_endpoints(poll):
    names = set(
        s['channel_type'] for s in poll['states']
        if s['type'] == 'send')
    types = poll.get('channel_types', [])
    return [t['label'] for t in types if t['name'] in names]


def dialogue_js_config(conv):
    poll = conv.config.get("poll", {})
    config = {
        "name": "poll-%s" % conv.key,
        "endpoints": determine_endpoints(poll)
    }
    poll_metadata = poll.get('poll_metadata', {})
    delivery_class = poll_metadata.get('delivery_class')
    if delivery_class is not None:
        config['delivery_class'] = delivery_class
    return config


class PollConfigResource(SandboxResource):
    """Resource that provides access to dialogue conversation config."""

    def _get_config(self, conversation):
        """Returns a virtual sandbox config for the given dialogue.

        :returns: JSON string containg the configuration dictionary.
        """
        return json.dumps(dialogue_js_config(conversation))

    def _get_poll(self, conversation):
        """Returns the poll definition from the given dialogue.

        :returns: JSON string containing the poll definition.
        """
        poll = conversation.config.get("poll")
        return poll

    def handle_get(self, api, command):
        key = command.get("key")
        if key is None:
            return self.reply(command, success=False)
        conversation = self.app_worker.conversation_for_api(api)
        if key == "config":
            value = self._get_config(conversation)
        elif key == "poll":
            value = self._get_poll(conversation)
        else:
            # matches what is returned for unknown keys by
            # go.apps.jsbox.vumi_app.ConversationConfigResource
            value = {}
        return self.reply(command, value=value, success=True)


class DialogueConfig(JsBoxConfig):
    _cached_javascript = None

    @property
    def javascript(self):
        if self._cached_javascript is None:
            self._cached_javascript = pkg_resources.resource_string(
                "go.apps.dialogue", "vumi_app.js")
        return self._cached_javascript


class DialogueApplication(JsBoxApplication):
    CONFIG_CLASS = DialogueConfig

    worker_name = 'dialogue_application'

    def get_jsbox_js_config(self, conv):
        return dialogue_js_config(conv)

lang: Python
proba: 0.000002
diff:
@@ -311,17 +311,21 @@ poll -%5B +.get( 'states' %5D if @@ -320,17 +320,21 @@ 'states' -%5D +, %5B%5D) if s%5B't
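
Decoded, the patch swaps the poll['states'] subscript for poll.get('states', []), so polls without states yield no endpoints instead of a KeyError; the patched function:

def determine_endpoints(poll):
    names = set(
        s['channel_type'] for s in poll.get('states', [])
        if s['type'] == 'send')
    types = poll.get('channel_types', [])
    return [t['label'] for t in types if t['name'] in names]
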

commit: 7bce0c3e1c2cc0ca8f37916ad88abd0f91ba2c38
subject: Update Keras.py
old_file: tanh/Keras.py
new_file: tanh/Keras.py
old_contents:

from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical

batch_size = 128
epochs = 30
learning_rate = 0.5

(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape([x_train.shape[0], -1]).astype('float32') / 255
y_train = to_categorical(y_train, num_classes=10)
x_test = x_test.reshape([x_test.shape[0], -1]).astype('float32') / 255
y_test = to_categorical(y_test, num_classes=10)

model = Sequential()
model.add(Dense(512, activation='tanh', input_shape=(784,),
                kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(512, activation='tanh',
                kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(10, activation='softmax',
                kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=learning_rate),
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(x_test, y_test))

lang: Python
proba: 0
diff:
@@ -1211,18 +1211,8 @@ %0D%0A%0D%0A -history = mode @@ -1237,26 +1237,16 @@ - - y_train, @@ -1257,26 +1257,16 @@ - - batch_si @@ -1291,26 +1291,16 @@ - - epochs=e @@ -1307,26 +1307,16 @@ pochs,%0D%0A -

commit: ac545eb8b21ed1bfadc5649fc84e58da10a35846
subject: Change `compose` to return a string instead of a generator
old_file: tinysrt.py
new_file: tinysrt.py
old_contents:

#!/usr/bin/env python

'''A tiny library for parsing, modifying, and composing SRT files.'''

import functools
import re
from datetime import timedelta
from itertools import groupby

SUBTITLE_PATTERN = r'(\d+)\n(\d+:\d+:\d+,\d+) --> (\d+:\d+:\d+,\d+)\n(.+?)\n\n'
SUBTITLE_REGEX = re.compile(SUBTITLE_PATTERN, re.MULTILINE | re.DOTALL)


@functools.total_ordering
class Subtitle(object):
    def __init__(self, index, start, end, content):
        self.index = index
        self.start = start
        self.end = end
        self.content = content

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __lt__(self, other):
        return self.start < other.start

    def to_srt(self):
        return '%d\n%s --> %s\n%s\n\n' % (
            self.index, timedelta_to_srt_timestamp(self.start),
            timedelta_to_srt_timestamp(self.end), self.content,
        )


def timedelta_to_srt_timestamp(timedelta_timestamp):
    '''Convert a timedelta to an SRT timestamp.'''
    hrs, remainder = divmod(timedelta_timestamp.seconds, 3600)
    mins, secs = divmod(remainder, 60)
    msecs = timedelta_timestamp.microseconds // 1000
    return '%02d:%02d:%02d,%03d' % (hrs, mins, secs, msecs)


def srt_timestamp_to_timedelta(srt_timestamp):
    '''Convert an SRT timestamp to a timedelta.'''
    hrs, mins, secs, msecs = map(int, re.split('[,:]', srt_timestamp))
    return timedelta(hours=hrs, minutes=mins, seconds=secs, milliseconds=msecs)


def parse(srt):
    '''Convert an SRT formatted string to a generator of Subtitle objects.'''
    for match in SUBTITLE_REGEX.finditer(srt):
        raw_index, raw_start, raw_end, content = match.groups()
        yield Subtitle(
            index=int(raw_index), start=srt_timestamp_to_timedelta(raw_start),
            end=srt_timestamp_to_timedelta(raw_end), content=content,
        )


def parse_file(srt_stream):
    '''Parse an SRT formatted stream into Subtitle objects.'''
    for is_sep, lines in groupby(srt_stream, lambda line: line != '\n'):
        if is_sep:
            srt_block = ''.join(lines) + '\n'
            subtitle, = parse(srt_block)
            yield subtitle


def compose(subtitles):
    '''Convert an iterator of Subtitle objects to SRT formatted strings.'''
    return (subtitle.to_srt() for subtitle in subtitles)


def compose_file(subtitles, srt_stream):
    for srt_block in compose(subtitles):
        srt_stream.write(srt_block)

lang: Python
proba: 0
diff:
@@ -2266,16 +2266,23 @@ return +''.join (subtitl @@ -2375,28 +2375,19 @@ or s -rt_block in compose( +ubtitle in subt @@ -2387,25 +2387,24 @@ in subtitles -) :%0A sr @@ -2419,18 +2419,26 @@ .write(s -rt_block +ubtitle.to_srt() )%0A
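
Decoded, compose now joins into a single string (matching the commit subject) and compose_file iterates the subtitles directly rather than through compose; the patched functions:

def compose(subtitles):
    '''Convert an iterator of Subtitle objects to SRT formatted strings.'''
    return ''.join(subtitle.to_srt() for subtitle in subtitles)


def compose_file(subtitles, srt_stream):
    for subtitle in subtitles:
        srt_stream.write(subtitle.to_srt())
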
7977939caa8b521bd99112a3d368e343db3169cd
Fix a bug about losing the owner of a password when doing a modification on the password
yithlibraryserver/views.py
yithlibraryserver/views.py
import json import bson from pyramid.httpexceptions import HTTPBadRequest from pyramid.view import view_config, view_defaults from yithlibraryserver.errors import password_not_found, invalid_password_id from yithlibraryserver.utils import jsonable from yithlibraryserver.validation import validate_password @view_defaults(route_name='password_collection_view', renderer='json') class PasswordCollectionRESTView(object): def __init__(self, request): self.request = request self.user = self.request.matchdict['user'] @view_config(request_method='OPTIONS', renderer='string') def options(self): headers = self.request.response.headers headers['Access-Control-Allow-Methods'] = 'GET, POST' headers['Access-Control-Allow-Headers'] = 'Origin, Content-Type, Accept' return '' @view_config(request_method='GET') def get(self): return [jsonable(p) for p in self.request.db.passwords.find({'owner': self.user})] @view_config(request_method='POST') def post(self): password, errors = validate_password(self.request.body, self.request.charset) if errors: result = {'message': ','.join(errors)} return HTTPBadRequest(body=json.dumps(result), content_type='application/json') # add the password to the database password['owner'] = self.user _id = self.request.db.passwords.insert(password, safe=True) password['_id'] = str(_id) return password @view_defaults(route_name='password_view', renderer='json') class PasswordRESTView(object): def __init__(self, request): self.request = request # the user is actually not used in this view since # the passwords ids are globally unique self.user = self.request.matchdict['user'] self.password_id = self.request.matchdict['password'] @view_config(request_method='OPTIONS', renderer='string') def options(self): headers = self.request.response.headers headers['Access-Control-Allow-Methods'] = 'GET, PUT, DELETE' headers['Access-Control-Allow-Headers'] = 'Origin, Content-Type, Accept' return '' @view_config(request_method='GET') def get(self): try: _id = bson.ObjectId(self.password_id) except bson.errors.InvalidId: return invalid_password_id() password = self.request.db.passwords.find_one(_id) if password is None: return password_not_found() else: return jsonable(password) @view_config(request_method='PUT') def put(self): try: _id = bson.ObjectId(self.password_id) except bson.errors.InvalidId: return invalid_password_id() password, errors = validate_password(self.request.body, self.request.charset, _id) if errors: result = {'message': ','.join(errors)} return HTTPBadRequest(body=json.dumps(result), content_type='application/json') # update the password in the database result = self.request.db.passwords.update({'_id': _id}, password, safe=True) # result['n'] is the number of documents updated # See http://www.mongodb.org/display/DOCS/getLastError+Command#getLastErrorCommand-ReturnValue if result['n'] == 1: return jsonable(password) else: return password_not_found() @view_config(request_method='DELETE') def delete(self): try: _id = bson.ObjectId(self.password_id) except bson.errors.InvalidId: return invalid_password_id() result = self.request.db.passwords.remove(_id, safe=True) if result['n'] == 1: return '' else: return password_not_found()
Python
0.000006
@@ -1758,115 +1758,8 @@ st%0A%0A - # the user is actually not used in this view since%0A # the passwords ids are globally unique%0A @@ -3172,32 +3172,70 @@ in the database%0A + password%5B'owner'%5D = self.user%0A result =
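
Decoded, the patch deletes the now-stale comment in PasswordRESTView.__init__ and restores the owner field before the update, which a full-document update() would otherwise drop; the patched section of put():

        # update the password in the database
        password['owner'] = self.user
        result = self.request.db.passwords.update({'_id': _id}, password,
                                                  safe=True)
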

commit: c624fe37353128f661839be475a352515b243610
subject: Update broker.py
old_file: fx_collect/broker.py
new_file: fx_collect/broker.py
old_contents:

from settings import FX_USER, FX_PASS, URL, FX_ENVR
from datetime import datetime, timedelta
import forexconnect as fx
import numpy as np
import time

OLE_TIME_ZERO = datetime(1899, 12, 30)


class FXCMBrokerHandler(object):
    """
    The BrokerHandler object is designed to interact
    directly with FXCM using the python-forexconnect API.
    """
    def __init__(self):
        self.broker = 'fxcm'
        self.supported_time_frames = [
            'D1', 'W1', 'M1', 'H8', 'H4', 'H2', 'H1',
            'm30', 'm15', 'm5', 'm1'
        ]
        self.dtype = np.dtype(
            [
                ('date', '<M8[us]'),
                ('askopen', '<f8'), ('askhigh', '<f8'),
                ('asklow', '<f8'), ('askclose', '<f8'),
                ('bidopen', '<f8'), ('bidhigh', '<f8'),
                ('bidlow', '<f8'), ('bidclose', '<f8'),
                ('volume', '<i8')
            ]
        )
        self._login()

    def _session_status(self):
        if self.session.is_connected():
            return True
        else:
            return False

    def _login(self):
        con = False
        while True:
            try:
                self.session = fx.ForexConnectClient(
                    FX_USER, FX_PASS, FX_ENVR, URL
                )
                if self._session_status():
                    con = True
                    break
            except RuntimeError:
                time.sleep(1)
        if not con:
            raise Exception('Unable to login')

    def get_offers(self):
        return self.session.get_offers()

    def get_offer_status(self, offer):
        status = self.session.get_offer_trading_status(offer)
        oletime = self.session.get_offer_time(offer)
        return status, self._from_ole(oletime)

    def get_initial_datetime(self, offer):
        return self.session.get_historical_prices(
            offer, 0, 0, 'D1')[0].date

    def get_open_datetime(self, offer):
        dt = datetime.utcnow().replace(second=0,microsecond=0)
        dtto = self._to_ole(dt)
        dtfm = self._to_ole(dt.replace(hour=0,minute=0))
        while True:
            data = self.session.get_historical_prices(
                offer, dtfm, dtto, 'm1')
            if len(data) > 0:
                dtto = self._to_ole(data[-1].date)
                if len(data) == 1:
                    break
            else:
                break
        return self._from_ole(dtto)

    def get_current_tick(self, offer):
        bid = self.session.get_bid(offer)
        ask = self.session.get_ask(offer)
        return bid, ask

    def get_bars(self, offer, time_frame, dtfm, dtto):
        fxdata = self.session.get_historical_prices(
            offer, self._to_ole(dtfm), self._to_ole(dtto), time_frame
        )
        npvalues = self._numpy_convert(fxdata)
        return self._integrity_check(npvalues)

    def _numpy_convert(self, values):
        return np.array(
            [v.__getinitargs__() for v in values], dtype=self.dtype)

    def _integrity_check(self, a):
        a = a[a['askhigh'] >= a['asklow']]
        a = a[a['askhigh'] >= a['askopen']]
        a = a[a['asklow'] <= a['askopen']]
        a = a[a['askhigh'] >= a['askclose']]
        a = a[a['asklow'] <= a['askclose']]
        a = a[a['bidhigh'] >= a['bidlow']]
        a = a[a['bidhigh'] >= a['bidopen']]
        a = a[a['bidlow'] <= a['bidopen']]
        a = a[a['bidhigh'] >= a['bidclose']]
        a = a[a['bidlow'] <= a['bidclose']]
        a = a[a['volume'] >= 0]
        idx = np.unique(a['date'][::-1], return_index = True)[1]
        return a[::-1][idx][::-1]

    def _to_ole(self, pydate):
        delta = pydate - OLE_TIME_ZERO
        return float(delta.days) + (float(delta.seconds) / 86400)

    def _from_ole(self, oletime):
        return OLE_TIME_ZERO + timedelta(days=float(oletime))

lang: Python
proba: 0.000001
diff:
@@ -2472,19 +2472,69 @@ -bid +while True:%0A try:%0A bid, ask = self. @@ -2548,16 +2548,20 @@ .get_bid +_ask (offer)%0A @@ -2572,41 +2572,156 @@ -ask = self.session.get_ask(offer) + if bid %3E 0 and ask %3E 0:%0A break%0A except RuntimeError as e:%0A print(offer, e)%0A pass %0A
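
Decoded, get_current_tick now retries a combined get_bid_ask call until both sides are positive, logging RuntimeErrors as it goes; the patched method, roughly:

    def get_current_tick(self, offer):
        while True:
            try:
                bid, ask = self.session.get_bid_ask(offer)
                if bid > 0 and ask > 0:
                    break
            except RuntimeError as e:
                print(offer, e)
                pass
        return bid, ask
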

commit: fa99e3eb8504016fee706347fe162c2692b74c70
subject: Support concurrent loops.
old_file: gearman/client.py
new_file: gearman/client.py
old_contents:

"""
Gearman client implementation.
"""

import sys
import struct

from collections import deque

from twisted.internet import defer
from twisted.protocols import stateful
from twisted.python import log

from constants import *

__all__ = ['GearmanProtocol', 'GearmanWorker']


class GearmanProtocol(stateful.StatefulProtocol):
    """Base protocol for handling gearman connections."""

    def makeConnection(self, transport):
        stateful.StatefulProtocol.makeConnection(self, transport)
        self.receivingCommand = 0
        self.deferreds = deque()

    def send_raw(self, cmd, data=''):
        """Send a command with the given data with no response."""
        self.transport.write(REQ_MAGIC)
        self.transport.write(struct.pack(">II", cmd, len(data)))
        self.transport.write(data)

    def send(self, cmd, data=''):
        """Send a command and get a deferred waiting for the response."""
        self.send_raw(cmd, data)
        d = defer.Deferred()
        self.deferreds.append(d)
        return d

    def getInitialState(self):
        return self._headerReceived, HEADER_LEN

    def connectionLost(self, reason):
        try:
            for d in list(self.deferreds):
                d.errback(reason)
        except:
            log.err()
        self.deferreds.clear()

    def _headerReceived(self, header):
        if header[:4] != RES_MAGIC:
            log.msg("Invalid header magic returned, failing.")
            self.transport.loseConnection()
            return
        cmd, size = struct.unpack(">II", header[4:])
        self.receivingCommand = cmd
        return self._completed, size

    def _completed(self, data):
        d = self.deferreds.popleft()
        d.callback((self.receivingCommand, data))
        self.receivingCommand = 0
        return self._headerReceived, HEADER_LEN

    def pre_sleep(self):
        """Enter a sleep state."""
        return self.send(PRE_SLEEP)

    def echo(self, data="hello"):
        """Send an echo request."""
        return self.send(ECHO_REQ, data)


class GearmanJob(object):
    """A gearman job."""

    def __init__(self, raw_data):
        self.handle, self.function, self.data = raw_data.split("\0", 2)

    def __repr__(self):
        return "<GearmanJob %s func=%s with %d bytes of data>" % (
            self.handle, self.function, len(self.data))


class GearmanWorker(object):
    """A gearman worker."""

    def __init__(self, protocol):
        self.protocol = protocol
        self.functions = {}

    def registerFunction(self, name, func):
        """Register the ability to perform a function."""
        self.functions[name] = func
        self.protocol.send_raw(CAN_DO, name)

    def _send_job_res(self, cmd, job, data=''):
        self.protocol.send_raw(cmd, job.handle + "\0" + data)

    @defer.inlineCallbacks
    def getJob(self):
        """Get the next job."""
        stuff = yield self.protocol.send(GRAB_JOB)
        while stuff[0] == NO_JOB:
            yield self.protocol.pre_sleep()
            stuff = yield self.protocol.send(GRAB_JOB)
        defer.returnValue(GearmanJob(stuff[1]))

    @defer.inlineCallbacks
    def _finishJob(self, job):
        assert job
        f = self.functions[job.function]
        assert f
        try:
            rv = yield f(job.data)
            if rv is None:
                rv = ""
            self._send_job_res(WORK_COMPLETE, job, rv)
        except:
            x = sys.exc_info()
            self._send_job_res(WORK_EXCEPTION, job, str(x))
            self._send_job_res(WORK_FAIL, job)

    def __iter__(self):
        while True:
            yield self.getJob().addCallback(self._finishJob)

lang: Python
proba: 0
diff:
@@ -2619,16 +2619,45 @@ ons = %7B%7D +%0A self.sleeping = None %0A%0A de @@ -2954,84 +2954,455 @@ -@defer.inlineCallbacks%0A def getJob(self):%0A %22%22%22Get the next job.%22%22%22 +def _sleep(self):%0A if not self.sleeping:%0A self.sleeping = self.protocol.pre_sleep()%0A def _clear(x):%0A self.sleeping = None%0A self.sleeping.addBoth(_clear)%0A return self.sleeping%0A%0A @defer.inlineCallbacks%0A def getJob(self):%0A %22%22%22Get the next job.%22%22%22%0A%0A # If we're currently sleeping, attach to the existing sleep.%0A if self.sleeping:%0A yield self._sleep()%0A %0A @@ -3502,36 +3502,24 @@ yield self. -protocol.pre _sleep()%0A
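
Decoded, the patch adds a shared self.sleeping deferred so concurrent getJob() loops attach to one PRE_SLEEP instead of each issuing their own; the patched worker pieces, roughly:

    def __init__(self, protocol):
        self.protocol = protocol
        self.functions = {}
        self.sleeping = None

    def _sleep(self):
        if not self.sleeping:
            self.sleeping = self.protocol.pre_sleep()
            def _clear(x):
                # forget the shared deferred once the sleep completes
                self.sleeping = None
            self.sleeping.addBoth(_clear)
        return self.sleeping

    @defer.inlineCallbacks
    def getJob(self):
        """Get the next job."""

        # If we're currently sleeping, attach to the existing sleep.
        if self.sleeping:
            yield self._sleep()

        stuff = yield self.protocol.send(GRAB_JOB)
        while stuff[0] == NO_JOB:
            yield self._sleep()
            stuff = yield self.protocol.send(GRAB_JOB)
        defer.returnValue(GearmanJob(stuff[1]))
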

commit: 4063d94b405a41ead8d5c3df5a1d27dd1dba140f
subject: remove config for warmup
old_file: gae/tap/warmup.py
new_file: gae/tap/warmup.py
old_contents:

# -*- coding: utf-8 -*-

from functools import wraps
import os
import sys

from google.appengine.api import lib_config
import webapp2


# Config

class ConfigDefaults(object):
  IS_TEST = "unittest" in sys.modules.keys()
  SITE_PACKAGES = "site-packages"

config = lib_config.register("warmup", ConfigDefaults.__dict__)


# Search Path

def execute_once(func):
  @wraps(func)
  def inner(_result=[None], *argv, **kwargv):
    if _result[0] is None:
      _result[0] = func(*argv, **kwargv)
      if _result[0] is None:
        raise ValueError("The return value must be not `None`.")
    return _result[0]
  return inner

@execute_once
def sys_path_append():
  try:
    import __main__ as main
  except ImportError:
    is_shell = False
  else:
    is_shell = not hasattr(main, "__file__")
  base_path = config.SITE_PACKAGES
  if config.IS_TEST or is_shell:
    base_path = os.path.abspath(base_path)
  path = base_path
  if path not in sys.path and os.path.exists(path):
    sys.path.append(path)
  if os.path.exists(base_path):
    path = os.path.join(base_path, "packages")
    if path not in sys.path:
      sys.path.append(path)
    if os.path.exists(path):
      for zipfile in os.listdir(path):
        if zipfile.endswith(".zip"):
          zipfile_path = os.path.join(path, zipfile)
          if zipfile_path not in sys.path:
            sys.path.append(zipfile_path)
  if is_shell or sys.argv[0].endswith("/sphinx-build"):
    import google
    base = os.path.join(os.path.dirname(google.__file__), "../lib/")
    for webapp2 in ["webapp2-2.5.2", "webapp2"]:
      path = os.path.join(base, webapp2)
      if os.path.exists(path):
        sys.path.append(path)
        break
    for path in ["endpoints-1.0", "protorpc-1.0", "jinja2"]:
      sys.path.append(os.path.join(base, path))
  elif config.IS_TEST:
    import google
    base = os.path.join(os.path.dirname(google.__file__), "../lib/")
    for path in ["endpoints-1.0"]:
      sys.path.append(os.path.join(base, path))
  return True

sys_path_append()

from js.angular import angular_cookies, angular_resource
from js.bootstrap import bootstrap
import tap


class WarmUp(tap.RequestHandler):

  @tap.head(angular_cookies, angular_resource, bootstrap)
  def get(self):
    pass

app = webapp2.WSGIApplication([
  ("/_ah/warmup", WarmUp),
])

lang: Python
proba: 0.000001
diff:
@@ -72,252 +72,22 @@ ys%0A%0A -from google.appengine.api import lib_config%0Aimport webapp2%0A%0A%0A# Config%0A%0Aclass ConfigDefaults(object):%0A IS_TEST = %22unittest%22 in sys.modules.keys()%0A SITE_PACKAGES = %22site-packages%22%0A%0Aconfig = lib_config.register(%22warmup%22, ConfigDefaults.__dict__) +import webapp2 %0A%0A%0A# @@ -573,103 +573,55 @@ h = -config.SITE_PACKAGES%0A if config.IS_TEST or is_shell:%0A base_path = os.path.abspath(base_path +os.environ.get(%22SITE_PACKAGES%22, %22site-packages%22 )%0A @@ -1515,201 +1515,8 @@ h))%0A - elif config.IS_TEST:%0A import google%0A base = os.path.join(os.path.dirname(google.__file__), %22../lib/%22)%0A for path in %5B%22endpoints-1.0%22%5D:%0A sys.path.append(os.path.join(base, path))%0A re
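Decoded, the change above drops App Engine's lib_config registry in favour of a plain environment lookup; the core pattern is just os.environ.get with a default:

import os

# Read an overridable setting from the environment, falling back to a default.
SITE_PACKAGES = os.environ.get("SITE_PACKAGES", "site-packages")
print(os.path.abspath(SITE_PACKAGES))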
3dfac1e6ef418e180d4967aba921ff527ff31e6b
Update ittr_multiplayer.py
nose_ittr/ittr_multiplayer.py
nose_ittr/ittr_multiplayer.py
__author__ = 'Sergey Ragatsky'
__version__ = '0.0.2'

import re
import logging
from types import FunctionType
from itertools import product

logger = logging.getLogger(__name__)


def ittr(*args, **kwargs):
    """
    Decorator that adds iteration attributes to test method
    """
    def update_attr(func):
        # combining kwargs and args dict with ittr dict
        ittrs = {str(item): item for item in args or []}
        ittr_dict = getattr(func, 'ittr', dict())
        ittr_dict.update(ittrs)
        ittr_dict.update(kwargs)
        setattr(func, 'ittr', ittr_dict)
        return func
    return update_attr


class IttrMultiplayer(type):
    """
    Multiples the tests in a given test class
    by the parameters given in ittr decorator.
    """
    def __new__(mcs, name, bases, dct):
        # check if the class been multiplied by IttrMultiplayer
        if dct.get('is_multiplied'):
            return type.__new__(mcs, name, bases, dct)
        for attribute_name, attribute in dct.items():
            # if not a method continue
            if not type(attribute) == FunctionType:
                logging.debug('attribute {0} is not a method'.format(attribute_name))
                continue
            # is method decorated with platform attr
            if not hasattr(attribute, 'ittr') or not attribute.ittr:
                logging.debug('method {0} has not attr decorator'.format(attribute_name))
                continue
            # create product of all the iterators
            b = [map(lambda value: (key, value), values)
                 for key, values in attribute.ittr.iteritems() if values]
            products = map(dict, product(*b))
            for prod in products:
                logging.debug('method product: {0}'.format(prod))
                suffix = re.sub(r'\W+', '', str(prod.values())
                                .translate(None, "[]'")
                                .replace(',', '_'))
                logging.debug('method suffix: {0}'.format(suffix))
                # in case itts passed are empty
                if not suffix:
                    logging.debug('Empty suffix, product: {0}'.format(prod))
                    continue
                new_func_name = attribute_name + '_' + suffix
                # combine both product and ittr dict to be added to new method
                func_params = dict(attribute.func_dict, **prod)
                mirror_func = mcs._attribute_injector(attribute, **func_params)
                setattr(mirror_func, 'ittr', prod)
                # assign new name and docstring and save back at our class
                mirror_func.func_name = new_func_name
                mirror_func.func_doc = attribute.func_doc
                dct[new_func_name] = mirror_func
            # set no test flag to original test method
            attribute.__test__ = False
        dct['is_multiplied'] = True  # mark has been multiplied
        return type.__new__(mcs, name, bases, dct)

    @classmethod
    def _attribute_injector(cls, func, **keywords):
        def injector(*fargs, **fkeywords):
            # transfer ittr and attr to self when called
            self = fargs[0]
            for name, value in keywords.iteritems():
                setattr(self, name, value)
            return func(*fargs, **fkeywords)
        # transfers all attr and ittr to newfunc
        for name, value in keywords.iteritems():
            setattr(injector, name, value)
        setattr(injector, 'keywords', keywords)
        return injector
Python
0.000001
@@ -856,18 +856,17 @@ rMultipl -ay +i er%0A @@ -2901,16 +2901,50 @@ = False%0A + # mark has been multiplied %0A @@ -2977,34 +2977,8 @@ ue -# mark has been multiplied %0A
d2466f376a19389a8cdc3aeaa070a30edb406b10
store 'curve_type' in layer's lib
Lib/ufo2ft/filters/cubicToQuadratic.py
Lib/ufo2ft/filters/cubicToQuadratic.py
from __future__ import (
    print_function, division, absolute_import, unicode_literals)

from ufo2ft.filters import BaseFilter

from cu2qu.ufo import DEFAULT_MAX_ERR, CURVE_TYPE_LIB_KEY
from cu2qu.pens import Cu2QuPointPen

import logging


logger = logging.getLogger(__name__)


class CubicToQuadraticFilter(BaseFilter):

    _kwargs = {
        'conversionError': None,
        'reverseDirection': True,
        'rememberCurveType': False,
    }

    def set_context(self, font, glyphSet):
        ctx = super(CubicToQuadraticFilter, self).set_context(font, glyphSet)

        relativeError = self.options.conversionError or DEFAULT_MAX_ERR
        ctx.absoluteError = relativeError * font.info.unitsPerEm

        ctx.stats = {}

        return ctx

    def __call__(self, font, glyphSet=None):
        if self.options.rememberCurveType:
            curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
            if curve_type == "quadratic":
                logger.info("Curves already converted to quadratic")
                return set()
            elif curve_type == "cubic":
                pass  # keep converting
            else:
                raise NotImplementedError(curve_type)

        modified = super(CubicToQuadraticFilter, self).__call__(font, glyphSet)
        if modified:
            stats = self.context.stats
            logger.info('New spline lengths: %s' % (', '.join(
                '%s: %d' % (l, stats[l]) for l in sorted(stats.keys()))))

        if self.options.rememberCurveType:
            curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
            if curve_type != "quadratic":
                font.lib[CURVE_TYPE_LIB_KEY] = "quadratic"

        return modified

    def filter(self, glyph):
        if not len(glyph):
            return False

        pen = Cu2QuPointPen(
            glyph.getPointPen(),
            self.context.absoluteError,
            reverse_direction=self.options.reverseDirection,
            stats=self.context.stats)
        contours = list(glyph)
        glyph.clearContours()
        for contour in contours:
            contour.drawPoints(pen)
        return True
Python
0
@@ -84,16 +84,64 @@ erals)%0A%0A +from ufo2ft.constants import DEFAULT_LAYER_NAME%0A from ufo @@ -837,16 +837,46 @@ Set=None +, layerName=DEFAULT_LAYER_NAME ):%0A @@ -905,32 +905,167 @@ emberCurveType:%0A + # check first in the global font lib, then in layer lib%0A for lib in (font.lib, font.layers%5BlayerName%5D.lib):%0A curv @@ -1065,37 +1065,32 @@ curve_type = -font. lib.get(CURVE_TY @@ -1102,32 +1102,36 @@ B_KEY, %22cubic%22)%0A + if c @@ -1164,32 +1164,36 @@ + logger.info(%22Cur @@ -1241,24 +1241,28 @@ + return set() @@ -1262,16 +1262,20 @@ n set()%0A + @@ -1322,16 +1322,20 @@ + + pass # @@ -1366,14 +1366,22 @@ + else:%0A + @@ -1495,16 +1495,29 @@ _call__( +%0A font, gl @@ -1522,16 +1522,36 @@ glyphSet +, layerName%0A )%0A @@ -1777,32 +1777,109 @@ emberCurveType:%0A + # 'lib' here is the layer's lib, as defined in for loop variable%0A curv @@ -1887,21 +1887,16 @@ _type = -font. lib.get( @@ -1982,21 +1982,16 @@ -font. lib%5BCURV
b842d9f553fe5c4cbf8273c2c11362b85c0ffbf5
Change _get_tests to static method.
health_monitor/models.py
health_monitor/models.py
""" Copyright 2017 Gracenote Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from jsonfield import JSONField from django.db import models from django.utils import timezone from . import utils class Health(models.Model): uid = models.IntegerField(primary_key=True, db_index=True) state = JSONField(default={}, blank=True, null=True) severity = JSONField(default={}, blank=True, null=True) def __unicode__(self): # For Python 2, use __str__ on Python 3 return unicode(self.uid) def _calculate_severity(self, g): """Return the highest score in state dict.""" test_scores = [1, ] for t in self.state[g].keys(): if self.state[g][t]['score']: test_scores.append(self.state[g][t]['score']) return max(test_scores) def update_score(self, test, score): """Update the health based on the test name and score.""" for group in HealthTest._get_groups(test): if test in HealthTest._get_tests(group): if group not in self.state.keys(): self.state[group] = {} self.state[group] = utils.init_score_dict(self.state[group], test) self.state[group][test] = utils.update_score_dict(self.state[group][test], score) self.severity = utils.init_score_dict(self.severity, group) self.severity[group] = utils.update_score_dict(self.severity[group], self._calculate_severity(group)) self.save() def delete_test_state(self, test): """Delete test state""" for group in self.state.keys(): if test in self.state[group].keys(): del(self.state[group][test]) self.severity[group] = utils.update_score_dict(self.severity[group], self._calculate_severity(group)) self.save() class Meta(object): abstract = True class HealthTest(models.Model): uid = models.IntegerField(db_index=True) time = models.DateTimeField() test = None groups = [] health_model = Health @classmethod def create(cls, uid, **kwargs): health_test = cls(uid=uid, time=timezone.now(), **kwargs) health_test.save() h, _ = cls.health_model.objects.get_or_create(uid=uid) h.update_score(test=cls.test, score=cls.get_score(**kwargs)) return health_test @classmethod def _get_tests(cls, group): return [t.test for t in HealthTest.__subclasses__() if group in t.groups] @staticmethod def _get_groups(test): for t in HealthTest.__subclasses__(): if test == t.test: return t.groups return [] @staticmethod def _get_model(test): for t in HealthTest.__subclasses__(): if test == t.test: return t raise TypeError('test {} does not exist'.format(test)) @classmethod def get_score(cls, **kwargs): score = cls.score(**kwargs) if type(score) != int: raise TypeError('score() method should return an integer') else: return score class Meta(object): abstract = True
Python
0
@@ -2888,29 +2888,30 @@ _test%0A%0A @ -class +static method%0A d @@ -2924,21 +2924,16 @@ t_tests( -cls, group):%0A
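The delta turns _get_tests from a classmethod into a staticmethod, matching the neighbouring helpers; the practical difference is only whether the class arrives implicitly. A toy illustration with hypothetical names:

class Registry:
    items = ['a', 'b']

    @classmethod
    def with_cls(cls, suffix):
        # receives the class implicitly; can touch cls.items
        return [i + suffix for i in cls.items]

    @staticmethod
    def without_cls(suffix):
        # no implicit argument; must name the class explicitly
        return [i + suffix for i in Registry.items]

assert Registry.with_cls('!') == Registry.without_cls('!')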
2ee4f2d423cf1b3dba9ceed3cd31db91ee0ce86d
Fix variable referenced before assignment
nsone/rest/transport/basic.py
nsone/rest/transport/basic.py
#
# Copyright (c) 2014 NSONE, Inc.
#
# License under The MIT License (MIT). See LICENSE in project root.
#

from __future__ import absolute_import

from nsone.rest.transport.base import TransportBase
from nsone.rest.errors import ResourceException, RateLimitException, \
    AuthException

try:
    from urllib.request import build_opener, Request, HTTPSHandler
    from urllib.error import HTTPError
except ImportError:
    from urllib2 import build_opener, Request, HTTPSHandler
    from urllib2 import HTTPError

import json
import socket


class BasicTransport(TransportBase):

    def __init__(self, config):
        TransportBase.__init__(self, config, self.__module__)
        self._timeout = self._config.get('timeout',
                                         socket._GLOBAL_DEFAULT_TIMEOUT)

    def send(self, method, url, headers=None, data=None, files=None,
             callback=None, errback=None):
        if headers is None:
            headers = {}
        if files is not None:
            # XXX
            raise Exception('file uploads not supported in BasicTransport yet')
        self._logHeaders(headers)
        self._log.debug("%s %s %s" % (method, url, data))
        opener = build_opener(HTTPSHandler)
        request = Request(url, headers=headers, data=data)
        request.get_method = lambda: method

        def handleProblem(code, resp, msg):
            if errback:
                errback((resp, msg))
                return
            if code == 429:
                raise RateLimitException('rate limit exceeded', resp, msg)
            elif code == 401:
                raise AuthException('unauthorized', resp, msg)
            else:
                raise ResourceException('server error', resp, msg)

        # Handle error and responses the same so we can
        # always pass the body to the handleProblem function
        try:
            resp = opener.open(request, timeout=self._timeout)
        except HTTPError as e:
            resp = e
            body = resp.read()
        except Exception as e:
            body = '"Service Unavailable"'
            resp = HTTPError(url, 503, body, headers, None)
        finally:
            if resp.code != 200:
                handleProblem(resp.code, resp, body)

        # TODO make sure json is valid
        try:
            jsonOut = json.loads(body)
        except ValueError:
            if errback:
                errback(resp)
                return
            else:
                raise ResourceException('invalid json in response', resp, body)

        if callback:
            return callback(jsonOut)
        else:
            return jsonOut


TransportBase.REGISTRY['basic'] = BasicTransport
Python
0.002868
@@ -2179,39 +2179,8 @@ = e%0A - body = resp.read()%0A @@ -2326,16 +2326,47 @@ inally:%0A + body = resp.read()%0A
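The bug being fixed is a classic: body was bound only inside the HTTPError branch, so a successful open reached json.loads(body) with body unbound. A stripped-down, network-free illustration of the fixed shape (hypothetical stand-ins):

import json

def load_body(raw, fail=False):
    try:
        if fail:
            raise IOError('boom')
        body = raw                        # the fix: bind body on success too
    except IOError:
        body = '"Service Unavailable"'    # and on every error path
    return json.loads(body)

print(load_body('{"a": 1}'))        # {'a': 1}
print(load_body('', fail=True))     # Service Unavailable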
3967ecb61f28a952bb0ec5e3798be60849d845e5
Add support for feeds sorted by awesome_count to apiv2
dwitter/views_v2.py
dwitter/views_v2.py
import datetime

from dateutil.parser import parse
from django.contrib.auth.models import User
from django.db.models import Prefetch
from django.utils import timezone
from rest_framework import mixins, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response

from dwitter.models import Comment, Dweet
from dwitter.serializers_v2 import DweetSerializer, UserSerializer


class UserViewSet(mixins.RetrieveModelMixin,
                  viewsets.GenericViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_object(self):
        if self.kwargs['pk'] == 'me':
            if self.request.user.is_authenticated:
                return self.request.user
            else:
                raise PermissionDenied()
        return super().get_object()


class DweetViewSet(mixins.RetrieveModelMixin,
                   mixins.ListModelMixin,
                   mixins.CreateModelMixin,
                   viewsets.GenericViewSet):
    queryset = Dweet.objects.all().select_related(
        'author',
    ).prefetch_related(
        'likes',
        Prefetch('comments', queryset=Comment.objects.select_related('author'))
    ).order_by('-hotness')
    serializer_class = DweetSerializer

    def create(self, request):
        from dwitter.feed.views import dweet as original_dweet_view

        class DummyRequest:
            def __init__(self):
                self.method = 'POST'
                self.POST = request.data
                self.build_absolute_uri = request.build_absolute_uri
                self._messages = request._messages
                self.META = request.META
                self.user = request.user

        response = original_dweet_view(DummyRequest())
        pk = int(response.url.split('/')[-1])
        dweet = self.queryset.get(pk=pk)
        context = self.get_serializer_context()
        return Response(DweetSerializer(context=context).to_representation(dweet))

    def list(self, request):
        order_by = request.query_params.get('order_by', '-hotness')
        if order_by not in ('hotness', '-hotness', 'posted', '-posted', '?'):
            order_by = '-hotness'

        try:
            posted_before = parse(request.query_params.get('posted_before', ''))
        except ValueError:
            posted_before = datetime.datetime(year=9999, month=12, day=31)

        try:
            posted_after = parse(request.query_params.get('posted_after', ''))
        except ValueError:
            posted_after = datetime.datetime(year=1, month=1, day=1)

        if order_by not in ('hotness', '-hotness', 'posted', '-posted', '?'):
            order_by = '-hotness'

        username = request.query_params.get('username', None)
        hashtag = request.query_params.get('hashtag', None)
        filters = {}
        if username:
            filters['author__username'] = username
        if hashtag:
            filters['hashtag__name'] = hashtag

        self.queryset = self.queryset.order_by(order_by).filter(
            posted__gte=posted_after,
            posted__lt=posted_before,
            **filters)

        return super().list(request)

    @action(methods=['POST'], detail=True)
    def set_like(self, request, pk=None):
        if not request.user.is_authenticated:
            return PermissionDenied()
        dweet = self.get_object()
        like = request.data.get('like', True)
        if like:
            dweet.likes.add(request.user)
        else:
            dweet.likes.remove(request.user)
        dweet = self.queryset.get(pk=dweet.pk)
        context = self.get_serializer_context()
        return Response(DweetSerializer(context=context).to_representation(dweet))

    @action(methods=['POST'], detail=True)
    def add_comment(self, request, pk=None):
        if not request.user.is_authenticated:
            return PermissionDenied()
        text = request.data.get('text', '')
        dweet = self.get_object()
        Comment.objects.create(
            reply_to=dweet,
            text=text,
            author=request.user,
            posted=timezone.now())
        dweet = self.queryset.get(pk=dweet.pk)
        context = self.get_serializer_context()
        return Response(DweetSerializer(context=context).to_representation(dweet))
Python
0
@@ -125,16 +125,23 @@ Prefetch +, Count %0Afrom dj @@ -2174,39 +2174,57 @@ ted', '-posted', + '-awesome_count', '?'):%0A - orde @@ -2241,16 +2241,17 @@ otness'%0A +%0A @@ -2631,121 +2631,8 @@ 1)%0A%0A - if order_by not in ('hotness', '-hotness', 'posted', '-posted', '?'):%0A order_by = '-hotness'%0A%0A @@ -2910,16 +2910,139 @@ ashtag%0A%0A + if order_by == '-awesome_count':%0A self.queryset = self.queryset.annotate(awesome_count=Count('likes'))%0A%0A @@ -3098,16 +3098,20 @@ filter(%0A +
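Outside the ORM, ordering by a derived count is the same idea as annotate(Count('likes')) followed by order_by('-awesome_count'): compute the key, then sort descending on it. A plain-Python sketch:

posts = [
    {'title': 'a', 'likes': [1, 2, 3]},
    {'title': 'b', 'likes': [1]},
]
# '-awesome_count' ordering: most-liked first
for post in sorted(posts, key=lambda p: len(p['likes']), reverse=True):
    print(post['title'], len(post['likes']))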
5f663eb971542d252186dbf9345ca458dca93c16
Fix 'file does not exist' error when rendering templated file paths
tpl/tpl.py
tpl/tpl.py
# -*- coding:utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import jinja2

from tpl import path
from tpl import errors


class Template(object):

    IGNORE_FILES = [
        'construct.sh',
        'construct.py'
    ]

    def __init__(self, tpl_dir):
        self.tpl_dir = tpl_dir

    def is_ignored_file(self, file):
        file_name = file.split('/')[-1]
        if file_name in self.IGNORE_FILES:
            return True
        return False

    def render_file(self, file, context):
        env = jinja2.Environment(undefined=jinja2.StrictUndefined)
        if '{{' in file and '}}' in file:
            file = env.from_string(file).render(context)
        with open(file, 'r') as fd:
            file_content = fd.read()
        file_content = env.from_string(file_content).render(context)
        return file, file_content

    def render_dir(self, dir, context):
        if not ('{{' in dir and '}}' in dir):
            return dir
        env = jinja2.Environment(undefined=jinja2.StrictUndefined)
        dir = env.from_string(dir).render(context)
        return dir

    def render(self, context):
        assert isinstance(context)
        render_dirs = []
        render_files = []
        for dir in path.list_dirs(self.tpl_dir):
            render_dirs.append(self.render_dir(dir, context))
        for file in path.list_files(self.tpl_dir):
            if self.is_ignored_file(file):
                continue
            render_files.append(self.render_file(file, context))
        return render_dirs, render_files
Python
0.000001
@@ -611,32 +611,59 @@ trictUndefined)%0A + render_file = file%0A if '%7B%7B' @@ -696,24 +696,31 @@ +render_ file = env.f @@ -825,32 +825,39 @@ .read()%0A +render_ file_content = e @@ -916,22 +916,36 @@ return -file, +render_file, render_ file_con
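The fix makes render_file return the rendered file name while still reading the original template path from disk. The essential jinja2 calls, with illustrative paths (jinja2 assumed installed):

import jinja2

env = jinja2.Environment(undefined=jinja2.StrictUndefined)
context = {'name': 'demo'}

template_path = '{{ name }}/config.ini'   # path as written in the template tree
rendered_path = env.from_string(template_path).render(context)
print(rendered_path)  # demo/config.ini; the original file is still the one opened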
92911d0299b299af92e1ff8936748d8838e529cb
Fix faulty login url
shuup_workbench/settings/base_settings.py
shuup_workbench/settings/base_settings.py
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os

from shuup.addons import add_enabled_addons

BASE_DIR = os.getenv("SHUUP_WORKBENCH_BASE_DIR") or (
    os.path.dirname(os.path.dirname(__file__)))

SECRET_KEY = "Shhhhh"

DEBUG = True

ALLOWED_HOSTS = []

MEDIA_ROOT = os.path.join(BASE_DIR, "var", "media")

STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")

MEDIA_URL = "/media/"

SHUUP_ENABLED_ADDONS_FILE = os.getenv("SHUUP_ENABLED_ADDONS_FILE") or (
    os.path.join(BASE_DIR, "var", "enabled_addons"))

INSTALLED_APPS = add_enabled_addons(SHUUP_ENABLED_ADDONS_FILE, [
    # django
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    # external apps that needs to be loaded before Shuup
    'easy_thumbnails',
    # shuup themes
    'shuup.themes.classic_gray',
    # shuup
    'shuup.addons',
    'shuup.admin',
    'shuup.api',
    'shuup.core',
    'shuup.default_tax',
    'shuup.front',
    'shuup.front.apps.auth',
    'shuup.front.apps.carousel',
    'shuup.front.apps.customer_information',
    'shuup.front.apps.personal_order_history',
    'shuup.front.apps.saved_carts',
    'shuup.front.apps.registration',
    'shuup.front.apps.simple_order_notification',
    'shuup.front.apps.simple_search',
    'shuup.notify',
    'shuup.simple_cms',
    'shuup.customer_group_pricing',
    'shuup.campaigns',
    'shuup.simple_supplier',
    'shuup.order_printouts',
    'shuup.testing',
    'shuup.utils',
    'shuup.xtheme',
    'shuup.reports',
    'shuup.default_reports',
    'shuup.regions',
    'shuup.importer',
    'shuup.default_importer',
    # external apps
    'bootstrap3',
    'django_jinja',
    'filer',
    'registration',
    'rest_framework',
])

MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'shuup.front.middleware.ProblemMiddleware',
    'shuup.front.middleware.ShuupFrontMiddleware',
]

ROOT_URLCONF = 'shuup_workbench.urls'

WSGI_APPLICATION = 'shuup_workbench.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

LANGUAGE_CODE = 'en'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

STATIC_URL = '/static/'

LOGIN_REDIRECT_URL = '/'

SOUTH_TESTS_MIGRATE = False  # Makes tests that much faster.

DEFAULT_FROM_EMAIL = 'no-reply@example.com'

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

LOGGING = {
    'version': 1,
    'formatters': {
        'verbose': {'format': '[%(asctime)s] (%(name)s:%(levelname)s): %(message)s'},
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'shuup': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': True},
    }
}

LANGUAGES = [
    ('en', 'English'),
    ('fi', 'Finnish'),
    ('ja', 'Japanese'),
    ('zh-hans', 'Simplified Chinese'),
    ('pt-br', 'Portuguese (Brazil)'),
]

PARLER_DEFAULT_LANGUAGE_CODE = "en"

PARLER_LANGUAGES = {
    None: [{"code": c, "name": n} for (c, n) in LANGUAGES],
    'default': {
        'hide_untranslated': False,
    }
}

_TEMPLATE_CONTEXT_PROCESSORS = [
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.request",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages"
]

TEMPLATES = [
    {
        "BACKEND": "django_jinja.backend.Jinja2",
        "APP_DIRS": True,
        "OPTIONS": {
            "match_extension": ".jinja",
            "context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
            "newstyle_gettext": True,
            "environment": "shuup.xtheme.engine.XthemeEnvironment",
        },
        "NAME": "jinja2",
    },
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
            "debug": DEBUG
        }
    },
]

# set login url here because of `login_required` decorators
LOGIN_URL = "/login"

SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"

SHUUP_PRICING_MODULE = "customer_group_pricing"

REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAdminUser',
    )
}

if os.environ.get("SHUUP_WORKBENCH_DISABLE_MIGRATIONS") == "1":
    from .utils import DisableMigrations
    MIGRATION_MODULES = DisableMigrations()


def configure(setup):
    setup.commit(globals())
Python
0.000001
@@ -4993,16 +4993,17 @@ %22/login +/ %22%0A%0ASESSI
7b385ca9897ab8a7b25966cc54324a4b42596f7d
fix case of no saved lists or items
lists.py
lists.py
# manage list functions
import os
import redis
import json

r = redis.from_url(os.environ.get("REDIS_URL"))


def getLists():
    lists = []
    items = []
    lists = json.loads(r.get('lists'))
    items = json.loads(r.get('items'))
    return True


def putLists()
    r.put('lists', json.dumps(lists))
    r.pub('items', json.dumps(items))
    return True


def createList(list):
    getLists()
    if (lists.count(list) == 0):
        lists.append(list)
        items.append([])
        putLists()
        return True
    else:
        print('List ' + list + ' already exists')
        return False


def deleteList(list)
    getLists()
    if (lists.count(list) != 0):
        items.pop(lists.index(list))
        lists.remove(list)
        putLists()
        return True
    else:
        print('List ' + list + ' not found')
        return False


def addItem(item, list):
    getLists()
    if (lists.count(list) == 0):
        print('List ' + list + ' not found')
        return False
    else:
        items[lists.index(list)].append(item)
        putLists()
        return True


def removeItem(item, list):
    getLists()
    if (lists.count(list) == 0):
        print('List ' + list + ' not found')
        return False
    elif (items[lists.index(list)].count(item) == 0):
        print('Item ' + item + ' not found on ' + list + ' list')
        return False
    else:
        items[lists.index(list)].remove(item)
        putLists()
        return True


def readList(list):
    getLists()
    if (lists.count(list) == 0):
        print('List ' + list + ' not found')
        return []
    else:
        return items[lists.index(list)]
Python
0.00001
@@ -126,73 +126,223 @@ -lists = %5B%5D%0A items = %5B%5D%0A lists = json.loads(r.get('lists'))%0A +savedLists = r.get('lists')%0A if savedLists == None:%0A lists = %5B%5D%0A else:%0A lists = json.loads(savedLists)%0A savedItems = r.get('items')%0A if savedItems == None:%0A items = %5B%5D%0A else:%0A @@ -360,30 +360,26 @@ n.loads( -r.get('i +savedI tems -') )%0A re
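redis-py returns None for a key that was never set, and json.loads(None) raises TypeError, so the fix guards the decode. A standalone sketch of the guard, no live Redis needed:

import json

def loads_or_default(raw, default):
    # redis GET returns None for a missing key; guard before json.loads
    return default if raw is None else json.loads(raw)

print(loads_or_default(None, []))        # []
print(loads_or_default('[1, 2]', []))    # [1, 2]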
2a08d3154992b5f0633d7cd2ca1bbfc7ecd63f69
Fix to allow version number to be imported without dependencies being installed.
email_extras/__init__.py
email_extras/__init__.py
from django.core.exceptions import ImproperlyConfigured

from email_extras.settings import USE_GNUPG


__version__ = "0.1.0"


if USE_GNUPG:
	try:
		import gnupg
	except ImportError:
		raise ImproperlyConfigured, "Could not import gnupg"
Python
0
@@ -1,63 +1,4 @@ -%0D%0Afrom django.core.exceptions import ImproperlyConfigured%0D%0A %0D%0Afr @@ -86,17 +86,26 @@ G:%0D%0A -%09 + try:%0D%0A -%09%09 + impo @@ -114,17 +114,20 @@ gnupg%0D%0A -%09 + except I @@ -141,12 +141,103 @@ ror: -%0D%0A%09%09 +%0A try:%0A from django.core.exceptions import ImproperlyConfigured%0D%0A rais @@ -285,9 +285,56 @@ gnupg%22%0D + except ImportError:%0A pass%0A%0D%0A %0A
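The commit moves __version__ above the guarded import so packaging tools can read the version without the optional dependency installed. The actual patch nests a second try around the Django import; a simpler common variant of the same make-the-import-optional pattern:

# Allow importing package metadata even when optional deps are missing.
__version__ = "0.1.0"

try:
    import gnupg  # optional dependency
except ImportError:
    gnupg = None  # callers must check before using GPG features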
b39518482da1d3e064cdbc34490e4a9924f6d5f1
Add a test for vectorized call
quantecon/tests/test_ecdf.py
quantecon/tests/test_ecdf.py
""" Tests for ecdf.py """ import unittest import numpy as np from quantecon import ECDF class TestECDF(unittest.TestCase): @classmethod def setUpClass(cls): cls.obs = np.random.rand(40) # observations defining dist cls.ecdf = ECDF(cls.obs) def test_call_high(self): "ecdf: x above all obs give 1.0" # all of self.obs <= 1 so ecdf(1.1) should be 1 self.assertAlmostEqual(self.ecdf(1.1), 1.0) def test_call_low(self): "ecdf: x below all obs give 0.0" # all of self.obs <= 1 so ecdf(1.1) should be 1 self.assertAlmostEqual(self.ecdf(-0.1), 0.0) def test_ascending(self): "ecdf: larger values should return F(x) at least as big" x = np.random.rand() F_1 = self.ecdf(x) F_2 = self.ecdf(1.1 * x) self.assertGreaterEqual(F_2, F_1)
Python
0
@@ -849,8 +849,289 @@ 2, F_1)%0A +%0A def test_vectorized(self):%0A %22ecdf: testing vectorized __call__ method%22%0A t = np.linspace(-1, 1, 100)%0A self.assertEqual(t.shape, self.ecdf(t).shape)%0A t = np.linspace(-1, 1, 100).reshape(2, 2, 25)%0A self.assertEqual(t.shape, self.ecdf(t).shape)%0A
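The new test only pins down shape preservation. A NumPy ECDF that satisfies it can be written with searchsorted, which broadcasts over any input shape (a sketch, not quantecon's implementation):

import numpy as np

obs = np.sort(np.random.rand(40))

def ecdf(x):
    # F(x) = #{obs <= x} / n; searchsorted broadcasts over array inputs
    return np.searchsorted(obs, x, side='right') / obs.size

t = np.linspace(-1, 1, 100).reshape(2, 2, 25)
assert ecdf(t).shape == t.shape  # shape is preserved, as the new test checks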
e36192babd239366d51d1ea7cdfab94c06791fe7
Test that assertElementPresent returns the matched element
functional_tests/test_base.py
functional_tests/test_base.py
# -*- coding: utf-8 -*-
from .base import FunctionalTestCase

import unittest
from unittest import mock


class TestBaseFuctionalTest(unittest.TestCase):
    def test_assertelementpresent_can_find_string(self):
        e = [mock.Mock(text='test')]
        ft = FunctionalTestCase()
        ft.assertElementPresent(e, 'test')

    def test_assertelementpresent_raises_when_string_not_found(self):
        e = [mock.Mock(text='some string')]
        ft = FunctionalTestCase()
        with self.assertRaises(AssertionError):
            ft.assertElementPresent(e, 'test')
Python
0.000002
@@ -541,28 +541,280 @@ rtElementPresent(e, 'test')%0A +%0A def test_assertelementpresent_returns_element_found(self):%0A e = %5Bmock.Mock(text='test'), mock.Mock(text='some string')%5D%0A ft = FunctionalTestCase()%0A ret = ft.assertElementPresent(e, 'test')%0A self.assertEquals(e%5B0%5D, ret)%0A
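The added test encodes a useful convention: an assertion helper that locates something should also return it so callers can chain on the match. A minimal sketch of such a helper (hypothetical, not the project's FunctionalTestCase):

class El:
    def __init__(self, text):
        self.text = text

class Checker:
    def assert_element_present(self, elements, text):
        for element in elements:
            if element.text == text:
                return element  # hand the match back to the caller
        raise AssertionError('%r not found' % text)

els = [El('test'), El('some string')]
assert Checker().assert_element_present(els, 'test') is els[0]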
273ad7c297974cc97eb2889ebdbafe40e34be095
Add better comments to protocol class
honey/server.py
honey/server.py
import logging

from twisted.conch import manhole, avatar
from twisted.conch.interfaces import IConchUser, ISession
from twisted.conch.ssh import keys, session
from twisted.cred import checkers
from twisted.cred.portal import Portal, IRealm
from twisted.internet import reactor
from twisted.conch.insults import insults
from twisted.conch.ssh.factory import SSHFactory
from zope.interface import implementer

import honey_logging
from config import CONFIG
from parser import parse_input


class HoneyProtocol(manhole.Manhole):
    '''
    This is the bulk of the logic that handles all connections
    '''
    def __init__(self, user):
        self.user = user
        address = self.user.transport.getPeer().address
        self.log_data = self.get_avatar_identifier_dict(address)

    def showPrompt(self):
        self.terminal.nextLine()
        self.terminal.write('$ ')

    def connectionMade(self):
        '''
        Called once on initial connection of an Avatar
        '''
        logging.info('Connection made')
        super(HoneyProtocol, self).connectionMade()
        self.log_stash('login')
        self.terminal.write(CONFIG['motd'])
        self.showPrompt()

    def log_stash(self, msg):
        honey_logging.stash.info(msg, extra=self.log_data)

    def get_avatar_identifier_dict(self, address):
        '''
        Every logstash message has this data
        '''
        data = {
            'protocol': address.type,
            'ip': address.host,
            'port': address.port
        }
        return data

    def lineReceived(self, line):
        '''
        Called everytime an avatar sends a line
        This is where logging and output is done
        '''
        command = line.strip()
        output = parse_input(command)
        self.log_data['command'] = command
        self.log_stash('command')
        self.terminal.write(output)
        self.showPrompt()

    def handle_INT(self):
        '''
        The default INT handler is sloppy
        Replaced it with something that is more expected
        '''
        self.terminal.write('^C')
        self.showPrompt()


@implementer(ISession)
class HoneyAvatar(avatar.ConchUser):
    '''
    An avatar represents one user logged in
    '''
    def __init__(self, username):
        logging.info('Avatar being created')
        avatar.ConchUser.__init__(self)
        self.username = username
        self.channelLookup.update({'session': session.SSHSession})

    def openShell(self, transport):
        logging.info('Protocol being setup')
        self.transport = transport
        protocol = insults.ServerProtocol(HoneyProtocol, self)
        protocol.makeConnection(transport)
        transport.makeConnection(session.wrapProtocol(protocol))

    def getPty(self, terminal, windowSize, attrs):
        return None

    def eofReceived(self):
        pass

    def closed(self):
        pass


@implementer(IRealm)
class HoneyRealm(object):
    '''
    A realm is a factory that returns avatars after authentication is made
    '''
    def requestAvatar(self, avatarId, mind, *interfaces):
        if IConchUser in interfaces:
            logging.info('Interface IConchUser found')
            return interfaces[0], HoneyAvatar(avatarId), lambda: None
        else:
            raise NotImplementedError('No supported interfaces found')


def _create_private_and_public_keys():
    private_key = keys.Key.fromString(data=CONFIG['pem_key'])
    public_key = keys.Key.fromString(data=CONFIG['pub_key'])
    return (private_key, public_key)


def _get_and_setup_factory(checker, portal):
    '''
    creates and adds sshkeys and portal to factory
    '''
    factory = SSHFactory()
    (private_key, public_key) = _create_private_and_public_keys()
    factory.privateKeys = {'ssh-rsa': private_key}
    factory.publicKeys = {'ssh-rsa': public_key}
    factory.portal = portal
    return factory


def _get_checker():
    '''
    creates login:password based on config
    '''
    checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
    checker.addUser(CONFIG['username'], CONFIG['password'])
    return checker


def _get_portal(checker):
    '''
    creates portal and adds realm and checker
    '''
    portal = Portal(HoneyRealm())
    portal.registerChecker(checker)
    return portal


if __name__ == '__main__':
    logging.info('Creating checker')
    checker = _get_checker()
    logging.info('Creating portal')
    portal = _get_portal(checker)
    logging.info('Setting up factory')
    factory = _get_and_setup_factory(checker, portal)
    reactor.listenTCP(CONFIG['port'], factory)
    logging.info('Starting up')
    reactor.run()
Python
0
@@ -536,66 +536,238 @@ -This is the bulk of the logic that handles all connections +After authentication and Avatar(user) creation%0A This is where all connections end up.%0A Async calls at lineReceived%0A Manhole protocol has a lot of magic.%0A It gives a line history and has a rough emulation of a terminal. %0A
472c22a83e9e3982e5b6705757f79fba3b32f3e6
Set path dependency with context to development and deployment
hooks/common.py
hooks/common.py
import os
import sys
import urllib2

# Add charmhelpers to the system path.
sys.path.insert(0, os.path.abspath(os.path.join('..', 'lib')))

from charmhelpers.core.hookenv import (
    log,
    config,
)

from charmhelpers.core.host import (
    mkdir,
)

from charmhelpers.fetch import (
    apt_install,
    apt_update,
)


def sanity_check():
    if not config()['canonical_domain']:
        log("No Canonical Domain specified - Aborting until configured")
        # It's reasonable to assume we're not doing anything useful at this
        # point, as we are unconfigured. Abort doing *anything*
        return False
    return True


# ###########
# Environment Probe / Modifications
# ###########

# Create Configuration Directories for local Bind9 Server
def make_bind_store():
    mkdir('/etc/bind')
    mkdir('/etc/bind/zones')


# Parse existing nameservers from resolv.conf
def existing_nameservers():
    dns_servers = []
    with open('/etc/resolv.conf') as f:
        contents = f.readlines()
    print contents
    for line in contents:
        if line.find('nameserver') != -1:
            dns_servers.append(line.replace('nameserver ', '').rstrip())
    return dns_servers


# this is kind of arbitrary, attempt to connect to a google ip address.
# This won't be the best solution, but i'm open to ideas on how to improve
# 'onlineability' checking.
def am_i_online():
    try:
        urllib2.urlopen('http://74.125.228.100', timeout=1)
        return True
    except urllib2.URLError:
        pass
    return False


# ###########
# BIND tasks
# ###########
def install():
    sanity_check()
    log("Preparing for BIND installation")
    apt_update(fatal=True)
    apt_install(packages=[
        'bind9',
        'dnsutils',
        'python-dnspython'
    ], fatal=True)


def install_bind():
    sanity_check()
    log("Preparing for BIND installation")
    apt_update(fatal=True)
    apt_install(packages=[
        'bind9',
        'dnsutils',
        'python-dnspython'
    ], fatal=True)
Python
0
@@ -69,16 +69,171 @@ m path.%0A +try:%0A sys.path.insert(0, os.path.abspath(os.path.join(os.environ%5B'CHARM_DIR'%5D,%0A 'lib')))%0Aexcept:%0A sys.path
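The delta wraps the sys.path setup in try/except so the hook works both deployed (CHARM_DIR set by Juju) and in local development. The same fallback shape, catching KeyError rather than the patch's bare except:

import os
import sys

# Prefer the deployed location, fall back to the development layout.
try:
    lib = os.path.join(os.environ['CHARM_DIR'], 'lib')
except KeyError:
    lib = os.path.abspath(os.path.join('..', 'lib'))
sys.path.insert(0, lib)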
a2b6cef1fc6dee679407d61312a23f96c3e99cda
Use a plain list of url instances for the tests urls too
oauth2_provider/tests/urls.py
oauth2_provider/tests/urls.py
from django.conf.urls import patterns, include, url
from django.contrib import admin

admin.autodiscover()

urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
)
Python
0
@@ -25,18 +25,8 @@ port - patterns, inc @@ -110,25 +110,9 @@ s = -patterns(%0A '', +( %0A
aaf243646eccebea2e1d764d7de58c40c279830c
Add test info print of host uuid
hostend/main.py
hostend/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import sys
sys.path.append('..')

from tornado.web import Application
import tornado.ioloop
import tornado.options
import tornado.httpserver
import tornado.autoreload
from tornado.options import define, options

from hostend.controller import *
from lib.utils import Host
from proxy import *
import docker
import json
from handlers import *

config = json.load(open('config.json','r'))

controller = Controller()
controller.reportUrl = config.get('reportUrl')
controller.requestUrl = config.get('requestUrl')

host = Host.currentHost('',switchInterface=config['switchName'],transportInterface=config['transportInterface'])

data = controller.request('getHostId',[host.mac,host.transportIP])
if 'uuid' in data :
    host.uuid = data['uuid']


define("port", default=8000, help="run on the given port", type=int)


class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r'/createContainer',CreateContainerHandler)
        ]
        settings = {
            'template_path': 'templates',
            'debug': True,
            'cookie_secret' : "dfaew989q2kejczo8u8923e400ialsjdf",
            'static_path': 'static'
        }
        self.host = host
        self.controller = controller
        self.containerProxy = DockerProxy(docker.Client('unix://var/run/docker.sock'),self.host,self.controller)
        tornado.web.Application.__init__(self, handlers, **settings)


if __name__ == "__main__":
    tornado.options.parse_command_line()
    app = Application()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Python
0.000001
@@ -817,16 +817,54 @@ uuid'%5D%0A%0A +print 'my host id is : %25s'%25host.uuid%0A%0A %0Adefine(
0fbfef27d35cea23ad0e20fd2c9df3e8a4a046cb
Fix GCF region tags (#1827)
functions/log/main.py
functions/log/main.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START functions_log_bigquery_stackdriver]
import base64
import json
# [END functions_log_bigquery_stackdriver]

# [START functions_log_helloworld]
import logging
# [END functions_log_helloworld]

# [START functions_log_retrieve]
import os
# [END functions_log_retrieve]

# [START functions_logs_retrieve]
import google.cloud.logging as cloud_logging
# [END functions_logs_retrieve]


# [START functions_log_helloworld]
def hello_world(data, context):
    """Background Cloud Function.
    Args:
        data (dict): The dictionary with data specific to the given event.
        context (google.cloud.functions.Context): The event metadata.
    """
    print('Hello, stdout!')
    logging.warn('Hello, logging handler!')
# [END functions_log_helloworld]


# [START functions_log_retrieve]
cloud_client = cloud_logging.Client()
log_name = 'cloudfunctions.googleapis.com%2Fcloud-functions'
cloud_logger = cloud_client.logger(log_name.format(os.getenv('GCP_PROJECT')))


def get_log_entries(request):
    """
    HTTP Cloud Function that displays log entries from Cloud Functions.
    Args:
        request (flask.Request): The request object.
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
    """
    """"""

    all_entries = cloud_logger.list_entries(page_size=10)
    entries = next(all_entries.pages)

    for entry in entries:
        timestamp = entry.timestamp.isoformat()
        print('* {}: {}'.format (timestamp, entry.payload))

    return 'Done!'
# [END functions_log_retrieve]


# [START functions_log_stackdriver]
def process_log_entry(data, context):
    data_buffer = base64.b64decode(data['data'])
    log_entry = json.loads(data_buffer)['protoPayload']

    print(f"Method: {log_entry['methodName']}")
    print(f"Resource: {log_entry['resourceName']}")
    print(f"Initiator: {log_entry['authenticationInfo']['principalEmail']}")
# [END functions_log_stackdriver]
Python
0
@@ -587,33 +587,24 @@ nctions_log_ -bigquery_ stackdriver%5D @@ -656,17 +656,8 @@ log_ -bigquery_ stac @@ -840,33 +840,32 @@ RT functions_log -s _retrieve%5D%0Aimpor @@ -924,17 +924,16 @@ ions_log -s _retriev
8d56c42dd3a721a477fa1333c1d979f4002e7cc1
Simplify importer paths after dockerization
labonneboite/importer/conf/lbbdev.py
labonneboite/importer/conf/lbbdev.py
import os

# --- importer input directory of DPAE and ETABLISSEMENT exports
INPUT_SOURCE_FOLDER = '/srv/lbb/data'

# --- job 1/8 & 2/8 : check_etablissements & extract_etablissements
JENKINS_ETAB_PROPERTIES_FILENAME = os.path.join(os.environ["WORKSPACE"], "properties.jenkins")
MINIMUM_OFFICES_TO_BE_EXTRACTED_PER_DEPARTEMENT = 10000

# --- job 3/8 & 4/8 : check_dpae & extract_dpae
JENKINS_DPAE_PROPERTIES_FILENAME = os.path.join(os.environ["WORKSPACE"], "properties_dpae.jenkins")
MAXIMUM_ZIPCODE_ERRORS = 100
MAXIMUM_INVALID_ROWS = 100

# --- job 5/8 : compute_scores
SCORE_COEFFICIENT_OF_VARIATION_MAX = 2.0
RMSE_MAX = 1500  # On 2017.03.15 departement 52 reached RMSE=1141
HIGH_SCORE_COMPANIES_DIFF_MAX = 70

# --- job 6/8 : validate_scores
MINIMUM_OFFICES_PER_DEPARTEMENT_FOR_DPAE = 500
MINIMUM_OFFICES_PER_DEPARTEMENT_FOR_ALTERNANCE = 0

# --- job 8/8 : populate_flags
BACKUP_OUTPUT_FOLDER = '/srv/lbb/backups/outputs'
BACKUP_FOLDER = '/srv/lbb/backups'
Python
0.000009
@@ -873,89 +873,4 @@ ags%0A -BACKUP_OUTPUT_FOLDER = '/srv/lbb/backups/outputs'%0ABACKUP_FOLDER = '/srv/lbb/backups'%0A
b2b573c706ec8c7a29b82edbae6f19917bf4b92f
Make generated corpus easier to learn
generateCPTCorpus.py
generateCPTCorpus.py
"""Script that generates a (synthetic) corpus to test the CPT model. The corpus consists of 5 documents containing fixed topics and opinions. The generation process is described in the CPT paper. A text document contains the topic words on the first line and the opion words on the second line. Usage: python generateCPTCorpus.py <out dir> """ import argparse import numpy as np from collections import Counter import codecs import os def generate_opinion_words(topic_counter, num_topics, phi, vocabulary): words = [] # select opinion (index) based on topic occurrence om = np.array([float(topic_counter[i]) for i in range(num_topics)]) om /= sum(om) for i in range(length_opinion): # opinion words topic = np.random.multinomial(1, om).argmax() word = np.random.multinomial(1, phi[topic]).argmax() words.append(vocabulary[word]) return words parser = argparse.ArgumentParser() #parser.add_argument('num_doc', help='the number of documents to be generated') #parser.add_argument('num_topic_words', help='the number of topic words per ' # 'document') #parser.add_argument('num_opinion_words', help='the number of opinion words ' # 'per document') parser.add_argument('out_dir', help='the directory where the generated ' 'documents should be saved.') args = parser.parse_args() if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) topic_vocabulary = np.array(['zon', 'ijs', 'strand', 'vanille', 'chocola', 'broccoli', 'wortel']) opinion_vocabulary = np.array(['warm', 'zwemmen', 'zonnig', 'bewolkt', 'vies', 'lekker', 'koud']) real_theta_topic = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.7, 0.3, 0.0], [0.0, 0.5, 0.5]]) real_phi_topic1 = np.array([[0.4, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.3, 0.0, 0.35, 0.35, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]]) real_phi_topic2 = np.array([[0.4, 0.0, 0.6, 0.0, 0.0, 0.0, 0.0], [0.0, 0.3, 0.0, 0.35, 0.35, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]]) real_phi_opinion1 = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]) real_phi_opinion2 = np.array([[0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0]]) num_topics = real_theta_topic.shape[1] phi_opinion_perspectives = [real_phi_opinion1, real_phi_opinion2] phi_topic_perspectives = [real_phi_topic1, real_phi_topic2] num_perspectives = len(phi_opinion_perspectives) length_topic = 50 length_opinion = 20 for p in range(num_perspectives): p_dir = os.path.join(args.out_dir, 'p{}'.format(p)) print p_dir if not os.path.exists(p_dir): os.makedirs(p_dir) for m, tm in enumerate(real_theta_topic): out_file = os.path.join(p_dir, 'document{}.txt'.format(m+1)) print out_file with codecs.open(out_file, 'wb', 'utf8') as f: topic_words = [] topic_counter = Counter() for i in range(length_topic): # topic words topic = np.random.multinomial(1, tm).argmax() topic_counter[topic] += 1 word = np.random.multinomial(1, phi_topic_perspectives[p][topic]).argmax() topic_words.append(topic_vocabulary[word]) #print topic_counter f.write('{}\n'.format(' '.join(topic_words))) opinion_words = generate_opinion_words(topic_counter, num_topics, phi_opinion_perspectives[p], opinion_vocabulary) f.write(' '.join(opinion_words))
Python
0
@@ -2279,30 +2279,30 @@ ay(%5B%5B0.4, 0. -6 +2 , 0. -0 +4 , 0.0, 0.0, @@ -2485,14 +2485,14 @@ , 0. -0 +2 , 0. -6 +4 , 0.
6fb2c9b56dfecafa5da5d3326b06654d9cdcf2e5
change self.Rs to self.Rbs to avoid problems with inherited KeplerLike
EXOSIMS/PlanetPopulation/AlbedoByRadius.py
EXOSIMS/PlanetPopulation/AlbedoByRadius.py
from EXOSIMS.PlanetPopulation.SAG13 import SAG13
import astropy.units as u
import astropy.constants as const
import numpy as np
import scipy.integrate as integrate


class AlbedoByRadius(SAG13):
    """Planet Population module based on SAG13 occurrence rates.

    NOTE: This assigns constant albedo based on radius ranges.

    Attributes:
        SAG13coeffs (float 4x2 ndarray):
            Coefficients used by the SAG13 broken power law. The 4 lines
            correspond to Gamma, alpha, beta, and the minimum radius.
        Gamma (float ndarray):
            Gamma coefficients used by SAG13 broken power law.
        alpha (float ndarray):
            Alpha coefficients used by SAG13 broken power law.
        beta (float ndarray):
            Beta coefficients used by SAG13 broken power law.
        Rplim (float ndarray):
            Minimum radius used by SAG13 broken power law.
        SAG13starMass (astropy Quantity):
            Assumed stellar mass corresponding to the given set of coefficients.
        mu (astropy Quantity):
            Gravitational parameter associated with SAG13starMass.
        Ca (float 2x1 ndarray):
            Constants used for sampling.
        ps (float nx1 ndarray):
            Constant geometric albedo values.
        Rb (float (n-1)x1 ndarray):
            Planetary radius break points for albedos in earthRad.
        Rs (float (n+1)x1 ndarray):
            Planetary radius break points with 0 padded on left and np.inf
            padded on right

    """

    def __init__(self, SAG13coeffs=[[.38, -.19, .26, 0.],[.73, -1.18, .59, 3.4]],
                 SAG13starMass=1., Rprange=[2/3., 17.0859375],
                 arange=[0.09084645, 1.45354324], ps=[0.2,0.5], Rb=[1.4],
                 **specs):

        SAG13.__init__(self, SAG13coeffs=SAG13coeffs, SAG13starMass=SAG13starMass,
                       Rprange=Rprange, arange=arange, **specs)

        # cast inputs to arrays
        self.ps = np.array(ps, ndmin=1, copy=False)
        self.Rb = np.array(Rb, ndmin=1, copy=False)
        # check to ensure proper inputs
        assert len(self.ps) - len(self.Rb) == 1, \
            'input albedos must have one more element than break radii'
        self.Rs = np.hstack((0.0,self.Rb,np.inf))

        # populate _outspec with new specific attributes
        self._outspec['ps'] = self.ps
        self._outspec['Rb'] = self.Rb

    def gen_plan_params(self, n):
        """Generate semi-major axis (AU), eccentricity, geometric albedo, and
        planetary radius (earthRad)

        Semi-major axis and planetary radius are jointly distributed.
        Eccentricity is a Rayleigh distribution. Albedo is a constant value
        based on planetary radius.

        Args:
            n (integer):
                Number of samples to generate

        Returns:
            a (astropy Quantity array):
                Semi-major axis in units of AU
            e (float ndarray):
                Eccentricity
            p (float ndarray):
                Geometric albedo
            Rp (astropy Quantity array):
                Planetary radius in units of earthRad

        """
        n = self.gen_input_check(n)
        # generate semi-major axis and planetary radius samples
        Rp, a = self.gen_radius_sma(n)

        # check for constrainOrbits == True for eccentricity samples
        # constants
        C1 = np.exp(-self.erange[0]**2/(2.*self.esigma**2))
        ar = self.arange.to('AU').value
        if self.constrainOrbits:
            # restrict semi-major axis limits
            arcon = np.array([ar[0]/(1.-self.erange[0]), ar[1]/(1.+self.erange[0])])
            # clip sma values to sma range
            sma = np.clip(a.to('AU').value, arcon[0], arcon[1])
            # upper limit for eccentricity given sma
            elim = np.zeros(len(sma))
            amean = np.mean(arcon)
            elim[sma <= amean] = 1. - ar[0]/sma[sma <= amean]
            elim[sma > amean] = ar[1]/sma[sma>amean] - 1.
            elim[elim > self.erange[1]] = self.erange[1]
            elim[elim < self.erange[0]] = self.erange[0]

            # additional constant
            C2 = C1 - np.exp(-elim**2/(2.*self.esigma**2))
            a = sma*u.AU
        else:
            C2 = self.enorm
        e = self.esigma*np.sqrt(-2.*np.log(C1 - C2*np.random.uniform(size=n)))

        # generate albedo from planetary radius
        p = self.get_p_from_Rp(Rp)

        return a, e, p, Rp

    def get_p_from_Rp(self, Rp):
        """Generate albedos from radius ranges

        Args:
            Rp (astropy Quantity array):
                Planetary radius with units of earthRad

        Returns:
            p (float ndarray):
                Albedo values

        """
        Rp = np.array(Rp.to('earthRad').value, ndmin=1, copy=False)
        p = np.zeros(Rp.shape)
        for i in xrange(len(self.Rs)-1):
            mask = np.where((Rp>=self.Rs[i])&(Rp<self.Rs[i+1]))
            p[mask] = self.ps[i]

        return p
Python
0
@@ -1380,16 +1380,17 @@ R +b s (float @@ -2227,16 +2227,17 @@ self.R +b s = np.h @@ -4973,16 +4973,17 @@ n(self.R +b s)-1):%0A @@ -5020,16 +5020,17 @@ %3E=self.R +b s%5Bi%5D)&(R @@ -5037,16 +5037,17 @@ p%3Cself.R +b s%5Bi+1%5D))
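The padded break-point array in get_p_from_Rp implements a piecewise-constant lookup; np.digitize performs the same binning without the explicit loop (a sketch, not EXOSIMS code):

import numpy as np

ps = np.array([0.2, 0.5])   # one albedo per radius bin
Rb = np.array([1.4])        # interior break points (earth radii)

Rp = np.array([0.8, 1.4, 3.0])
# digitize maps each radius to its bin: Rp < 1.4 -> 0, Rp >= 1.4 -> 1
print(ps[np.digitize(Rp, Rb)])  # [0.2 0.5 0.5]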
f1c270f2145cf1f48a0207696cb4f6e9592af357
Correct db `NAME`, use in-memory database for testing
NTHU_Course/settings/testing_sqlite.py
NTHU_Course/settings/testing_sqlite.py
'''
A configuration for testing in travis CI with sqlite3
'''
from .default import *  # noqa

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # https://docs.djangoproject.com/en/1.10/topics/testing/overview/#the-test-database
        # django uses in memory database for testing
        'NAME': 'test_sqlite'
    }
}
Python
0.000001
@@ -333,19 +333,16 @@ ': ' -test_sqlite +:memory: '%0A
34f3d762f608c3ea3fd12cded38dc3853d36545f
Update vouchers.py
2014-2015/B4-vouchers/vouchers.py
2014-2015/B4-vouchers/vouchers.py
vouchers = [1, 2, 3, 5, 8, 9, 18, 19, 46, 154, 313]

def main():
	for b in range(1, max(vouchers)):
		# Amount can be created with one voucher.
		if b in vouchers:
			continue
		bins = []
		ws = sorted(vouchers, reverse=True)
		for w in ws:
			for x in bins:
				if sum(x) + w <= b:
					x.append(w)
					w = None
					break
			if w != None:
				bins.append([w])
		r = list(map(sum, bins))
		if b not in list(map(sum, bins)):
			print(b)
			return

main()
Python
0
@@ -1,16 +1,635 @@ +#!/usr/bin/env python3%0A# -*- coding: utf-8 -*- %0A%0A# Copyright 2014 Fabian M.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# %09http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A vouchers = %5B1, 2 @@ -1064,15 +1064,11 @@ urn%0A + main()%0A -%09%09%09%0A
4ef31ecf71a8e1107329a49ed205c8b2cdffef65
Update utils.py
httpie/utils.py
httpie/utils.py
import json
import mimetypes
import re
import sys
import time
from collections import OrderedDict
from http.cookiejar import parse_ns_headers
from pprint import pformat
from typing import Any, List, Optional, Tuple

import requests.auth

RE_COOKIE_SPLIT = re.compile(r', (?=[^ ;]+=)')
Item = Tuple[str, Any]
Items = List[Item]


class JsonDictPreservingDuplicateKeys(OrderedDict):
    """A specialized JSON dict preserving duplicate keys.

    """

    # Python versions prior to 3.8 suffer from an issue with multiple keys with the same name.
    # `json.dumps(obj, indent=N, sort_keys=True)` will output sorted keys when they are unique, and
    # duplicate keys will be outputted as they were defined in the original data.
    # See <https://bugs.python.org/issue23493#msg400929> for the behavior change between Python versions.
    SUPPORTS_SORTING = sys.version_info >= (3, 8)

    def __init__(self, items: Items):
        self._items = items
        self._ensure_items_used()

    def _ensure_items_used(self) -> None:
        """HACK: Force `json.dumps()` to use `self.items()` instead of an empty dict.

        Two JSON encoders are available on CPython: pure-Python (1) and C (2) implementations.

        (1) The pure-python implementation will do a simple `if not dict: return '{}'`, and
        we could fake that check by implementing the `__bool__()` method.
        Source:
        - <https://github.com/python/cpython/blob/9d318ad/Lib/json/encoder.py#L334-L336>

        (2) On the other hand, the C implementation will do a check on the number of items
        contained inside the dict, using a verification on `dict->ma_used`, which is updated
        only when an item is added/removed from the dict. For that case, there is no
        workaround but to add an item into the dict.
        Sources:
        - <https://github.com/python/cpython/blob/9d318ad/Modules/_json.c#L1581-L1582>
        - <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L53>
        - <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L17-L18>

        To please both implementations, we simply add one item to the dict.

        """
        if self._items:
            self['__hack__'] = '__hack__'

    def items(self) -> Items:
        """Return all items, duplicate ones included.

        """
        return self._items


def load_json_preserve_order_and_dupe_keys(s):
    return json.loads(s, object_pairs_hook=JsonDictPreservingDuplicateKeys)


def repr_dict(d: dict) -> str:
    return pformat(d)


def humanize_bytes(n, precision=2):
    # Author: Doug Latornell
    # Licence: MIT
    # URL: https://code.activestate.com/recipes/577081/
    """Return a humanized string representation of a number of bytes.

    >>> humanize_bytes(1)
    '1 B'
    >>> humanize_bytes(1024, precision=1)
    '1.0 kB'
    >>> humanize_bytes(1024 * 123, precision=1)
    '123.0 kB'
    >>> humanize_bytes(1024 * 12342, precision=1)
    '12.1 MB'
    >>> humanize_bytes(1024 * 12342, precision=2)
    '12.05 MB'
    >>> humanize_bytes(1024 * 1234, precision=2)
    '1.21 MB'
    >>> humanize_bytes(1024 * 1234 * 1111, precision=2)
    '1.31 GB'
    >>> humanize_bytes(1024 * 1234 * 1111, precision=1)
    '1.3 GB'

    """
    abbrevs = [
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'B')
    ]

    if n == 1:
        return '1 B'

    for factor, suffix in abbrevs:
        if n >= factor:
            break

    # noinspection PyUnboundLocalVariable
    return f'{n / factor:.{precision}f} {suffix}'


class ExplicitNullAuth(requests.auth.AuthBase):
    """Forces requests to ignore the ``.netrc``.

    <https://github.com/psf/requests/issues/2773#issuecomment-174312831>

    """

    def __call__(self, r):
        return r


def get_content_type(filename):
    """
    Return the content type for ``filename`` in format appropriate
    for Content-Type headers, or ``None`` if the file type is unknown
    to ``mimetypes``.

    """
    return mimetypes.guess_type(filename, strict=False)[0]


def split_cookies(cookies):
    """
    When ``requests`` stores cookies in ``response.headers['Set-Cookie']``
    it concatenates all of them through ``, ``.

    This function splits cookies apart being careful to not to split
    on ``, `` which may be part of cookie value.

    """
    if not cookies:
        return []
    return RE_COOKIE_SPLIT.split(cookies)


def get_expired_cookies(
    cookies: str,
    now: float = None
) -> List[dict]:

    now = now or time.time()

    def is_expired(expires: Optional[float]) -> bool:
        return expires is not None and expires <= now

    attr_sets: List[Tuple[str, str]] = parse_ns_headers(
        split_cookies(cookies)
    )

    cookies = [
        # The first attr name is the cookie name.
        dict(attrs[1:], name=attrs[0][0])
        for attrs in attr_sets
    ]

    _max_age_to_expires(cookies=cookies, now=now)

    return [
        {
            'name': cookie['name'],
            'path': cookie.get('path', '/')
        }
        for cookie in cookies
        if is_expired(expires=cookie.get('expires'))
    ]


def _max_age_to_expires(cookies, now):
    """
    Translate `max-age` into `expires` for Requests to take it into account.

    HACK/FIXME: <https://github.com/psf/requests/issues/5743>

    """
    for cookie in cookies:
        if 'expires' in cookie:
            continue
        max_age = cookie.get('max-age')
        if max_age and max_age.isdigit():
            cookie['expires'] = now + float(max_age)
Python
0.000001
@@ -431,22 +431,16 @@ te keys. -%0A%0A %22%22%22%0A%0A
caa36d6a0e1a0b3d2c64d42b1494f3e4eeb30722
Update docstrings
husc/metrics.py
husc/metrics.py
from itertools import combinations

import numpy as np
from scipy.spatial.distance import pdist


def sq_to_dist(i, j, n):
    """Convert coordinate of square distance matrix to condensed matrix index.

    The condensed version of a squareform, pairwise distance matrix is
    a linearisation of the upper triangular, non-diagonal coordinates
    of the squareform distance matrix. This function returns the [i, j]-th
    coordinate of the condensed array.

    eg. given a squareform matrix,

    array([[  0.        ,  10.        ,  22.36067977],
           [ 10.        ,   0.        ,  14.14213562],
           [ 22.36067977,  14.14213562,   0.        ]])

    The condensed version of this matrix is:

    array([ 10.        ,  22.36067977,  14.14213562])

    Parameters
    ----------
    i : int
        i-th coordinate.
    j : int
        j-th coordinate.
    n : int
        Dimension n of n*n distance matrix.

    Returns
    -------
    index : int
        Position of pairwise distance [i, j] in
        condensed distance matrix.

    Examples
    --------
    >>> sq_to_dist(1, 2, 3)
    2
    """
    index = i*n + j - i*(i+1)/2 - i - 1
    return index


def mongo_group_by(collection, group_by):
    """Group MongoDB collection according to specified field.

    Sends aggregate query to MongoDB collection to group all documents by a
    given field and returns dictionary mapping the field to the
    corresponding (plate, well) co-ordinate(s).

    Parameters
    ----------
    collection : pymongo collection
        Pymongo object directing to collection.
    group_by : string
        Field to group collection by.

    Returns
    -------
    query_dict : dict
        Query dictionary.
    """
    mongo_query = collection.aggregate([{
        '$group' : {
            # groups all documents according to specified field
            '_id': '$' + group_by,
            'coords': {
                '$addToSet': {
                    # add cell_plate_barcode and well for each document
                    # belonging to the group
                    'cell_plate_barcode': '$cell_plate_barcode',
                    'well': '$well'
                }
            }
        }
    }])['result']

    query_dict = {}
    for doc in mongo_query:
        query_dict[doc['_id']] = []
        for coord in doc['coords']:
            try:
                new_coord = (coord['cell_plate_barcode'], str(coord['well']))
                query_dict[doc['_id']].append(new_coord)
            except KeyError:
                pass
    return query_dict


def gene_distance_score(X, collection, metric='euclidean'):
    """Find intra/extra gene distance scores between samples.

    Parameters
    ----------
    X : Data frame, shape (n_samples, n_features)
        Feature data frame.
    metric : string, optional
        Which distance measure to use when calculating distances.
        Must be one of the options allowable in scipy.spatial.distance.pdist.
        Default is euclidean distance.

    Returns
    -------
    all_intragene_data : array
        An 1D array with intra-gene distances (i.e. distances
        between samples with the same gene knocked down).
    all_intergene_data : array
        An 1D array with inter-gene distances (i.e. distances
        between samples with different gene knocked down).
    """
    gene_dict = mongo_group_by(collection, 'gene_name')

    all_intragene_index = []
    for key in gene_dict:
        if len(gene_dict[key]) > 1:
            indices = map(X.index.get_loc, gene_dict[key])
            for i, j in combinations(indices, 2):
                all_intragene_index.append(sq_to_dist(i, j, X.shape[0]))

    all_intragene_index.sort()
    n = sq_to_dist(X.shape[0], X.shape[0], X.shape[0])
    all_intergene_index = np.setdiff1d(np.arange(n), all_intragene_index,
                                       assume_unique=True)
    distance = pdist(X, metric)
    all_intragene_data = distance[all_intragene_index]
    all_intergene_data = distance[all_intergene_index]
    return all_intragene_data, all_intergene_data
Python
0.000001
@@ -1687,33 +1687,145 @@ dict -%0A Query dictionary + %7B string : list of tuple %7D%0A Query dictionary mapping the specified group_by field to a list of%0A (plate, well) co-ordinates .%0A @@ -2802,13 +2802,13 @@ tra/ -extra +inter gen
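A quick sanity check that the condensed-index formula from sq_to_dist above agrees with scipy's own pdist ordering (written for Python 3, hence the integer division; the sample points are arbitrary):

import numpy as np
from scipy.spatial.distance import pdist, squareform

def sq_to_dist(i, j, n):
    # Same formula as in the record, with // so the result stays an int.
    return i * n + j - i * (i + 1) // 2 - i - 1

X = np.array([[0.0, 0.0], [6.0, 8.0], [6.0, 0.0]])
cond, square = pdist(X), squareform(pdist(X))
n = X.shape[0]
for i in range(n):
    for j in range(i + 1, n):
        assert cond[sq_to_dist(i, j, n)] == square[i, j]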
241376f76d4175c98c8226998832b8054697504c
Fix xpath expression for tomorrow's forecast
app/jobs/yr.py
app/jobs/yr.py
#!/usr/bin/env python import requests from jobs import AbstractJob from lxml import etree class Yr(AbstractJob): def __init__(self, conf): self.url = conf['url'] self.interval = conf['interval'] def _parse_tree(self, tree, tabular_xpath=None): if tabular_xpath is None: tabular = tree.xpath('/weatherdata/forecast/tabular/time[1]').pop() data_root = tree.xpath( '/weatherdata/observations/weatherstation[1]').pop() else: tabular = tree.xpath(tabular_xpath).pop() data_root = tabular windSpeed = data_root.xpath('windSpeed').pop() return { 'location': tree.xpath('/weatherdata/location/name').pop().text, 'temperature': data_root.xpath('temperature').pop().get('value'), 'description': tabular.xpath('symbol').pop().get('name'), 'wind': { 'speed': windSpeed.get('mps'), 'description': windSpeed.get('name'), 'direction': data_root.xpath('windDirection').pop().get('name') } } def _parse(self, xml): tree = etree.fromstring(xml) data = self._parse_tree(tree) data.update({'tomorrow': self._parse_tree( tree, '/weatherdata/forecast/tabular/time[3]')}) return data def get(self): r = requests.get(self.url) if r.status_code == 200 and len(r.content) > 0: return self._parse(r.content) return {}
Python
0.000131
@@ -32,16 +32,57 @@ equests%0A +from datetime import datetime, timedelta%0A from job @@ -1266,108 +1266,266 @@ -data.update(%7B'tomorrow': self._parse_tree(%0A tree, '/weatherdata/forecast/tabular/time%5B3%5D' +tomorrow = datetime.now().date() + timedelta(days=1)%0A xpath = ('/weatherdata/forecast/tabular/time%5B@period=2 and'%0A ' starts-with(@from, %22%25s%22)%5D') %25 (tomorrow.strftime('%25F'),)%0A data.update(%7B'tomorrow': self._parse_tree(tree, xpath )%7D)%0A
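For reference, the XPath the patch builds ends up looking like this; note that '%F' (used in the diff) is shorthand for '%Y-%m-%d' on most libcs but is not guaranteed on every platform, so the explicit form is spelled out here:

from datetime import datetime, timedelta

tomorrow = datetime.now().date() + timedelta(days=1)
xpath = ('/weatherdata/forecast/tabular/time[@period=2 and'
         ' starts-with(@from, "%s")]') % (tomorrow.strftime('%Y-%m-%d'),)
print(xpath)  # e.g. .../time[@period=2 and starts-with(@from, "2015-06-02")]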
b6b522bbdde24f0e6eab0e7a91830171bcac63a5
Add logout url
genevieve/urls.py
genevieve/urls.py
from django.conf import settings from django.conf.urls import patterns, include, url from django.conf.urls.static import static from django.contrib import admin from django.contrib.auth import views as auth_views from .views import UserCreateView admin.autodiscover() urlpatterns = patterns('', url(r'^admin/', include(admin.site.urls)), ) urlpatterns = patterns('', # Examples: # url(r'^$', 'genevieve.views.home', name='home'), # url(r'^blog/', include('blog.urls')), (r'^file_process/', include('file_process.urls')), url(r'^accounts/signup/$', UserCreateView.as_view()), url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html', 'extra_context': {'next': '/file_process'}}, name='auth_login'), ) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Python
0.000003
@@ -759,16 +759,127 @@ login'), +%0A url(r'%5Eaccounts/logout/$', auth_views.logout, %7B'next_page': '/file_process'%7D,%0A name='auth_logout'), %0A%0A) + st
babc90158facf0e215aa18a2c0f262c2eb6e78a2
Fix issue with connecting to postgres on development
geodj/settings.py
geodj/settings.py
import os # Override these on production env os.environ.setdefault("APP_ENV", "development") os.environ.setdefault("SECRET_KEY", "^uhrm48x9y=1f&+$bg=oc(#23mp0*g5k%8+si9tdz7&4_xk&lf") BASE_DIR = os.path.dirname(os.path.abspath(__file__)) DEBUG = os.environ['APP_ENV'] != 'production' TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = {} if os.environ["APP_ENV"] == "production": import dj_database_url DATABASES['default'] = dj_database_url.config() else: DATABASES['default'] = { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'geodj_development', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['*'] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = 'staticfiles' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = os.environ["SECRET_KEY"] # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'geodj.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'geodj.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'geodj', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
Python
0
@@ -707,24 +707,33 @@ 'HOST': ' +localhost ',%0A '
ba2161f6adf5a0e0782e322371d7071ca79d02e3
Update tornado for mypy
gidgethub/tornado.py
gidgethub/tornado.py
from typing import Mapping, Optional, Tuple from tornado import gen from tornado import httpclient from . import abc as gh_abc class GitHubAPI(gh_abc.GitHubAPI): async def _request(self, method: str, url: str, headers: Mapping, body: bytes = b'') -> Tuple[int, Mapping, bytes]: """Make an HTTP request.""" if method == "GET" and not body: real_body = None else: real_body = body request = httpclient.HTTPRequest(url, method, headers, real_body) # Since Tornado has designed AsyncHTTPClient to be a singleton, there's # no reason not to simply instantiate it every time. client = httpclient.AsyncHTTPClient() response = await client.fetch(request, raise_error=False) return response.code, response.headers, response.body async def sleep(self, seconds: float) -> None: """Sleep for the specified number of seconds.""" await gen.sleep(seconds)
Python
0
@@ -228,16 +228,26 @@ Mapping +%5Bstr, str%5D ,%0A @@ -303,16 +303,26 @@ Mapping +%5Bstr, str%5D , bytes%5D
ad56d5aa6b2359098c4ed6bf9cd37cb58613a372
Update github3.repos.tag for consistency
github3/repos/tag.py
github3/repos/tag.py
# -*- coding: utf-8 -*- """ github3.repos.tag ================= This module contains the RepoTag object for GitHub's tag API. """ from __future__ import unicode_literals from ..models import GitHubCore class RepoTag(GitHubCore): """The :class:`RepoTag <RepoTag>` object. This stores the information representing a tag that was created on a repository. See also: http://developer.github.com/v3/repos/#list-tags """ def _update_attributes(self, tag): #: Name of the tag. self.name = self._get_attribute(tag, 'name') #: URL for the GitHub generated zipball associated with the tag. self.zipball_url = self._get_attribute(tag, 'zipball_url') #: URL for the GitHub generated tarball associated with the tag. self.tarball_url = self._get_attribute(tag, 'tarball_url') #: Dictionary containing the SHA and URL of the commit. self.commit = self._get_attribute(tag, 'commit', {}) def _repr(self): return '<Repository Tag [{0}]>'.format(self) def __str__(self): return self.name
Python
0
@@ -24,46 +24,8 @@ %0A%22%22%22 -%0Agithub3.repos.tag%0A=================%0A%0A This @@ -81,18 +81,16 @@ tag API. -%0A%0A %22%22%22%0Afrom @@ -136,23 +136,16 @@ om . -.models import GitH @@ -140,26 +140,44 @@ import -GitHubCore +commit%0Afrom .. import models %0A%0A%0Aclass @@ -185,16 +185,23 @@ RepoTag( +models. GitHubCo @@ -216,130 +216,306 @@ %22%22%22 -The :class:%60RepoTag %3CRepoTag%3E%60 object. This stores the information%0A representing a tag that was created on a repository +Representation of a tag made on a GitHub repository.%0A%0A .. note::%0A%0A This is distinct from :class:%60~github3.git.Tag%60. This object%0A has information specific to a tag on a *GitHub* repository.%0A That includes URLs to the tarball and zipball files auto-generated%0A by GitHub .%0A%0A @@ -579,135 +579,397 @@ ags%0A +%0A -%22%22%22%0A def _update_attributes(self, tag):%0A #: Name of the tag.%0A self.name = self._get_attribute(tag, 'name') +This object has the following attributes:%0A%0A .. attribute:: commit%0A%0A .. versionchanged:: 1.0.0%0A%0A This attribute used to be a two item dictionary.%0A%0A A :class:%60~github3.repos.commit.MiniCommit%60 object representing the%0A commit this tag references.%0A%0A .. attribute:: name%0A%0A The name of this tag, e.g., %60%60v1.0.0%60%60.%0A%0A .. attribute:: tarball_url %0A%0A @@ -966,34 +966,35 @@ ll_url%0A%0A -#: +The URL for the Git @@ -994,95 +994,63 @@ the -GitHub generated zipball associated with the +tarball file generated by GitHub for this tag.%0A - +%0A - self.zipball_url = self._get_ +.. attr @@ -1054,23 +1054,19 @@ ttribute -(tag, ' +:: zipball_ @@ -1060,34 +1060,32 @@ te:: zipball_url -') %0A%0A #: URL @@ -1078,18 +1078,19 @@ -#: +The URL for @@ -1098,52 +1098,49 @@ the -GitHub generated tarball associated with the +zipball file generated by GitHub for this tag @@ -1149,40 +1149,28 @@ - self.tarball_url = self._get +%22%22%22%0A%0A def _update _att @@ -1179,154 +1179,206 @@ bute -(tag +s(self , -' ta -rball_url')%0A%0A #: Dictionary containing the SHA and URL of the commit.%0A self.commit = self._get_attribute(tag, 'commit', %7B%7D) +g):%0A self.commit = commit.MiniCommit(tag%5B'commit'%5D, self)%0A self.name = tag%5B'name'%5D%0A self.tarball_url = tag%5B'tarball_url'%5D%0A self.zipball_url = tag%5B'zipball_url'%5D %0A%0A
73f3c743139927a049c5e733f98e1fcb8125721a
Update release version
gdaltools/metadata.py
gdaltools/metadata.py
# -*- coding: utf-8 -*- """Project metadata Information describing the project. """ # The package name, which is also the "UNIX name" for the project. package = 'pygdaltools' project = 'pygdaltools' project_no_spaces = project.replace(' ', '') version = '0.2' description = """Python wrapper for Gdal/OGR command line tools""" authors = ['Cesar Martinez Izquierdo - SCOLAB'] authors_string = ', '.join(authors) emails = [] license = 'AGPL3' copyright = '2016 ' + authors_string url = 'https://github.com/scolab-dot-eu/pygdaltools'
Python
0
@@ -252,17 +252,17 @@ on = '0. -2 +3 '%0Adescri
ad423635860c93021d8dcf8ba33cc86ddd006132
fix error message when no OpenJPEG library can be found, closes #284
glymur/lib/config.py
glymur/lib/config.py
""" Configure glymur to use installed libraries if possible. """ # configparser is new in python3 (pylint/python-2.7) # pylint: disable=F0401 import ctypes from ctypes.util import find_library import os import platform import warnings import sys if sys.hexversion <= 0x03000000: from ConfigParser import SafeConfigParser as ConfigParser from ConfigParser import NoOptionError else: from configparser import ConfigParser from configparser import NoOptionError # default library locations for MacPorts _macports_default_location = { 'openjp2': '/opt/local/lib/libopenjp2.dylib', 'openjpeg': '/opt/local/lib/libopenjpeg.dylib' } # default library locations on Windows _windows_default_location = { 'openjp2': os.path.join('C:\\', 'Program files', 'OpenJPEG 2.0', 'bin', 'openjp2.dll'), 'openjpeg': os.path.join('C:\\', 'Program files', 'OpenJPEG 1.5', 'bin', 'openjpeg.dll') } def glymurrc_fname(): """Return the path to the configuration file. Search order: 1) current working directory 2) environ var XDG_CONFIG_HOME 3) $HOME/.config/glymur/glymurrc """ # Current directory. fname = os.path.join(os.getcwd(), 'glymurrc') if os.path.exists(fname): return fname confdir = get_configdir() if confdir is not None: fname = os.path.join(confdir, 'glymurrc') if os.path.exists(fname): return fname # didn't find a configuration file. return None def load_openjpeg_library(libname): path = read_config_file(libname) if path is not None: return load_library_handle(path) # No location specified by the configuration file, must look for it # elsewhere. path = find_library(libname) if path is None: # Could not find a library via ctypes if platform.system() == 'Darwin': # MacPorts path = _macports_default_location[libname] elif os.name == 'nt': path = _windows_default_location[libname] if path is not None and not os.path.exists(path): # the mac/win default location does not exist. return None return load_library_handle(path) def load_library_handle(path): """Load the library, return the ctypes handle.""" if path is None or path in ['None', 'none']: # Either could not find a library via ctypes or user-configuration-file, # or we could not find it in any of the default locations. # This is probably a very old linux. return None try: if os.name == "nt": opj_lib = ctypes.windll.LoadLibrary(path) else: opj_lib = ctypes.CDLL(path) except (TypeError, OSError): msg = '"Library {0}" could not be loaded. Operating in degraded mode.' msg = msg.format(path) warnings.warn(msg, UserWarning) opj_lib = None return opj_lib def read_config_file(libname): """ Extract library locations from a configuration file. Parameters ---------- libname : str One of either 'openjp2' or 'openjpeg' Returns ------- path : None or str None if no location is specified, otherwise a path to the library """ filename = glymurrc_fname() if filename is None: # There's no library file path to return in this case. return None # Read the configuration file for the library location. parser = ConfigParser() parser.read(filename) try: path = parser.get('library', libname) except NoOptionError: path = None return path def glymur_config(): """ Try to ascertain locations of openjp2, openjpeg libraries. Returns ------- tpl : tuple tuple of library handles """ lst = [] for libname in ['openjp2', 'openjpeg']: lst.append(load_openjpeg_library(libname)) if all(handle is None for handle in lst): msg = "Neither the openjp2 nor the openjpeg library could be loaded. " raise IOError(msg) return tuple(lst) def get_configdir(): """Return string representing the configuration directory. 
Default is $HOME/.config/glymur. You can override this with the XDG_CONFIG_HOME environment variable. """ if 'XDG_CONFIG_HOME' in os.environ: return os.path.join(os.environ['XDG_CONFIG_HOME'], 'glymur') if 'HOME' in os.environ and os.name != 'nt': # HOME is set by WinPython to something unusual, so we don't # necessarily want that. return os.path.join(os.environ['HOME'], '.config', 'glymur') # Last stand. Should handle windows... others? return os.path.join(os.path.expanduser('~'), 'glymur')
Python
0
@@ -2819,71 +2819,72 @@ = ' -%22L +The l ibrary -%7B0%7D%22 could not be loaded. Operating in degraded mode. +specified by configuration file at %7B0%7D could not be '%0A @@ -2896,26 +2896,20 @@ msg ++ = -msg.format(path) +'loaded.' %0A @@ -2929,16 +2929,29 @@ warn(msg +.format(path) , UserWa
2c0dbf04f04b686cac79e40791c3ed298618dd07
add more info on process
gen-gallery-images.py
gen-gallery-images.py
'''Generates small and medium gallery images from a set of large originals. Some maps will be tall, some wide, some square so we want to simply maintain the ratio and resize down to a set maximum. Original images need to be downloaded to the large path below. They are accessible in this Dropbox folder https://www.dropbox.com/sh/z9fjbfquwd1t5st/AAAxV22dpleDodnxJqiertt0a?dl=0 ''' from wand.image import Image from os import listdir from os.path import isfile, join small_size = 400 med_size = 1280 img_path = "/Users/twelch/src/foss4g2014-wordpress/uploads/mapgallery/" large_path = img_path+"large/" med_path = img_path+"medium/" small_path = img_path+"small/" #Get list of files in directory img_files = [ f for f in listdir(large_path) if isfile(join(large_path,f)) ] print "Looking for images in: " + img_path print "Found the following: " + str(img_files) for f in img_files: if f.startswith("."): print "Not an image file: " + str(f) else: print "Processing image: "+f #Create image objects for small and medium using original large mi = Image(filename=join(large_path,f)) si = mi.clone() print 'Original: '+str(mi.width)+'x'+str(mi.height) #Resize maintaining aspect ratio mi.transform(resize=str(med_size)+'x'+str(med_size)+'>') print 'Medium: '+str(mi.width)+'x'+str(mi.height) si.transform(resize=str(small_size)+'x'+str(small_size)+'>') print 'Small: '+str(si.width)+'x'+str(si.height) #Convert to JPEG if necessary and save as new file mf = join(med_path,f) if mi.format != 'JPEG': mi.convert('jpeg') mi.save(filename=mf) sf = join(small_path,f) if si.format != 'JPEG': si.convert('jpeg') si.save(filename=sf)
Python
0
@@ -243,16 +243,22 @@ e large +image path bel @@ -378,16 +378,250 @@ t0a?dl=0 +%0A%0AOnce large, medium, and small images are in place the folders are copied into the foss4g Wordpress theme, in the uploads/mapgallery folder where the images are served from to the map gallery page. See the map gallery page template. %0A'''%0A%0Afr
1ea1773fd8f1e43bb8d0fa9c24efc158376a077b
Fix spelling for Volume
packages/mesos/extra/make_disk_resources.py
packages/mesos/extra/make_disk_resources.py
#!/opt/mesosphere/bin/python3 import json import os import re import shutil import subprocess import sys from datetime import datetime from itertools import chain from math import floor from string import Template PROG = os.path.basename(__file__) JSON_COMMON_TEMPLATE = Template(''' { "name": "disk", "role": "$role", "scalar": { "value": $free_space }, "type": "SCALAR" } ''') JSON_DISK_TEMPLATE = Template(''' { "disk": { "source": { "type": "MOUNT", "mount": { "root": "$mp" } } } } ''') MOUNT_PATTERN = re.compile('on\s+(/dcos/volume\d+)\s+', re.M | re.I) # Conversion factor for Bytes -> MB calculation MB = float(1 << 20) TOLERANCE_MB = 100 RESOURCES_TEMPLATE_HEADER = '''# Generated by {prog} on {dt} # ''' RESOURCES_TEMPLATE = ''' MESOS_RESOURCES='{res}' ''' class VolumeDiscoveryException(Exception): pass def find_mounts_matching(pattern): ''' find all matching mounts from the output of the mount command ''' print('Looking for mounts matching pattern "{}"'.format(pattern.pattern)) mounts = subprocess.check_output(['mount'], universal_newlines=True) return pattern.findall(mounts) def make_disk_resources_json(mounts, role): ''' Disk resources are defined in https://mesos.apache.org/documentation/latest/multiple-disk/ Substitute discovered mounts into JSON_TEMPLATE, returning a list of Mounts @type mounts: tuple(mount_point, free_space_in_mb) @rtype: list ''' for (mp, fs) in mounts: common = JSON_COMMON_TEMPLATE.substitute(free_space=fs, role=role) disk = JSON_DISK_TEMPLATE.substitute(mp=mp) yield json.loads(common), json.loads(disk) def get_disk_free(path): ''' @type path: str @rtype tuple ''' return (path, floor(float(shutil.disk_usage(path).free) / MB)) def get_mounts_and_freespace(matching_mounts): for mount, free_space in map(get_disk_free, matching_mounts): net_free_space = free_space - TOLERANCE_MB if net_free_space <= 0: # Per @cmaloney and @lingmann, we should hard exit here if volume # doesn't have sufficient space. raise VolumeDiscoveryException( '{} has {} MB net free space, expected > 100M'.format(mount, net_free_space) ) yield (mount, net_free_space) def _handle_root_volume(root_volume, role): os.makedirs(root_volume, exist_ok=True) for common, _ in make_disk_resources_json(get_mounts_and_freespace([root_volume]), role): yield common, {} def stitch(parts): common, disk = parts common.update(disk) return common def main(output_env_file): ''' Find mounts and freespace matching MOUNT_PATTERN, create RESOURCES for the disks, and merge the list of disk resources with optionally existing MESOS_RESOURCES environment varianble. 
@type output_env_file: str, filename to write resources ''' if os.path.exists(output_env_file): print('Voume discovery assumed to be completed because {} exists'.format(output_env_file)) return mounts_dfree = list(get_mounts_and_freespace(find_mounts_matching(MOUNT_PATTERN))) print('Found matching mounts : {}'.format(mounts_dfree)) role = os.getenv('MESOS_DEFAULT_ROLE', '*') disk_resources = list( map( stitch, chain( make_disk_resources_json(mounts_dfree, role), _handle_root_volume(os.environ['MESOS_WORK_DIR'], role) ) ) ) print('Generated disk resources map: {}'.format(disk_resources)) # write contents to a temporary file tmp_file = '{}.tmp'.format(output_env_file) with open(tmp_file, 'w') as env_file: env_file.write(RESOURCES_TEMPLATE_HEADER.format(prog=PROG, dt=datetime.now())) if disk_resources: msg = 'Creating updated environment artifact file : {}' env_resources = os.environ.get('MESOS_RESOURCES', '[]') try: resources = json.loads(env_resources) except ValueError as e: print('ERROR: Invalid MESOS_RESOURCES JSON {} --- {}'.format(e, env_resources), file=sys.stderr) sys.exit(1) resources.extend(disk_resources) env_file.write(RESOURCES_TEMPLATE.format(res=json.dumps(resources))) else: msg = 'No additional volumes. Empty artifact file {} created' print(msg.format(output_env_file)) # Now rename tmp file to final file. This guarantees that anything reading # this file never sees a "partial" version of the file. It either doesn't # exist or it is there with full contents. os.rename(tmp_file, output_env_file) if __name__ == '__main__': try: main(sys.argv[1]) except KeyError as e: print('ERROR: Missing key {}'.format(e), file=sys.stderr) sys.exit(1) except VolumeDiscoveryException as e: print('ERROR: {}'.format(e), file=sys.stderr) sys.exit(1)
Python
0.99999
@@ -3065,16 +3065,17 @@ rint('Vo +l ume disc
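The free-space accounting in the record reduces to a few lines; here '/' stands in for one of the /dcos/volumeN mount points:

import shutil
from math import floor

MB = float(1 << 20)     # bytes per MB (strictly a MiB), as in the record
TOLERANCE_MB = 100

free_mb = floor(shutil.disk_usage('/').free / MB)
net_free_mb = free_mb - TOLERANCE_MB   # must stay positive or discovery aborts
print('/', free_mb, 'MB free,', net_free_mb, 'MB usable')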
ec8abd51b1ee818dc428c1d4c6efd8e94a91eeb0
Fix bug in build comparison
extruder/copy_packages.py
extruder/copy_packages.py
from __future__ import print_function from argparse import ArgumentParser import os from ruamel import yaml from binstar_client.utils import get_server_api from binstar_client.errors import NotFound from conda.version import VersionOrder __all__ = ['PackageCopier'] class PackageCopier(object): def __init__(self, source, destination, input_packages, token=''): """ Parameters ---------- source : ``str`` Name of source conda channel. destination : ``str`` Name of destination conda channel. input_package : ``dict`` Dictionary in which keys are package names and values are either a string version number (e.g. ``'1.0.1'``) or ``None``, which indicates the latest version on the source channel should be copied. This dictionary should contain the packages that potentially need to be copied. token : ``str``, optional Token for conda API. Needed for the actual copy operation. """ self.source = source self.destination = destination self.input_packages = input_packages self.api = get_server_api(token) self.to_copy = self._package_versions_to_copy() def _package_versions_to_copy(self): """ Determine which version of each package in packages should be copied from conda channel source to channel destination. Returns ------- ``dict`` Dictionary whose keys are the packages that actually need to be copied and whose values are the version to be copied. """ packages = self.input_packages copy_versions = {} for p, version in packages.items(): copy_builds = [] need_to_copy = False # This will end up True if the version exists on both src and dest # and triggers a comparison of file names. Technically, it could # be omitted, but seems more likely to be clear to future me. check_builds = False cf = self.api.package(self.source, p) cf_version = VersionOrder(cf['latest_version']) if version is not None: pinned_version = VersionOrder(version) else: pinned_version = None if pinned_version is not None: if str(pinned_version) not in cf['versions']: error_message = ('Version {} of package {} not ' 'found on source channel {}.') err = error_message.format(pinned_version, p, self.source) raise RuntimeError(err) try: ap = self.api.package(self.destination, p) except NotFound: need_to_copy = True ap_version = None else: ap_version = VersionOrder(ap['latest_version']) if pinned_version is None: if cf_version > ap_version: need_to_copy = True elif cf_version == ap_version: check_builds = True else: if str(pinned_version) not in ap['versions']: need_to_copy = True else: check_builds = True if check_builds: # If we get here it means that the same version is on both # source and destination so we need to check the individual # builds. copy_builds = \ self._check_for_missing_builds(cf, ap, cf_version) need_to_copy = len(copy_builds) > 0 if need_to_copy: copy_versions[p] = (str(cf_version), copy_builds) return copy_versions def _check_for_missing_builds(self, source, dest, version): """ For two packages that have the same version, see if there are any files on the source that are not on the destination. source and dest are both conda channels, and version should be a string. """ def files_for_version(channel, version): files = [f['basename'] for f in channel['files'] if VersionOrder(version) == VersionOrder(f['version'])] return files source_files = files_for_version(source, version) destination_files = files_for_version(dest, version) need_to_copy = [src for src in source_files if src not in destination_files] return need_to_copy def copy_packages(self): """ Actually do the copying of the packages. 
""" for p, v in self.to_copy.items(): version, buildnames = v if not buildnames: # Copy all of the builds for this version self.api.copy(self.source, p, v, to_owner=self.destination) else: for build in buildnames: self.api.copy(self.source, p, version, basename=build, to_owner=self.destination) def main(arguments=None): parser = ArgumentParser('Simple script for copying packages ' 'from one conda owner to another') parser.add_argument('packages_yaml', help=('Packages to copy, as a yaml dictionary. ' 'Keys are package names, values are version, ' 'or None for the latest version from ' 'the source.')) parser.add_argument('--source', default='conda-forge', help='Source conda channel owner.') parser.add_argument('--token', default='', help=('anaconda.org API token. May set ' 'environmental variable BINSTAR_TOKEN ' 'instead.')) parser.add_argument('destination_channel', help=('Destination conda channel owner.')) if arguments is None: args = parser.parse_args() else: args = parser.parse_args(arguments) source = args.source dest = args.destination_channel package_file = args.packages_yaml token = args.token with open(package_file) as f: packages = yaml.load(f) # No token on command line, try the environment... if not token: token = os.getenv('BINSTAR_TOKEN') # Still no token, so raise an error if not token: raise RuntimeError('Set an anaconda.org API token before running') pc = PackageCopier(source, dest, packages, token=token) pc.copy_packages() if __name__ == '__main__': main()
Python
0
@@ -3679,16 +3679,81 @@ builds.%0A + check_version = pinned_version or cf_version%0A @@ -3946,33 +3946,36 @@ c -f +heck _version)%0A
b8cfbbe0965ff869d4c254413420209a4cdcd82c
Update twine/commands/upload.py
twine/commands/upload.py
twine/commands/upload.py
# Copyright 2013 Donald Stufft # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os.path from twine.commands import _find_dists from twine.package import PackageFile from twine import exceptions from twine import settings from twine import utils def skip_upload(response, skip_existing, package): if not skip_existing: return False status = response.status_code reason = getattr(response, 'reason', '').lower() text = getattr(response, 'text', '').lower() # NOTE(sigmavirus24): PyPI presently returns a 400 status code with the # error message in the reason attribute. Other implementations return a # 403 or 409 status code. return any([ # Warehouse and old PyPI status == 400 and 'already exist' in reason, # Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss) status == 400 and 'updating asset' in reason, # Artifactory (https://jfrog.com/artifactory/) status == 403 and 'overwrite artifact' in text, # pypiserver (https://pypi.org/project/pypiserver) status == 409, ]) def upload(upload_settings, dists): dists = _find_dists(dists) # Determine if the user has passed in pre-signed distributions signatures = {os.path.basename(d): d for d in dists if d.endswith(".asc")} uploads = [i for i in dists if not i.endswith(".asc")] upload_settings.check_repository_url() repository_url = upload_settings.repository_config['repository'] print(f"Uploading distributions to {repository_url}") repository = upload_settings.create_repository() uploaded_packages = [] for filename in uploads: package = PackageFile.from_filename(filename, upload_settings.comment) skip_message = ( " Skipping {} because it appears to already exist".format( package.basefilename) ) # Note: The skip_existing check *needs* to be first, because otherwise # we're going to generate extra HTTP requests against a hardcoded # URL for no reason. if (upload_settings.skip_existing and repository.package_is_uploaded(package)): print(skip_message) continue signed_name = package.signed_basefilename if signed_name in signatures: package.add_gpg_signature(signatures[signed_name], signed_name) elif upload_settings.sign: package.sign(upload_settings.sign_with, upload_settings.identity) resp = repository.upload(package) # Bug 92. If we get a redirect we should abort because something seems # funky. The behaviour is not well defined and redirects being issued # by PyPI should never happen in reality. This should catch malicious # redirects as well. if resp.is_redirect: raise exceptions.RedirectDetected.from_args( repository_url, resp.headers["location"], ) if skip_upload(resp, upload_settings.skip_existing, package): print(skip_message) continue utils.check_status_code(resp, upload_settings.verbose) uploaded_packages.append(package) release_urls = repository.release_urls(uploaded_packages) if release_urls: print('\nView at:') for url in release_urls: print(url) # Bug 28. Try to silence a ResourceWarning by clearing the connection # pool. 
repository.close() def main(args): parser = argparse.ArgumentParser(prog="twine upload") settings.Settings.register_argparse_arguments(parser) parser.add_argument( "dists", nargs="+", metavar="dist", help="The distribution files to upload to the repository " "(package index). Usually dist/* . May additionally contain " "a .asc file to include an existing signature with the " "file upload.", ) args = parser.parse_args(args) upload_settings = settings.Settings.from_argparse(args) # Call the upload function with the arguments from the command line return upload(upload_settings, args.dists)
Python
0
@@ -1207,26 +1207,19 @@ # -Warehouse and old +PyPI / Test PyPI
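To see the skip_upload matrix above in action, one can feed it stub responses. This assumes skip_upload from the record is in scope, and the reason/text strings are representative examples rather than exact server output:

from types import SimpleNamespace

# Only the three fields skip_upload inspects are stubbed here.
already_uploaded = [
    SimpleNamespace(status_code=400, reason='File already exists', text=''),       # PyPI / Test PyPI
    SimpleNamespace(status_code=400, reason='Error updating asset', text=''),      # Nexus OSS
    SimpleNamespace(status_code=403, reason='', text='cannot overwrite artifact'), # Artifactory
    SimpleNamespace(status_code=409, reason='', text=''),                          # pypiserver
]
for resp in already_uploaded:
    assert skip_upload(resp, skip_existing=True, package=None)
    assert not skip_upload(resp, skip_existing=False, package=None)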
1c986a578946118966b3b5e99932831e189400df
Fix euca-upload-bundle --skipmanifest
euca2ools/commands/bundle/uploadbundle.py
euca2ools/commands/bundle/uploadbundle.py
# Copyright 2009-2014 Eucalyptus Systems, Inc. # # Redistribution and use of this software in source and binary forms, # with or without modification, are permitted provided that the following # conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import multiprocessing import os.path from requestbuilder import Arg from requestbuilder.mixins import FileTransferProgressBarMixin from euca2ools.bundle.manifest import BundleManifest from euca2ools.commands.bundle.mixins import BundleUploadingMixin from euca2ools.commands.s3 import S3Request from euca2ools.commands.s3.putobject import PutObject class UploadBundle(S3Request, BundleUploadingMixin, FileTransferProgressBarMixin): DESCRIPTION = 'Upload a bundle prepared by euca-bundle-image to the cloud' ARGS = [Arg('-m', '--manifest', metavar='FILE', required=True, help='manifest for the bundle to upload (required)'), Arg('-d', '--directory', metavar='DIR', help='''directory that contains the bundle parts (default: directory that contains the manifest)'''), # TODO: make this work Arg('--part', metavar='INT', type=int, default=0, help='''begin uploading with a specific part number (default: 0)'''), Arg('--skipmanifest', action='store_true', help='do not upload the manifest')] def configure(self): self.configure_bundle_upload_auth() S3Request.configure(self) def main(self): key_prefix = self.get_bundle_key_prefix() self.ensure_dest_bucket_exists() manifest = BundleManifest.read_from_file(self.args['manifest']) part_dir = (self.args.get('directory') or os.path.dirname(self.args['manifest'])) for part in manifest.image_parts: part.filename = os.path.join(part_dir, part.filename) if not os.path.isfile(part.filename): raise ValueError("no such part: '{0}'".format(part.filename)) # manifest -> upload part_out_r, part_out_w = multiprocessing.Pipe(duplex=False) part_gen = multiprocessing.Process(target=_generate_bundle_parts, args=(manifest, part_out_w)) part_gen.start() part_out_w.close() # Drive the upload process by feeding in part info self.upload_bundle_parts(part_out_r, key_prefix, show_progress=self.args.get('show_progress')) part_gen.join() # (conditionally) upload the manifest if not self.args.get('skip_manifest'): manifest_dest = (key_prefix + os.path.basename(self.args['manifest'])) req = PutObject.from_other( self, source=self.args['manifest'], dest=manifest_dest, acl=self.args.get('acl') or 'aws-exec-read', retries=self.args.get('retries') or 0) req.main() else: manifest_dest = 
None return {'parts': tuple({'filename': part.filename, 'key': (key_prefix + os.path.basename(part.filename))} for part in manifest.image_parts), 'manifests': ({'filename': self.args['manifest'], 'key': manifest_dest},)} def print_result(self, result): if self.debug: for part in result['parts']: print 'Uploaded', part['key'] if result['manifests'][0]['key'] is not None: print 'Uploaded', result['manifests'][0]['key'] def _generate_bundle_parts(manifest, out_mpconn): try: for part in manifest.image_parts: assert os.path.isfile(part.filename) out_mpconn.send(part) finally: out_mpconn.close()
Python
0
@@ -3720,17 +3720,16 @@ et('skip -_ manifest
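The manifest-to-upload handoff in the record is a small producer/consumer pattern over a one-way pipe; a stripped-down sketch with placeholder part names:

import multiprocessing

def _producer(conn):
    try:
        for part in ('part.00', 'part.01', 'part.02'):  # stand-ins for bundle parts
            conn.send(part)
    finally:
        conn.close()

if __name__ == '__main__':
    out_r, out_w = multiprocessing.Pipe(duplex=False)
    gen = multiprocessing.Process(target=_producer, args=(out_w,))
    gen.start()
    out_w.close()          # drop the parent's copy so EOF can propagate
    try:
        while True:
            print('uploading', out_r.recv())
    except EOFError:       # producer closed its end; all parts consumed
        pass
    gen.join()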
2fcca753e2877d21e158d5e4a3923e1d22e61608
Add mask A to causal atrous conv1d (WIP)
eva/layers/causal_atrous_convolution1d.py
eva/layers/causal_atrous_convolution1d.py
import keras.backend as K from keras.layers import AtrousConvolution1D from keras.utils.np_utils import conv_output_length class CausalAtrousConvolution1D(AtrousConvolution1D): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.border_mode != 'valid': raise ValueError("Causal mode dictates border_mode=valid.") self.length = self.atrous_rate * (self.filter_length - 1) def get_output_shape_for(self, input_shape): length = conv_output_length(input_shape[1] + self.length, self.filter_length, self.border_mode, self.subsample[0], dilation=self.atrous_rate) return (input_shape[0], length, self.nb_filter) def call(self, x, mask=None): return super().call(K.asymmetric_temporal_padding(x, self.length, 0), mask)
Python
0
@@ -196,24 +196,39 @@ self, *args, + mask_type='B', **kwargs):%0A @@ -448,16 +448,95 @@ h - 1)%0A%0A + # XXX debug.%0A if mask_type == 'A':%0A self.length = 3%0A%0A def
56de128ac7462a297505ec4b207d07c1af613c9b
connect annotations to api
ui/app/api/__init__.py
ui/app/api/__init__.py
from flask import Blueprint api = Blueprint('api', __name__) # # end-points to create # query from . import query # services from . import services # spans from . import spans # # traces # services # annotations # dependencies # pin from . import pin
Python
0.000001
@@ -175,16 +175,57 @@ spans%0A%0A +# annotations%0Afrom . import annotations%0A%0A #%0A# trac
6fdeeb8941753e2394406ef2b90fab51684dab6d
Update mainapp.py
app/mainapp.py
app/mainapp.py
# -*- coding: utf-8 -*- from flask import Flask, jsonify, request, abort, make_response from futu_server_api import * from db import save_update_token from db import delete_tokens import logging import logging.config import json app = Flask(__name__) logging.config.fileConfig('./conf/logging.conf') no_db_logger = logging.getLogger() def check_parameters(pjson): if not pjson or not 'app_account' in pjson or not 'card' in pjson or not 'appid' in pjson: no_db_logger.info('No Parameter') abort(400) cli = {'account':pjson['app_account'], 'card':pjson['card'], 'appid':pjson['appid']} return client(cli['account'], cli['card'], cli['appid']) def log_handler(myjson, mytitle): if 'ClientWarning' in myjson: return '%s' % myjson['ClientWarning'] elif myjson['result_code'] == 0: return 'SUCCESS' else: return 'FAIL ,REASON OF FAILURE:%s ,PARAMETER:%s' % (myjson['error_msg'], request.json) @app.route('/') def hello_world(): no_db_logger.info('server start#####') return 'hello 22222222 world!' @app.route('/api/v1/tradetoken', methods=['POST']) def trade_token(): trade_pswd = request.json['trade_pswd'] account = request.json['app_account'] card = request.json['card'] appid = request.json['appid'] cc = check_parameters(request.json) message = cc.get_trade_token(trade_pswd) if message['result_code'] != 0 and message['error_msg' == 'didn\'t get accesstoken']: no_db_logger.info('didn\'t get accesstoken') return json.dumps({'result_code':2,'error_msg':'didn\'t get accesstoken'}, ensure_ascii=False) if message['result_code'] == 0: token = message['data']['trade_token'] save_update_token(account, appid, None, token, card, True) return jsonify(**message) @app.route('/api/v1/account', methods=['POST']) def get_account_detail(): cc = check_parameters(request.json) message = cc.get_account_detail() logtext = log_handler(message, '获取账户信息') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/account/cash', methods=['POST']) def get_account_cash(): cc = check_parameters(request.json) message = cc.get_account_cash() logtext = log_handler(message, '获取账户现金') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/account/portfolio', methods=['POST']) def get_account_portfolio(): cc = check_parameters(request.json) message = cc.get_account_portfolio() logtext = log_handler(message, '获取账户持仓') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/get_list_orders', methods=['POST']) def get_list_orders(): date_begin = request.json['date_begin'] date_end = request.json['date_end'] cc = check_parameters(request.json) message = cc.get_list_orders() logtext = log_handler(message, '获取订单列表') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/get_list_trades', methods=['POST']) def get_list_trades(): cc = check_parameters(request.json) message = cc.get_list_trades() logtext = log_handler(message, '获取交易列表') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/place_order', methods=['POST']) def place_order(): code = request.json['code'] quantity = request.json['quantity'] price = request.json['price'] side = request.json['side'] ltype = request.json['type'] cc = check_parameters(request.json) message = cc.place_order(code, quantity, price, side, ltype) logtext = log_handler(message, '下单') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/change_order', methods=['POST']) def change_order(): 
order_id = request.json['order_id'] quantity = request.json['quantity'] price = request.json['price'] cc = check_parameters(request.json) message = cc.change_order(order_id, quantity, price) logtext = log_handler(message, '改单') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/api/v1/cancle_order', methods=['POST']) def cancle_order(): order_id = request.json['order_id'] cc = check_parameters(request.json) message = cc.cancel_order(order_id) logtext = log_handler(message, '撤单') no_db_logger.info(logtext) return json.dumps(message, ensure_ascii=False) @app.route('/ap1/v1/save_token', methods=['POST']) def save_token(): account = request.json['app_account'] appid = request.json['appid'] market = request.json['market'] token = request.json['token'] card = request.json['card'] card_desc = request.json['text'] DB_result = save_update_token(account, appid, market, token, card, False, card_desc) if DB_result == 'success': no_db_logger.info('token save success') return json.dumps({'result_code':0,'error_msg':''}, ensure_ascii=False) else: no_db_logger.info('token save fail') return json.dumps({'result_code':1,'error_msg':'token保存失败'}, ensure_ascii=False) @app.route('/api/v1/delete_token', methods=['POST']) def delete_token(): appid = request.json['appid'] account = request.json['app_account'] DB_result = delete_tokens(account, appid) if DB_result == 'success': no_db_logger.info('token delete success') return json.dumps({'result_code':0,'error_msg':''}, ensure_ascii=False) else: no_db_logger.info('token delete fail') return json.dumps({'result_code':1,'error_msg':'token删除失败'}, ensure_ascii=False) if __name__ == '__main__': app.run()
Python
0.000001
@@ -283,25 +283,20 @@ conf/log -ging.conf +.ini ')%0Ano_db
c5b00edd9b8acbe594e43ecce093cd1c695b8b01
Use user ID instead of username to get messages even with username changes
getalltextfromuser.py
getalltextfromuser.py
#!/usr/bin/env python3 """ A program to extract all text sent by a particular user from a Telegram chat log which is in json form """ import argparse from json import loads def main(): parser = argparse.ArgumentParser( description="Extract raw text sent by a user from a json telegram chat log") parser.add_argument( 'filepath', help='the json file to analyse') parser.add_argument( 'username', help='the username of the person whose text you want') args=parser.parse_args() filepath = args.filepath username = args.username with open(filepath, 'r') as jsonfile: events = (loads(line) for line in jsonfile) for event in events: #check the event is the sort we're looking for if "from" in event and "text" in event: if "username" in event["from"]: #do i need "from" here? if event["from"]["username"] == username: print(event["text"]) if __name__ == "__main__": main()
Python
0
@@ -570,16 +570,89 @@ ername%0A%0A + user_id = %22%22%0A%0A #first, get the ID of the user with that username.%0A with @@ -1027,24 +1027,552 @@ ername:%0A + #print(event%5B%22text%22%5D)%0A print(event%5B'from'%5D%5B'id'%5D)%0A user_id = event%5B'from'%5D%5B'id'%5D%0A break%0A if user_id == %22%22:%0A print(%22user not found%22)%0A exit()%0A%0A with open(filepath, 'r') as jsonfile:%0A events = (loads(line) for line in jsonfile)%0A for event in events:%0A #check the event is the sort we're looking for%0A if %22from%22 in event and %22text%22 in event:%0A if user_id == event%5B%22from%22%5D%5B%22id%22%5D:%0A
48b227f0019fb28a5b96874f62662fee79998fe5
Add a TODO for diamondash metric snapshot request auth in diamondash proxy view
go/dashboard/views.py
go/dashboard/views.py
from django.http import HttpResponse from django.contrib.auth.decorators import login_required from django.views.decorators.http import require_http_methods from django.views.decorators.csrf import csrf_exempt from go.dashboard import client @login_required @csrf_exempt @require_http_methods(['GET']) def diamondash_api_proxy(request): """ Proxies client snapshot requests to diamondash. NOTE: This proxy is a fallback for dev purposes only. A more sensible proxying solution should be used in production (eg. haproxy). """ api = client.get_diamondash_api() _, url = request.path.split('/diamondash/api', 1) response = api.raw_request(request.method, url, content=request.body) return HttpResponse( response['content'], status=response['code'], content_type='application/json')
Python
0
@@ -709,16 +709,132 @@ .body)%0A%0A + # TODO for the case of snapshot requests, ensure the widgets requested are%0A # allowed for the given account%0A%0A retu
e7d6f5fbd21819ba0c5d3b5e952331b2eabe32e8
move ballsearch para to config
gpbo/core/__init__.py
gpbo/core/__init__.py
from collections import defaultdict debugoutput=defaultdict(lambda :False) debugoutput['path']='dbout' from .optimize import * from .optutils import * from .acquisitions import * from .reccomenders import * from .config import * from .GPdc import * from .ESutils import * from .choosers import *
Python
0
@@ -96,16 +96,17 @@ 'dbout'%0A +%0A from .op
c937d19b263070f380fcbc75dfe60d15840fe477
Drop call to 'service' wrapper in 'vyos-config-mdns-repeater.py'
src/conf-mode/vyos-config-mdns-repeater.py
src/conf-mode/vyos-config-mdns-repeater.py
#!/usr/bin/env python3 # # Copyright (C) 2017 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # import sys import os import netifaces import time import subprocess from vyos.config import Config from vyos.util import ConfigError config_file = r'/etc/default/mdns-repeater' def get_config(): interface_list = [] conf = Config() conf.set_level('service mdns repeater') if not conf.exists(''): return interface_list if conf.exists('interfaces'): intfs_names = conf.return_values('interfaces') intfs_names=intfs_names.replace("'", "") intfs_names=intfs_names.split() for name in intfs_names: interface_list.append(name) return interface_list def verify(mdns): # '0' interfaces are possible, think of service deletion. Only '1' is not supported! if len(mdns) == 1: raise ConfigError('At least 2 interfaces must be specified but %d given!' % len(mdns)) # For mdns-repeater to work it is essential that the interfaces # have an IP address assigned for intf in mdns: try: netifaces.ifaddresses(intf)[netifaces.AF_INET] except KeyError as e: raise ConfigError('No IP address configured for interface "%s"!' % intf) return None def generate(mdns): config_header = '### Autogenerated by vyos-update-mdns-repeater.py on {tm} ###\n'.format(tm=time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())) if len(mdns) > 0: config_args = 'DAEMON_ARGS="' + ' '.join(str(e) for e in mdns) + '"\n' else: config_args = 'DAEMON_ARGS=""\n' # write new configuration file f = open(config_file, 'w') f.write(config_header) f.write(config_args) f.close() return None def apply(mdns): systemd_cmd = ["sudo", "service", "mdns-repeater"] if len(mdns) == 0: systemd_cmd.append("stop") else: systemd_cmd.append("restart") subprocess.call(systemd_cmd) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) sys.exit(1)
Python
0
@@ -2366,31 +2366,16 @@ , %22s -ervice%22, %22mdns-repeater +ystemctl %22%5D%0A%0A @@ -2481,16 +2481,65 @@ tart%22)%0A%0A + systemd_cmd.append(%22mdns-repeater.service%22)%0A%0A subp
8914b9e5d38e7c97940b25c6a041ce7f4660f25d
Update padus.py
padus.py
padus.py
#!/usr/bin/python """ INSTALL yum install python-ldap groupadd -g 10002 ldap_test ldapsearch -x -H ldap://172.16.2.201 -D cn=ldap,cn=users,dc=gm,dc=local -w ldap -b CN=ldap_test,CN=Users,DC=gm,DC=local """ import os import sys import pwd import ldap def finduser(user): try: pwd.getpwnam(user) print user, "user exists" return True except: return False def search_member_in_ldap(ldap, ldapo, cn): baseDN = cn #"dc=gm,dc=local" searchScope = ldap.SCOPE_SUBTREE ## retrieve all attributes - again adjust to your needs - see documentation for more options retrieveAttributes = None searchFilter = "cn=*" #cn[0] #print ldap.SCORE_SUBTREE print baseDN print searchFilter try: ldap_result_id = ldapo.search(baseDN, searchScope, searchFilter, retrieveAttributes) result_set = [] while 1: result_type, result_data = ldapo.result(ldap_result_id, 0) if (result_data == []): break else: if result_type == ldap.RES_SEARCH_ENTRY: result_set.append(result_data) print result_set[0][0][1]['cn'][0] str_uid = result_set[0][0][1]['uid'][0] print result_set[0][0][1]['uidNumber'][0] print result_set[0][0][1]['gidNumber'][0] print result_set[0][0][1]['unixHomeDirectory'][0] print result_set[0][0][1]['loginShell'][0] # cat /etc/passwd | grep ^root: | sed -e 's/:.*//g' print "Check localuser '%s':" % str_uid #print pwd.getpwnam(str_uid) if not finduser(str_uid): print " user not exists.... create" values = { "username": result_set[0][0][1]['uid'][0], "shell": result_set[0][0][1]['loginShell'][0], "uid": result_set[0][0][1]['uidNumber'][0], "gid": result_set[0][0][1]['gidNumber'][0], "homedir": result_set[0][0][1]['unixHomeDirectory'][0] } os.system("useradd -b /home/GM -d %(homedir)s -g %(gid)s -u %(uid)s %(username)s" % values) else: print " user already exists." except ldap.LDAPError, e: print e try: ldapo = ldap.initialize('ldap://172.16.2.201') ldapo.protocol_version = ldap.VERSION3 except ldap.LDAPError, e: print e sys.exit(2) ldapo.simple_bind_s('cn=ldap,cn=users,dc=gm,dc=local', 'ldap') """ lc = ldapom.LdapConnection(uri='ldap://172.16.2.201', base='CN=ldap_test,CN=Users,DC=gm,DC=local', login='cn=ldap,cn=users,dc=gm,dc=local', password='ldap') """ baseDN = "cn=users,dc=gm,dc=local" searchScope = ldap.SCOPE_SUBTREE ## retrieve all attributes - again adjust to your needs - see documentation for more options retrieveAttributes = None searchFilter = "cn=ldap_test" try: ldap_result_id = ldapo.search(baseDN, searchScope, searchFilter, retrieveAttributes) result_set = [] while 1: result_type, result_data = ldapo.result(ldap_result_id, 0) if (result_data == []): break else: if result_type == ldap.RES_SEARCH_ENTRY: result_set.append(result_data) for member in result_set[0][0][1]['member']: print member search_member_in_ldap(ldap, ldapo, member) #print "Group: %s" % result_set[0][0][0] #print "Members: %s" % result_set[0][0][1]['member'] #search_member_in_ldap(ldap, ldapo, result_set[0][0][1]['member']) except ldap.LDAPError, e: print e
Python
0
@@ -20,42 +20,8 @@ %22%22%22%0A -INSTALL%0A%0Ayum install python-ldap%0A%0A grou
61e4d7742c3a5ec2df4301773fc53d9cd09d5179
fix for unknown objects
art_detectors/art_arcode_detector/src/detector.py
art_detectors/art_arcode_detector/src/detector.py
#!/usr/bin/env python from ar_track_alvar_msgs.msg import AlvarMarkers, AlvarMarker from art_msgs.msg import ObjInstance, InstancesArray from art_msgs.srv import getObject from shape_msgs.msg import SolidPrimitive import sys import rospy from visualization_msgs.msg import Marker from geometry_msgs.msg import Point from tf import transformations class ArCodeDetector: objects_table = None def __init__(self): self.get_object_srv = rospy.ServiceProxy('/art_db/object/get', getObject) self.ar_code_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.ar_code_cb) self.detected_objects_pub = rospy.Publisher("/art_object_detector/object", InstancesArray, queue_size=10) self.visualize_pub = rospy.Publisher("art_object_detector/visualize_objects", Marker, queue_size=10) # TODO make a timer clearing this cache from time to time self.objects_cache = {} def ar_code_cb(self, data): rospy.logdebug("New arcodes arrived:") instances = InstancesArray() id = 0 for arcode in data.markers: aid = int(arcode.id) if aid not in self.objects_cache: try: resp = self.get_object_srv(obj_id=aid) except rospy.ServiceException, e: print "Service call failed: %s"%e continue self.objects_cache[aid] = {'name': resp.name, 'model_url': resp.model_url, 'type': resp.type, 'bb': resp.bbox} obj_in = ObjInstance() obj_in.object_id = self.objects_cache[aid]['name'] obj_in.pose = arcode.pose.pose obj_in.pose.position.z = 0 angles = transformations.euler_from_quaternion([obj_in.pose.orientation.x, obj_in.pose.orientation.y, obj_in.pose.orientation.z, obj_in.pose.orientation.w]) q = transformations.quaternion_from_euler(0, 0, angles[2]) obj_in.pose.orientation.x = q[0] obj_in.pose.orientation.y = q[1] obj_in.pose.orientation.z = q[2] obj_in.pose.orientation.w = q[3] obj_in.bbox = self.objects_cache[aid]['bb'] self.show_rviz_bb(obj_in, arcode.id) instances.header.stamp = arcode.header.stamp instances.header.frame_id = arcode.header.frame_id instances.instances.append(obj_in) ++id if len(data.markers) == 0: rospy.logdebug("Empty") else: self.detected_objects_pub.publish(instances) def show_rviz_bb(self, obj, id): ''' :type obj: ObjInstance :return: ''' marker = Marker() marker.type = marker.LINE_LIST marker.id = int(id) marker.action = marker.ADD marker.scale.x = 0.001 marker.scale.y = 0.01 marker.scale.z = 0.01 marker.color.r = 0 marker.color.g = 1 marker.color.b = 0 marker.color.a = 1 marker.lifetime = rospy.Duration(5) marker.pose = obj.pose pos = obj.pose.position bbox_x = float(obj.bbox.dimensions[0]/2) bbox_y = float(obj.bbox.dimensions[1]/2) bbox_z = float(obj.bbox.dimensions[2]) marker.points = [ Point(- bbox_x,- bbox_y, 0), Point(+ bbox_x,- bbox_y, 0), Point(+ bbox_x,- bbox_y, 0), Point(+ bbox_x,+ bbox_y, 0), Point(+ bbox_x,+ bbox_y, 0), Point(- bbox_x,+ bbox_y, 0), Point(- bbox_x,+ bbox_y, 0), Point(- bbox_x,- bbox_y, 0), Point(- bbox_x,- bbox_y, 0), Point(- bbox_x,- bbox_y, + bbox_z), Point(+ bbox_x,- bbox_y, 0), Point(+ bbox_x,- bbox_y, + bbox_z), Point(+ bbox_x,+ bbox_y, 0), Point(+ bbox_x,+ bbox_y, + bbox_z), Point(- bbox_x,+ bbox_y, 0), Point(- bbox_x,+ bbox_y, + bbox_z), Point(- bbox_x,- bbox_y, + bbox_z), Point(+ bbox_x,- bbox_y, + bbox_z), Point(+ bbox_x,- bbox_y, + bbox_z), Point(+ bbox_x,+ bbox_y, + bbox_z), Point(+ bbox_x,+ bbox_y, + bbox_z), Point(- bbox_x,+ bbox_y, + bbox_z), Point(- bbox_x,+ bbox_y, + bbox_z), Point(- bbox_x,- bbox_y, + bbox_z), ] marker.header.frame_id = "/marker" self.visualize_pub.publish(marker) marker.pose.position.z += 0.02 + bbox_z marker.id = int(id+100) marker.type = marker.TEXT_VIEW_FACING marker.text = obj.object_id marker.scale.z = 0.02 self.visualize_pub.publish(marker) if __name__ == '__main__': rospy.init_node('art_arcode_detector') # rospy.init_node('art_arcode_detector', log_level=rospy.DEBUG) try: rospy.wait_for_service('/art_db/object/get') node = ArCodeDetector() rospy.spin() except rospy.ROSInterruptException: pass
Python
0.000006
@@ -1379,40 +1379,122 @@ -print %22Service call failed: %25s%22%25 +%0A # error or unknown object - let's ignore it%0A self.objects_cache%5Baid%5D = Non e%0A @@ -1675,32 +1675,98 @@ ox%7D%0A +%0A if self.objects_cache%5Baid%5D is None: continue%0A %0A
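Decoded from the escaped hunks above, the cache handling in ar_code_cb reads roughly as follows after the change (a sketch in the record's own Python 2 / rospy style; indentation and the elided rest of the loop are assumptions):

for arcode in data.markers:
    aid = int(arcode.id)
    if aid not in self.objects_cache:
        try:
            resp = self.get_object_srv(obj_id=aid)
        except rospy.ServiceException, e:
            # error or unknown object - let's ignore it
            self.objects_cache[aid] = None
            continue
        self.objects_cache[aid] = {'name': resp.name, 'model_url': resp.model_url,
                                   'type': resp.type, 'bb': resp.bbox}

    # a cached None marks a known-bad marker id: later sightings skip it
    # without calling the service again
    if self.objects_cache[aid] is None: continue
    # ... build and publish the ObjInstance exactly as before ...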
e4fb1dbc9030a7a2d538f83e903cd38d77c5272b
Version bump for new requirements
espwrap/__init__.py
espwrap/__init__.py
__version__ = '1.2.3'
Python
0
@@ -16,7 +16,7 @@ 1.2. -3 +4 '%0A
e964df34a7f1f7dc85da21429b2978b01034140f
Fix typo
i2v/__init__.py
i2v/__init__.py
from i2v.base import Illustration2VecBase caffe_available = False chainer_available = False try: from i2v.caffe_i2v import CaffeI2V, make_i2v_with_caffe caffe_available = True except ImportError: pass try: from i2v.chainer_i2v import ChainerI2V, make_i2v_with_chainer caffe_available = True except ImportError: pass if not any([caffe_available, chainer_available]): raise ImportError('i2v requires caffe or chainer package')
Python
0.999999
@@ -281,28 +281,30 @@ hainer%0A c -affe +hainer _available =
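Decoded, the one-word hunk above fixes a copy-paste slip in the second import guard, so a working chainer install is finally reflected in the availability check at the bottom of the module; the corrected guard reads:

try:
    from i2v.chainer_i2v import ChainerI2V, make_i2v_with_chainer
    chainer_available = True  # was: caffe_available = True
except ImportError:
    pass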
8731d5a3ad84872811b1e12b8f0fb85c5a4a5754
update example project
example/settings.py
example/settings.py
""" Django settings for app project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'YOUR_SECRET_KEY' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] TEMPLATE_DIRS = ( os.path.join(os.path.dirname(__file__), 'templates'), ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.core.context_processors.tz", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.request", "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django_su.context_processors.is_su", ) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(os.path.dirname(__file__), 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.i18n', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', "django_su.context_processors.is_su", ], }, }, ] # Application definition MIDDLEWARE_CLASSES = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ] PROJECT_APPS = [ 'django_su', ] INSTALLED_APPS = [ # 'suit', # pip install django-suit 'django.contrib.auth', 'django.contrib.sites', 'django.contrib.sessions', 'django.contrib.staticfiles', 'django.contrib.contenttypes', 'django.contrib.admin', # 'guardian', 'formadmin', # pip install django-form-admin 'ajax_select', # pip install django-ajax-select ] + PROJECT_APPS ROOT_URLCONF = 'example.urls' SITE_ID = 1 # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", # "guardian.backends.ObjectPermissionBackend", "django_su.backends.SuBackend", ) # ANONYMOUS_USER_ID = -1 # URL to redirect after the login. # Default: "/" SU_LOGIN_REDIRECT_URL = "/" # URL to redirect after the logout. # Default: "/" SU_LOGOUT_REDIRECT_URL = "/" # A function to specify the perms that the user must have can use django_su # Default: None SU_LOGIN_CALLBACK = "example.utils.su_login_callback" # A function to override the django.contrib.auth.login(request, user) # function so you can set session data, etc. # Default: None SU_CUSTOM_LOGIN_ACTION = "example.utils.custom_login" if 'ajax_select' in INSTALLED_APPS: AJAX_LOOKUP_CHANNELS = { 'django_su': ('example.lookups', 'UsersLookup'), }
Python
0
@@ -2447,24 +2447,56 @@ ct%0A%5D - + PROJECT_APPS%0A +%0A%0AINSTALLED_APPS = PROJECT_APPS + INSTALLED_APPS %0A%0ARO
37da5f36e4f341b0923081235885786c494239d9
Remove unused return
lexos/models/stats_model.py
lexos/models/stats_model.py
from typing import Optional, List, NamedTuple import numpy as np import pandas as pd from lexos.helpers.error_messages import EMPTY_LIST_MESSAGE from lexos.models.base_model import BaseModel from lexos.models.matrix_model import MatrixModel from lexos.receivers.matrix_receiver import MatrixReceiver, IdTempLabelMap class StatsTestOptions(NamedTuple): """A typed tuple to hold test options.""" doc_term_matrix: pd.DataFrame id_temp_label_map: IdTempLabelMap class CorpusInfo(NamedTuple): """A typed tuple to represent statistics of the whole corpus.""" q1: float # The first quartile of all file sizes. q3: float # The second quartile of all file sizes. iqr: float # The inter-quartile range of all file sizes. median: float # The median of all file sizes. average: float # The average of all file sized. num_file: int # The number of files. std_deviation: float # The standard deviation of all file sizes. anomaly_iqr: dict # The anomaly inter-quartile range of all file sizes. anomaly_std_err: dict # The anomaly standard error of all file sizes. class FileInfo(NamedTuple): """A typed tuple to represent statistics of each file in corpus.""" q1: float # The first quartile of all word counts of a file. q3: float # The second quartile of all word counts of a file. iqr: float # The inter-quartile range of all word counts of a file. hapax: int # The hapax of all word counts of a file. median: float # The median of all word counts of a file. average: float # The average of all word counts of a file. num_word: int # The number of words of a file. file_name: str # The name of a file. word_count: int # The count of all words within a file. std_deviation: float # The standard deviation of word counts. total_word_count: int # The total of all word counts of a file. class StatsModel(BaseModel): def __init__(self, test_options: Optional[StatsTestOptions] = None): """This is the class to generate statistics of the input file. :param test_options: the input used in testing to override the dynamically loaded option """ super().__init__() if test_options is not None: self._test_dtm = test_options.doc_term_matrix self._test_id_temp_label_map = test_options.id_temp_label_map else: self._test_dtm = None self._test_id_temp_label_map = None @property def _doc_term_matrix(self) -> pd.DataFrame: """:return: the document term matrix""" return self._test_dtm if self._test_dtm is not None \ else MatrixModel().get_matrix() @property def _id_temp_label_map(self) -> IdTempLabelMap: """:return: a map takes an id to temp labels""" return self._test_id_temp_label_map \ if self._test_id_temp_label_map is not None \ else MatrixModel().get_temp_label_id_map() def _get_corpus_info(self) -> CorpusInfo: """Converts word lists completely to statistic.""" assert np.sum(self._doc_term_matrix.values) > 0, EMPTY_LIST_MESSAGE # initialize file_anomaly_iqr = {} file_anomaly_std_err = {} num_file = np.size(self._doc_term_matrix.index.values) file_sizes = np.sum(self._doc_term_matrix.values, axis=1) labels = [self._id_temp_label_map[file_id] for file_id in self._doc_term_matrix.index.values] # 1 standard error analysis average_file_size = round(np.average(file_sizes), 3) # Calculate the standard deviation std_dev_file_size = np.std(file_sizes).item() # Calculate the anomaly for count, label in enumerate(labels): if file_sizes[count] > average_file_size + 2 * std_dev_file_size: file_anomaly_std_err.update({label: 'large'}) elif file_sizes[count] < average_file_size - 2 * std_dev_file_size: file_anomaly_std_err.update({label: 'small'}) # 2 iqr analysis median = np.median(file_sizes).item() q1 = np.percentile(file_sizes, 25, interpolation="midpoint") q3 = np.percentile(file_sizes, 75, interpolation="midpoint") iqr = q3 - q1 # calculate the anomaly for count, label in enumerate(labels): if file_sizes[count] > median + 1.5 * iqr: file_anomaly_iqr.update({label: 'large'}) elif file_sizes[count] < median - 1.5 * iqr: file_anomaly_iqr.update({label: 'small'}) return CorpusInfo(q1=q1, q3=q3, iqr=iqr, median=median, average=average_file_size, num_file=num_file, anomaly_iqr=file_anomaly_iqr, std_deviation=std_dev_file_size, anomaly_std_err=file_anomaly_std_err) @staticmethod def _get_file_info(count_list: np.ndarray, file_name: str) -> FileInfo: """Gives statistics of a particular file in a given file list. :param count_list: a list contains words count of a particular file. :param file_name: the file name of that file. """ assert np.sum(count_list) > 0, EMPTY_LIST_MESSAGE # initialize remove all zeros from count_list nonzero_count_list = count_list[count_list != 0] num_word = np.size(nonzero_count_list) total_word_count = int(sum(nonzero_count_list).item()) # 1 standard error analysis average_word_count = round(total_word_count / num_word, 3) # calculate the standard deviation std_word_count = np.std(nonzero_count_list).item() # 2 iqr analysis median = np.median(nonzero_count_list).item() q1 = np.percentile(nonzero_count_list, 25, interpolation="midpoint") q3 = np.percentile(nonzero_count_list, 75, interpolation="midpoint") iqr = q3 - q1 hapax = ((count_list == 1).sum()).item() return FileInfo(q1=q1, q3=q3, iqr=iqr, hapax=hapax, median=median, average=average_word_count, num_word=num_word, file_name=file_name, word_count=nonzero_count_list, std_deviation=std_word_count, total_word_count=total_word_count) def get_file_result(self) -> List[FileInfo]: """Find statistics of all files and put each result into a list.""" file_info_list = \ [self._get_file_info( count_list=self._doc_term_matrix.loc[[file_id]].values, file_name=temp_label) for file_id, temp_label in self._id_temp_label_map.items()] return file_info_list def get_corpus_result(self) -> CorpusInfo: """Return stats for the whole corpus.""" return self._get_corpus_info() @staticmethod def get_token_type() -> str: """Return token type that was used for analyzing.""" return \ MatrixReceiver().options_from_front_end().token_option.token_type
Python
0.000005
@@ -849,50 +849,8 @@ ed.%0A - num_file: int # The number of files.%0A @@ -4767,53 +4767,8 @@ ze,%0A - num_file=num_file,%0A
40c3ed631a962d0a986a7d3752ad1247ded75a33
Fix save dir of listing files
parsers/transparence-sante/crawl_details.py
parsers/transparence-sante/crawl_details.py
# -*- coding: utf-8 -*- import os, sys from crawler import TSCrawler from parse import parse_listing from utils import info from settings import EXTRACT_DETAIL_DIR import threading import logging import time import Queue from plan_crawl import get_all_tasks logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s', ) HEADERS = ["rpps", "ville", "adresse", "nom", "prenom", "codepostal", "value", "nature", "intitule", "date", "id", "typologie"] class ThreadWorker(threading.Thread): def __init__(self, queue): threading.Thread.__init__(self) self.queue = queue def run(self): while True: dept_code, pages = self.queue.get() start = time.time() worker(dept_code, pages) self.queue.task_done() t = time.time() - start print "Task department=%s, page count=%s finished in: %.1f s, %.1f s/page" % (dept_code, len(pages), t, t / len(pages)) def worker(dept_code, pages): info("Crawl Dep=%s, page start=%s, page end=%s, page count=%s" % (dept_code, pages[0], pages[-1], len(pages))) dept_dir = "%s/%s" % (EXTRACT_DETAIL_DIR, dept_code) if not os.path.isdir(dept_dir): os.makedirs(dept_dir) ts_crawler = TSCrawler() info("Crawl department %s" % dept_code) form_response = ts_crawler.submit_form_by_dept(dept_code) for page in pages: info("Department=%s, page=%s" % (dept_code, page)) listing_filename = "/%s/listing-%s-%s.html" % (EXTRACT_DETAIL_DIR, dept_code, page) if os.path.isfile(listing_filename): continue if page != 0: form_response = ts_crawler.get_listing_page(page) form_html = form_response.read() data = list(parse_listing(form_html)) # Crawl detail for idx, _ in enumerate(data): detail_filename = "%s/%s/avantage-%s-%s-%s.html" % (EXTRACT_DETAIL_DIR, dept_code, dept_code, page, idx) if os.path.isfile(detail_filename): continue with open(detail_filename, "w") as detail_file: detail_response = ts_crawler.get_detail(idx) if detail_response: detail_file.write(detail_response.read()) with open(listing_filename, 'w') as tmp_out: tmp_out.write(form_html) info("Departement=%s is finished" % dept_code) def main(): if len(sys.argv) > 1: thread_count = int(sys.argv[1]) else: thread_count = 4 queue = Queue.Queue() for task in get_all_tasks(): queue.put(task) for i in range(thread_count): t = ThreadWorker(queue) t.setDaemon(True) t.start() queue.join() main()
Python
0
@@ -1530,16 +1530,18 @@ name = %22 +%25s /%25s/list @@ -1585,24 +1585,35 @@ , dept_code, + dept_code, page)%0A%0A
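Unescaped, the two hunks prepend the extract directory to the listing path and pass dept_code twice, once for the per-department directory and once for the file name, so the listing file now lands inside the dept_dir created earlier. The corrected line, with hypothetical example values:

listing_filename = "%s/%s/listing-%s-%s.html" % (EXTRACT_DETAIL_DIR,
                                                 dept_code, dept_code, page)
# e.g. EXTRACT_DETAIL_DIR='/tmp/extract', dept_code='75', page=3
# -> '/tmp/extract/75/listing-75-3.html'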
5d32ee0e41aee95f30f972eef3d1abcf9de50a0c
Kill unused var
untz_manager/__main__.py
untz_manager/__main__.py
"""Main entry point for untz.""" from concurrent.futures import ThreadPoolExecutor import logging import os from .encoder import apply_gain, encode_file from .utils import get_args, preflight_checks, recursive_file_search ARGS = get_args() LOGGER = logging.getLogger(__name__) threads = [] # pylint: disable=C0103 def _encode_on_filter(file_entry): LOGGER.info('Encoding "%s"', file_entry) if file_entry.lower().endswith('.flac'): encode_file(file_entry, ARGS.base_dir.rstrip('/'), ARGS.pattern, ARGS.quality, ARGS.ogg_cli_parameters) def main(): """Main logic for untz.""" if ARGS.verbose: logging.basicConfig(level=logging.DEBUG) preflight_checks() LOGGER.info('Starting %d threads.', ARGS.threads) with ThreadPoolExecutor(max_workers=ARGS.threads) as tpe: for i in ARGS.inputs: if os.path.isdir(i): tpe.map(_encode_on_filter, recursive_file_search(i)) else: tpe.submit(_encode_on_filter, i) if ARGS.replaygain: apply_gain(ARGS.base_dir) LOGGER.warning('Program exiting now.')
Python
0
@@ -276,45 +276,8 @@ __)%0A -threads = %5B%5D # pylint: disable=C0103%0A %0A%0Ade
f71b1a5c7cb8ecd8403c93c1e806387f0a6d780a
Version bump.
uploadstatic/__init__.py
uploadstatic/__init__.py
# following PEP 386 __version__ = "0.2.1"
Python
0
@@ -34,9 +34,9 @@ %220. -2.1 +3.0 %22%0A
ae4da4d5d6af27ddab376cb41067f21b053fcf5e
make tokenz lazy
goethe/corpora.py
goethe/corpora.py
import os from . import util import itertools as it class Corpus: def __init__(self, path, limit=None): """Pass path to corpus. Expects following structure: path/to/my/corpus corpus.txt corpus.tokens.txt """ self.path = os.path.normpath(path) self.limit = limit def __iter__(self): """Return generator yielding tokens. """ return self.tokens() def sents(self): """Yield sentences from file. """ path = self.file_path(use_tokens=False) with open(path) as f: yield from (l.strip() for l in f) def tokens(self): """Yield from file a list of tokens per sentence. """ path = self.file_path(use_tokens=True) with open(path) as f: tokens = [line.strip().split() for line in f] yield from limit_iter(tokens) if self.limit else tokens def limit_iter(self, iterator): """Return iterator that yields self.limit elements of the passed iterator. """ return it.islice(iterator, self.limit) def file_path(self, use_tokens): """Return path to either sentence or token file. Example: 'abc/def/corpus' --> 'abc/def/corpus/corpus.txt' --> 'abc/def/corpus/corpus.tokens.txt' """ corpus_name = os.path.basename(self.path) file = ('%s.tokens.txt' if use_tokens else '%s.txt') % corpus_name return os.path.join(self.path, file) def random(self, k): """Randomly select a list of k token lists. (Will load k elements into memory!) """ return util.rsample(self.tokens(), k)
Python
0.001279
@@ -826,9 +826,9 @@ s = -%5B +( line @@ -861,9 +861,9 @@ in f -%5D +) %0A
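The two one-character hunks swap square brackets for parentheses, turning the eager list comprehension into a generator expression; the method after the diff (sketch, rest of the class unchanged):

def tokens(self):
    """Yield from file a list of tokens per sentence."""
    path = self.file_path(use_tokens=True)
    with open(path) as f:
        # generator expression: lines are tokenized on demand instead of
        # materialising the whole file as a list up front
        tokens = (line.strip().split() for line in f)
        yield from limit_iter(tokens) if self.limit else tokens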
c6fe6e7ae212dab3b8cc4615355f782664dc2d05
raise invalid error in relations if unable to get rid or rtype
drfjsonapi/relations.py
drfjsonapi/relations.py
""" drfjsonapi.relations ~~~~~~~~~~~~~~~~~~~~ DRF relationship fields to assist with a JSON API spec compliant API. """ from django.utils.translation import ugettext_lazy as _ from rest_framework.relations import SlugRelatedField class JsonApiRelatedField(SlugRelatedField): """ JSON API related field for relationships """ default_error_messages = { 'does_not_exist': _('Does not exist.'), 'invalid': _('Invalid value.'), 'invalid_rtype': _('Invalid resource type of "{given}".'), } def __init__(self, slug_field='id', **kwargs): """ Process our custom attrs so DRF doesn't barf """ self.rtype = kwargs.pop('rtype', None) super().__init__(slug_field=slug_field, **kwargs) def get_rtype(self, instance): """ Return the "Resource Identifier" type member """ return self.rtype def to_internal_value(self, data): """ DRF override during deserialization A JSON API normalized relationship will have the following members at a minimum: { 'id': '123', 'type': 'actors', } """ rid, rtype = data['id'], data['type'] # raises exc if not found, instance guaranteed instance = super().to_internal_value(rid) _rtype = self.get_rtype(instance) if _rtype != rtype: self.fail('invalid_rtype', given=rtype) return instance def to_representation(self, obj): """ DRF override during serialization `to_representation` could return None if the relationship was cleared (OneToOne) & deleted but still present in memory. """ ret = super().to_representation(obj) if ret is not None: return { 'id': str(ret), 'type': self.get_rtype(obj), }
Python
0
@@ -1160,32 +1160,49 @@ %7D%0A %22%22%22%0A%0A + try:%0A rid, rty @@ -1231,16 +1231,76 @@ 'type'%5D%0A + except TypeError:%0A self.fail('invalid')%0A%0A @@ -1955,24 +1955,44 @@ ype(obj),%0A %7D%0A + return None%0A
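Unescaped, the change wraps the tuple unpack in a try/except so a malformed payload (e.g. a bare string instead of a resource-identifier dict) raises the standard 'invalid' error rather than a TypeError, and to_representation gains an explicit return None fall-through after its dict branch. The reworked deserialization entry point (sketch):

def to_internal_value(self, data):
    try:
        rid, rtype = data['id'], data['type']
    except TypeError:
        # payload was not a dict-like resource identifier
        self.fail('invalid')

    # raises exc if not found, instance guaranteed
    instance = super().to_internal_value(rid)
    _rtype = self.get_rtype(instance)
    if _rtype != rtype:
        self.fail('invalid_rtype', given=rtype)
    return instance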
6b8b2ebfd17cd96a08511a31d2f1159981b5c7cd
Update drakar.py
apps/drakar.py
apps/drakar.py
import os import sys import platform from os.path import isfile, isdir, join DRAKAR_PATH = os.environ.get('DRAKAR', '/mnt/drakar') if not isdir(DRAKAR_PATH): raise OSError("No such directory: '{}'".format(DRAKAR_PATH)) SYSTEM = sys.platform + '-' + platform.machine() sources = { 'linux2-x86_64/processing-2.1-linux64.tgz': 'http://download.processing.org/processing-2.1-linux64.tgz', } def get_archive(filename): archives_path = join(DRAKAR_PATH, 'archives', SYSTEM) # => /mnt/drakar/archives/linux2-x86_64 file_path = join(archives_path, filename) # => /mnt/drakar/archives/linux2-x86_64/filename.foo if not isfile(file_path): here = os.getcwd() source = sources[join(SYSTEM, filename)] os.makedirs(archives_path) os.chdir(archives_path) os.system('wget ' + source) os.chdir(here) if isfile(file_path): return file_path else: raise IOError("Could not obtain '{}' in Drakar".format(filename)) get_archive('processing-2.1-linux64.tgz')
Python
0
@@ -1026,16 +1026,22 @@ name))%0A%0A +print( get_arch @@ -1073,9 +1073,10 @@ 64.tgz') +) %0A
c2e36c0c59e728e246f20aacbc5f6f368ca274b7
add comment
usr/sbin/traverse_dir.py
usr/sbin/traverse_dir.py
#!/usr/bin/env python ############################################################################### # Copyright (c) 2015 Jamis Hoo # Project: # Filename: traverse_dir.py # Version: 1.0 # Author: Jamis Hoo # E-mail: hoojamis@gmail.com # Date: Aug 3, 2015 # Time: 14:06:02 # Description: ############################################################################### from __future__ import print_function import sys import os import re import config_load if __name__ == "__main__": if len(sys.argv) < 3: print("Error: Command line arguments error. Please view source code to get help. ") exit(1) lib_path = str(sys.argv[1]) conf_path = str(sys.argv[2]) if not os.path.isabs(conf_path): print("Error: Configuration path", conf_path, "is not absolute path. ") exit(1) if not os.path.isabs(lib_path): print("Error: Library pth", lib_path, "is not absolute path. ") exit(1) # non-builtin modules mustn't be loaded before this statement sys.path.insert(0, lib_path) from job_queue import JobQueue # load config config = config_load.load_config(conf_path) print(config) # check config mandatory_options = [ ("MigrateInfo", "migrate.type"), ("Local", "local.image_root_path"), ("AppInfo", "appinfo.appid"), ("AppInfo", "appinfo.secretid"), ("AppInfo", "appinfo.secretkey"), ("AppInfo", "appinfo.bucket"), ("ToolConfig", "concurrency") ] for section, option in mandatory_options: if section not in config or option not in config[section]: print("Error: Option", section + "." + option, "is required. ") exit(1) if not os.path.isabs(config["Local"]["local.image_root_path"]): print("Error: Image root path", config["Local"]["local.image_root_path"], "is not absolute path") exit(1) # only filenames matching this regex will be uploaded, others would be ignored filename_pattern = re.compile(".*\.(?:jpg|jpeg|png|gif|bmp|webp)$", re.IGNORECASE) # use this to match all filenames # filename_pattern = None image_root_path = os.path.abspath(os.path.expanduser(config["Local"]["local.image_root_path"])) job_queue = JobQueue( int(config["ToolConfig"]["concurrency"]), config["AppInfo"]["appinfo.appid"], config["AppInfo"]["appinfo.bucket"], config["AppInfo"]["appinfo.secretid"], config["AppInfo"]["appinfo.secretkey"] ) # traverse dir for dirpath, dirs, files in os.walk(image_root_path): for filename in files: if filename_pattern and not filename_pattern.match(filename): continue full_name = os.path.join(dirpath, filename) fileid = full_name[len(image_root_path) + 1:] #print(full_name, ":", fileid) job_queue.inqueue(0, full_name, fileid) job_queue.finish()
Python
0
@@ -475,16 +475,61 @@ _load%0A%0A%0A +# command line arguments: lib_path conf_path%0A if __nam
1c02294073c855fab8d6fc0965f3584b0ac0137b
clean up example file
examples/example.py
examples/example.py
#!/usr/bin/env python3 import matplotlib.pyplot as plt from oemof import db conn = db.connection(section='oedb') from dingo.core import NetworkDingo from dingo.tools import config as cfg_dingo from dingo.tools.debug import compare_graphs plt.close('all') cfg_dingo.load_config('config_db_tables') cfg_dingo.load_config('config_calc') cfg_dingo.load_config('config_files') cfg_dingo.load_config('config_misc') # get engine for database connection #conn = db.connection(db_section='ontohub_wdb', cfg_file='~/.dingo/config') # <-- TODO: include custom config file from given path (+input for oemof) # instantiate dingo network object nd = NetworkDingo(name='network') # mv_grid_districts=[360, 571, 593, 368, 491, 425, 416, 372, 387, 407, 403, 373, 482] # some MV grid_districts from SPF region # mv_grid_districts=[360, 571, 593, 368, 491, 416, 372, 387, 407, 403, 373, 482] # some MV grid_districts from SPF region # mv_grid_districts=[482] # mv_grid_districts = [386,372,406,371,402,415,480,424,489,367,359,569,591] mv_grid_districts=[480] nd.import_mv_grid_districts(conn, mv_grid_districts) nd.import_generators(conn) nd.mv_parametrize_grid() nd.mv_routing(debug=False, animation=False) nd.connect_generators() nd.set_branch_ids() #conn.close() # DEBUG (CLEAN UP THE SALT) #compare_graphs(graph=nd._mv_grid_districts[0].mv_grid._graph, # mode='compare') nd.set_circuit_breakers() # Open and close all circuit breakers in grid (for testing) [gd.mv_grid.open_circuit_breakers() for gd in nd._mv_grid_districts] #nd._mv_grid_districts[0].mv_grid.close_circuit_breakers() # Analyze grid by power flow analysis for mv_grid_district in nd._mv_grid_districts: mv_grid_district.mv_grid.run_powerflow(conn, method='onthefly') nd.export_mv_grid(conn, mv_grid_districts) conn.close() #nd.reinforce_grid() # for edge in nd._mv_grid_districts[0].mv_grid.graph_edges(): # if edge['branch'].type is not None: # print(edge['branch'].type['name']) # else: # print('None') # lvrg = [] # for mv_grid_district in nd.mv_grid_districts(): # #print(mv_grid_district._lv_load_area_groups) # #print(type(mv_grid_district._lv_load_area_groups)) # for lv_load_area_group in iter(mv_grid_district._lv_load_area_groups): # lvrg.append([str(lv_load_area_group), lv_load_area_group.peak_load_sum, lv_load_area_group.branch_length_sum]) # lvrg = sorted(lvrg, key=lambda x: x[1]) # # for lvrg_name, lvrg_load, lvrg_length in lvrg: # print(lvrg_name, lvrg_load, lvrg_length) #df = nx.to_pandas_dataframe(nd._mv_grid_districts[0].mv_grid._graph) # import pprint # for edge in nd._mv_grid_districts[0].mv_grid._graph.edge.keys(): # # print(edge, type(edge)) # pprint.pprint(edge) # pprint.pprint(nd._mv_grid_districts[0].mv_grid._graph.edge[edge]) #nd._mv_grid_districts[0].mv_grid.graph_draw()
Python
0.000002
@@ -74,44 +74,8 @@ db%0A -conn = db.connection(section='oedb') %0Afro @@ -154,53 +154,8 @@ ingo -%0Afrom dingo.tools.debug import compare_graphs %0A%0Apl @@ -330,23 +330,8 @@ )%0A%0A# - get engine for dat @@ -347,17 +347,16 @@ nection%0A -# conn = d @@ -368,19 +368,16 @@ nection( -db_ section= @@ -382,468 +382,84 @@ n='o -ntohub_wdb', cfg_file='~/.dingo/config') # %3C-- TODO: include custom config file from given path (+input for oemof)%0A%0A# instantiate dingo network object%0And = NetworkDingo(name='network')%0A%0A# mv_grid_districts=%5B360, 571, 593, 368, 491, 425, 416, 372, 387, 407, 403, 373, 482%5D # some MV grid_districts from SPF region%0A# mv_grid_districts=%5B360, 571, 593, 368, 491, 416, 372, 387, 407, 403, 373, 482%5D # some MV grid_districts from SPF region%0A# mv_grid_districts=%5B482%5D +edb')%0A%0A# instantiate dingo network object%0And = NetworkDingo(name='network')%0A %0A# m @@ -609,16 +609,17 @@ tricts)%0A +%0A nd.impor @@ -758,49 +758,114 @@ s()%0A -#conn.close()%0A# DEBUG (CLEAN UP THE SALT) +%0A# DEBUG (Compare graphs to CLEAN UP THE SALT)%0A#conn.close()%0A#from dingo.tools.debug import compare_graphs %0A#co @@ -1033,22 +1033,8 @@ grid - (for testing) %0A%5Bgd @@ -1102,67 +1102,8 @@ cts%5D -%0A#nd._mv_grid_districts%5B0%5D.mv_grid.close_circuit_breakers() %0A%0A# @@ -1318,1055 +1318,44 @@ )%0A%0A# -nd.reinforce_grid()%0A%0A# for edge in nd._mv_grid_districts%5B0%5D.mv_grid.graph_edges():%0A# if edge%5B'branch'%5D.type is not None:%0A# print(edge%5B'branch'%5D.type%5B'name'%5D)%0A# else:%0A# print('None')%0A%0A# lvrg = %5B%5D%0A# for mv_grid_district in nd.mv_grid_districts():%0A# #print(mv_grid_district._lv_load_area_groups)%0A# #print(type(mv_grid_district._lv_load_area_groups))%0A# for lv_load_area_group in iter(mv_grid_district._lv_load_area_groups):%0A# lvrg.append(%5Bstr(lv_load_area_group), lv_load_area_group.peak_load_sum, lv_load_area_group.branch_length_sum%5D)%0A# lvrg = sorted(lvrg, key=lambda x: x%5B1%5D)%0A#%0A# for lvrg_name, lvrg_load, lvrg_length in lvrg:%0A# print(lvrg_name, lvrg_load, lvrg_length)%0A%0A%0A%0A#df = nx.to_pandas_dataframe(nd._mv_grid_districts%5B0%5D.mv_grid._graph)%0A# import pprint%0A# for edge in nd._mv_grid_districts%5B0%5D.mv_grid._graph.edge.keys():%0A# # print(edge, type(edge))%0A# pprint.pprint(edge)%0A# pprint.pprint(nd._mv_grid_districts%5B0%5D.mv_grid._graph.edge%5Bedge%5D)%0A%0A#nd._mv_grid_districts%5B0%5D.mv_grid.graph_draw + reinforce MV grid%0A#nd.reinforce_grid ()%0A
2645ac98fd2698bcfbf4a23cdb6c9583e6b31a50
create test user from python updated
src/apps/flow_manager/createdb.py
src/apps/flow_manager/createdb.py
import requests dbhost = "localhost" dbport = "5984" dbuser = "root" dbpass = "asm123" database = "faucet" userdatabase = "users" username = "testflowmgr@faucetsdn.org" password = "testflowmgr" role = "1" data = {"username": username, "password": password, "role": role} # createdb = requests.put("http://"+dbuser+":"+dbpass+"@"+dbhost+":"+dbport+"/"+userdatabase+"/") # print createdb view_data = "{_id: '_design/users',views: {'users': {'map': 'function(doc){ emit(doc._id, doc)}'}}}" createvw = requests.put( url="http://" + dbuser + ":" + dbpass + "@" + dbhost + ":" + dbport + "/" + userdatabase + "/_design/users/", data=view_data) requests.put(url="http://" + dbuser + ":" + dbpass + "@" + dbhost + ":" + dbport + "/" + userdatabase + "/_design/users/", data={'_id': '_design/users','views': {'users': {'map': 'function(doc){ emit(doc._id, doc)}'}}}) print createvw.status_code, createvw.content # createuser =requests.post(url="http://"+dbhost+":"+dbport+"/"+userdatabase+"/", json={"username": username, "password": password, "role": role}) # print createuser
Python
0.000001
@@ -8,16 +8,28 @@ requests +%0Aimport json %0A%0Adbhost @@ -277,18 +277,16 @@ : role%7D%0A -# createdb @@ -373,18 +373,16 @@ se+%22/%22)%0A -# print cr @@ -404,13 +404,14 @@ a = -%22 %7B +' _id +' : '_ @@ -424,21 +424,23 @@ /users', +' views +' : %7B'user @@ -494,449 +494,213 @@ '%7D%7D%7D -%22%0Acreatevw = requests.put(%0A url=%22http://%22 + dbuser + %22:%22 + dbpass + %22@%22 + dbhost + %22:%22 + dbport + %22/%22 + userdatabase + %22/_design/users/%22,%0A data=view_data)%0Arequests.put(url=%22http://%22 + dbuser + %22:%22 + dbpass + %22@%22 + dbhost + %22:%22 + dbport + %22/%22 + userdatabase + %22/_design/users/%22,%0A data=%7B'_id': '_design/users','views': %7B'users': %7B'map': 'function(doc)%7B emit(doc._id, doc)%7D'%7D%7D%7D)%0Aprint createvw.status_code, createvw.content%0A# +%0Aview_data = json.dumps(view_data)%0Acreatevw = requests.put(url=%22http://%22 + dbuser + %22:%22 + dbpass + %22@%22 + dbhost + %22:%22 + dbport + %22/%22 + userdatabase + %22/_design/users/%22,%0A data=view_data)%0A%0Aprint createvw%0A crea @@ -844,10 +844,8 @@ e%7D)%0A -# prin
f6a5ad0d2107b60899f5abaf2dbce65beae34a59
Drop remaining "OrderedDict" usages.
utilities/export_todo.py
utilities/export_todo.py
#!/usr/bin/env python """ Export TODOs ============ """ from __future__ import annotations import codecs import os from collections import OrderedDict __copyright__ = "Copyright 2013 Colour Developers" __license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "colour-developers@colour-science.org" __status__ = "Production" __all__ = [ "TODO_FILE_TEMPLATE", "extract_todo_items", "export_todo_items", ] TODO_FILE_TEMPLATE = """ Colour - TODO ============= TODO ---- {0} About ----- | **Colour** by Colour Developers | Copyright 2013 Colour Developers – \ `colour-developers@colour-science.org <colour-developers@colour-science.org>`__ | This software is released under terms of New BSD License: \ https://opensource.org/licenses/BSD-3-Clause | `https://github.com/colour-science/colour \ <https://github.com/colour-science/colour>`__ """[ 1: ] def extract_todo_items(root_directory: str) -> OrderedDict: """ Extract the TODO items from given directory. Parameters ---------- root_directory Directory to extract the TODO items from. Returns ------- :class:`collections.OrderedDict` TODO items. """ todo_items = OrderedDict() for root, dirnames, filenames in os.walk(root_directory): for filename in filenames: if not filename.endswith(".py"): continue filename = os.path.join(root, filename) with codecs.open(filename, encoding="utf8") as file_handle: content = file_handle.readlines() in_todo = False line_number = -1 todo_item = [] for i, line in enumerate(content): line = line.strip() if line.startswith("# TODO:"): in_todo = True line_number = i todo_item.append(line) continue if in_todo and line.startswith("#"): todo_item.append(line.replace("#", "").strip()) elif len(todo_item): key = filename.replace("../", "") if not todo_items.get(key): todo_items[key] = [] todo_items[key].append((line_number, " ".join(todo_item))) in_todo = False line_number todo_item = [] return todo_items def export_todo_items(todo_items: OrderedDict, file_path: str): """ Export TODO items to given file. Parameters ---------- todo_items TODO items. file_path File to write the TODO items to. """ todo_rst = [] for module, todo_items in todo_items.items(): todo_rst.append(f"- {module}\n") for line_numer, todo_item in todo_items: todo_rst.append(f" - Line {line_numer} : {todo_item}") todo_rst.append("\n") with codecs.open(file_path, "w", encoding="utf8") as todo_file: todo_file.write(TODO_FILE_TEMPLATE.format("\n".join(todo_rst[:-1]))) if __name__ == "__main__": export_todo_items( extract_todo_items(os.path.join("..", "colour")), os.path.join("..", "TODO.rst"), )
Python
0
@@ -113,44 +113,8 @@ t os -%0Afrom collections import OrderedDict %0A%0A__ @@ -946,24 +946,17 @@ str) -%3E -OrderedD +d ict:%0A @@ -1150,28 +1150,9 @@ ss:%60 -collections.OrderedD +d ict%60 @@ -1202,21 +1202,10 @@ s = -OrderedDict() +%7B%7D %0A @@ -2453,16 +2453,9 @@ ms: -OrderedD +d ict,
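Applying the hunks, the extractor's header reads as below (a sketch; the body is unchanged apart from the initializer, and export_todo_items likewise now annotates its argument as todo_items: dict). Dropping OrderedDict leans on plain dicts preserving insertion order, which holds on the interpreters this module targets:

def extract_todo_items(root_directory: str) -> dict:
    """
    Extract the TODO items from given directory.

    Parameters
    ----------
    root_directory
        Directory to extract the TODO items from.

    Returns
    -------
    :class:`dict`
        TODO items.
    """

    todo_items = {}  # was: OrderedDict()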
937da42ae66a4f88f5fa4493c804df415559680c
fix in _adjustAttributeFields(): protect from cases when attribute list is empty
core/src/main/python/stratuslab/Monitor.py
core/src/main/python/stratuslab/Monitor.py
# # Created as part of the StratusLab project (http://stratuslab.eu), # co-funded by the European Commission under the Grant Agreement # INFSO-RI-261552." # # Copyright (c) 2010, SixSq Sarl # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import os from stratuslab.CloudConnectorFactory import CloudConnectorFactory from stratuslab.CloudInfo import CloudInfo from stratuslab.Configurable import Configurable from stratuslab.Authn import AuthnFactory import Util try: from lxml import etree except ImportError: try: # Python 2.5 import xml.etree.cElementTree as etree except ImportError: try: # Python 2.5 import xml.etree.ElementTree as etree except ImportError: try: # normal cElementTree install import cElementTree as etree except ImportError: try: # normal ElementTree install import elementtree.ElementTree as etree except ImportError: raise Exception("Failed to import ElementTree from any known place") class Monitor(Configurable): def __init__(self, configHolder): super(Monitor, self).__init__(configHolder) self._setCloud() self.hostInfoDetailAttributes = (['id',4], ['name',16], ['im_mad',8], ['vm_mad',8], ['tm_mad',8]) self.hostInfoListAttributes = (['id',4], ['name',16]) self.vmInfoDetailAttributes = (['id',4], ['state_summary', 16], ['cpu', 10], ['memory', 10], ['ip', 16]) self.vmInfoListAttributes = (['id',4], ['state_summary', 16], ['cpu', 10], ['memory', 10], ['ip', 16]) self.labelDecorator = {'state_summary': 'state'} def _setCloud(self): credentials = AuthnFactory.getCredentials(self) self.cloud = CloudConnectorFactory.getCloud(credentials) endpointEnv = 'STRATUSLAB_ENDPOINT' if endpointEnv in os.environ: self.cloud.setEndpoint(os.environ[endpointEnv]) elif 'frontendIp' in self.__dict__ and 'proxyPort' in self.__dict__: self.cloud.setEndpointFromParts(self.frontendIp, self.proxyPort) else: self.cloud.setEndpoint(self.endpoint) def nodeDetail(self, nodeIds): infoList = [] for id in nodeIds: infoList.append(self._nodeDetail(id)) return infoList def _nodeDetail(self, id): res = self.cloud.getHostInfo(int(id)) host = etree.fromstring(res) info = CloudInfo() info.populate(host) return info def vmDetail(self, ids): infoList = [] for id in ids: infoList.append(self._vmDetail(id)) return infoList def _vmDetail(self, id): res = self.cloud.getVmInfo(int(id)) vm = etree.fromstring(res) info = CloudInfo() info.populate(vm) return info def _printList(self, infoList): for info in infoList: self._printInfo(info, self.hostInfoListAttributes) def listNodes(self): nodes = self.cloud.listHosts() return self._iterate(etree.fromstring(nodes)) def listVms(self, showVmsFromAllUsers=False): vms = self.cloud.listVms(showVmsFromAllUsers) return self._iterate(etree.fromstring(vms)) def _iterate(self, list): infoList = [] for item in list: info = CloudInfo() info.populate(item) infoList.append(info) return infoList def printList(self, list): self._adjustHostAttributeFields(list) self._printInfoHeader(self.hostInfoListAttributes) for item in list: self._printInfo(item, self.hostInfoDetailAttributes) def printDetails(self, list): self._adjustHostAttributeFields(list) self._printInfoHeader(self.hostInfoDetailAttributes) for item in list: self._printInfo(item, self.hostInfoDetailAttributes) def printVmList(self, list): self._adjustVmAttributeFields(list) self._printInfoHeader(self.vmInfoListAttributes) for item in list: self._printInfo(item, self.vmInfoListAttributes) def printVmDetails(self, list): self._adjustVmAttributeFields(list) self._printInfoHeader(self.vmInfoDetailAttributes) for item in list: self._printInfo(item, self.vmInfoDetailAttributes) def _printInfoHeader(self, headerAttributes): Util.printEmphasisStart() try: for attrib in headerAttributes: label = self._decorateLabel(attrib[0]) sys.stdout.write(label.ljust(int(attrib[1]))) finally: Util.printEmphasisStop() sys.stdout.write('\n') def _decorateLabel(self, label): return self.labelDecorator.get(label,label) def _printInfo(self, info, headerAttributes): for attrib in headerAttributes: sys.stdout.write(getattr(info, attrib[0]).ljust(int(attrib[1]))) sys.stdout.write('\n') def _adjustVmAttributeFields(self, _list): attrList = ('vmInfoDetailAttributes', 'vmInfoListAttributes') self._adjustAttributeFields(_list, attrList) def _adjustHostAttributeFields(self, _list): attrList = ('hostInfoDetailAttributes','hostInfoDetailAttributes') self._adjustAttributeFields(_list, attrList) def _adjustAttributeFields(self, _list, attrList): for attr in attrList: for i,attrVal in enumerate(getattr(self, attr)): lenMax = max(map(lambda x: len(getattr(x, attrVal[0])), _list)) if lenMax >= getattr(self, attr)[i][1]: getattr(self, attr)[i][1] = lenMax + 1
Python
0.000001
@@ -5967,24 +5967,46 @@ attrList):%0A + if _list:%0A for @@ -6019,24 +6019,28 @@ n attrList:%0A + @@ -6104,16 +6104,20 @@ + + lenMax = @@ -6188,16 +6188,20 @@ + if lenMa @@ -6232,16 +6232,20 @@ %5Bi%5D%5B1%5D:%0A +
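Unescaped, the hunks wrap the whole body in an if _list: guard (re-indenting the loops), so max() is never evaluated over an empty sequence; the method after the change (sketch):

def _adjustAttributeFields(self, _list, attrList):
    if _list:  # guard: max() over an empty attribute list would raise ValueError
        for attr in attrList:
            for i,attrVal in enumerate(getattr(self, attr)):
                lenMax = max(map(lambda x: len(getattr(x, attrVal[0])), _list))
                if lenMax >= getattr(self, attr)[i][1]:
                    getattr(self, attr)[i][1] = lenMax + 1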
547ee3fb5db9ebb0bed0443d865ec76f44904b9e
Add url_shortener.views.render_preview function
url_shortener/views.py
url_shortener/views.py
# -*- coding: utf-8 -*- from flask import session, redirect, url_for, flash, render_template from . import app from .forms import ShortenedUrlForm from .models import ShortenedUrl, register @app.route('/', methods=['GET', 'POST']) def shorten_url(): '''Display form and handle request for url shortening If short url is successfully created or found for the given url, its alias property is saved in session, and the function redirects to its route. After redirection, the alias is used to query for newly created shortened url, and information about it is presented. If there are any errors for data entered by the user into the input tex field, they are displayed. :returns: a response generated by rendering the template, either directly or after redirection. ''' form = ShortenedUrlForm() if form.validate_on_submit(): shortened_url = ShortenedUrl.get_or_create(form.url.data) register(shortened_url) session['new_alias'] = str(shortened_url.alias) return redirect(url_for(shorten_url.__name__)) else: for error in form.url.errors: flash(error, 'error') try: new_shortened_url = ShortenedUrl.get_or_404(session['new_alias']) except KeyError: new_shortened_url = None return render_template( 'shorten_url.html', form=form, new_shortened_url=new_shortened_url ) @app.route('/<alias>') def redirect_for(alias): ''' Redirect to address assigned to given alias :param alias: a string value by which we search for an associated url. If it is not found, a 404 error occurs :returns: a redirect to target url of short url, if found. ''' shortened_url = ShortenedUrl.get_or_404(alias) return redirect(shortened_url.target) @app.route('/preview/<alias>') def preview(alias): ''' Show the preview for given alias The preview contains a short url and a target url associated with it. :param alias: a string value by which we search for an associated url. If it is not found, a 404 error occurs. :returns: a response generated from the preview template ''' shortened_url = ShortenedUrl.get_or_404(alias) return render_template( 'preview.html', short_url=shortened_url.short_url, target=shortened_url.target ) @app.errorhandler(404) def not_found(error): return render_template('not_found.html') @app.errorhandler(500) def server_error(error): return render_template('server_error.html')
Python
0.000004
@@ -1425,24 +1425,210 @@ url%0A )%0A%0A%0A +def render_preview(shortened_url, warning_message=None):%0A return render_template(%0A 'preview.html',%0A shortened_url=shortened_url,%0A warning=warning_message%0A )%0A%0A%0A @app.route('
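Unescaped, the inserted helper is:

def render_preview(shortened_url, warning_message=None):
    return render_template(
        'preview.html',
        shortened_url=shortened_url,
        warning=warning_message
    )

Unlike the existing preview view, which unpacks short_url and target individually, this helper hands the whole shortened_url object to the template along with an optional warning message.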
561e7905de823e5c2ff2f4822d4051d9a7635dfc
Fix checking whether the heap contains the node when e.g. calling .pop().
idiokit/heap.py
idiokit/heap.py
class HeapError(Exception): pass class Heap(object): def __init__(self, iterable=()): self._heap = list() for value in iterable: self.push(value) def _get(self, node): if not self._heap: raise HeapError("empty heap") if node is None: node = self._heap[0] if self._heap[node._index] is not node: raise HeapError("node not in the heap") return node def push(self, value): node = _Node(len(self._heap), value) self._heap.append(node) _up(self._heap, node) return node def peek(self, node=None): return self._get(node)._value def pop(self, node=None): node = self._get(node) last = self._heap.pop() if last is not node: self._heap[node._index] = last last._index = node._index _down(self._heap, last) return node._value def __nonzero__(self): return not not self._heap class _Node(object): __slots__ = "_index", "_value" def __init__(self, index, value): self._index = index self._value = value def _swap(array, left, right): array[left._index] = right array[right._index] = left left._index, right._index = right._index, left._index return right, left def _up(array, node): while node._index > 0: parent = array[(node._index - 1) // 2] if parent._value <= node._value: break _swap(array, node, parent) def _down(array, node): length = len(array) while True: smallest = node left_index = 2 * node._index + 1 if left_index < length: left = array[left_index] if left._value < smallest._value: smallest = left right_index = left_index + 1 if right_index < length: right = array[right_index] if right._value < smallest._value: smallest = right if node is smallest: break _swap(array, node, smallest) if __name__ == "__main__": import unittest class HeapTests(unittest.TestCase): def test_pop(self): h = Heap([0, 1, 2, 3]) assert h.pop() == 0 assert h.pop() == 1 assert h.pop() == 2 assert h.pop() == 3 self.assertRaises(HeapError, h.pop) unittest.main()
Python
0
@@ -345,16 +345,50 @@ if +len(self._heap) %3C= node._index or self._he
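With the hunk applied, _get bounds-checks the stored index before subscripting: a node that was already popped keeps a stale _index that may point past the end of the shrunken heap, which previously surfaced as an IndexError rather than HeapError. Reconstructed method (sketch):

def _get(self, node):
    if not self._heap:
        raise HeapError("empty heap")
    if node is None:
        node = self._heap[0]
    # length check first, so a stale index past the end of the heap fails
    # cleanly; the identity check still catches slots reused by other nodes
    if len(self._heap) <= node._index or self._heap[node._index] is not node:
        raise HeapError("node not in the heap")
    return node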
2ec06da007f2b2118e7275acf289b0bea2e5f53d
Fix usage of method strtime
gceapi/context.py
gceapi/context.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of gceapi.""" from oslo_context import context from oslo_log import log as logging from oslo_utils import timeutils from gceapi import exception from gceapi.i18n import _ LOG = logging.getLogger(__name__) class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, user_name=None, project_name=None, service_catalog=None, **kwargs): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ super(RequestContext, self).__init__(auth_token=auth_token, user=user_id, tenant=project_id, is_admin=is_admin, request_id=request_id, overwrite=overwrite, roles=roles) if kwargs: LOG.warning(_('Arguments dropped when creating context: %s') % str(kwargs)) self.user_id = user_id self.project_id = project_id self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, basestring): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp self.service_catalog = service_catalog self.user_name = user_name self.project_name = project_name self.operation = None self.operation_start_time = None self.operation_get_progress_method = None self.operation_item_id = None self.operation_data = {} def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self): values = super(RequestContext, self).to_dict() values.update({ 'user_id': self.user_id, 'project_id': self.project_id, 'read_deleted': self.read_deleted, 'remote_address': self.remote_address, 'timestamp': timeutils.strtime(self.timestamp), 'user_name': self.user_name, 'project_name': self.project_name, 'service_catalog': self.service_catalog }) return values @classmethod def from_dict(cls, values): return cls(**values) def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def require_context(ctxt): """Raise exception.NotAuthorized() if context is not a user or an admin context. """ if not ctxt.is_admin and not is_user_context(ctxt): raise exception.NotAuthorized()
Python
0.000154
@@ -4167,26 +4167,8 @@ p': -timeutils.strtime( self @@ -4177,16 +4177,27 @@ imestamp +.isoformat( ),%0A
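The hunk swaps the deprecated oslo timeutils.strtime helper for the datetime object's own serializer, so the to_dict entry now reads:

'timestamp': self.timestamp.isoformat(),
# before: 'timestamp': timeutils.strtime(self.timestamp),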
b95fa25a5d3a00d8d1113f2d61defee69215374b
add tests for submitted graderoster
course_grader/test/dao/test_graderoster.py
course_grader/test/dao/test_graderoster.py
from django.test import TestCase from uw_pws.util import fdao_pws_override from uw_sws.util import fdao_sws_override from course_grader.dao.person import PWS from course_grader.dao.section import get_section_by_label from course_grader.dao.graderoster import graderoster_for_section from course_grader.exceptions import ( GradingNotPermitted, ReceiptNotFound, GradingPeriodNotOpen) @fdao_sws_override @fdao_pws_override class GraderosterDAOFunctionsTest(TestCase): def test_graderoster_for_section(self): section = get_section_by_label('2013,spring,TRAIN,101/A') user = PWS().get_person_by_regid('FBB38FE46A7C11D5A4AE0004AC494FFE') gr = graderoster_for_section(section, user, user) self.assertEqual(len(gr.items), 1000) def test_submitted_graderoster_for_section(self): section = get_section_by_label('2013,spring,TRAIN,101/B') user = PWS().get_person_by_regid('FBB38FE46A7C11D5A4AE0004AC494FFE') self.assertRaises( ReceiptNotFound, graderoster_for_section, section, user, user, submitted_graderosters_only=True) def test_graderoster_not_permitted(self): section = get_section_by_label('2013,spring,TRAIN,100/AA') user = PWS().get_person_by_regid('9136CCB8F66711D5BE060004AC494FFE') self.assertRaises( GradingNotPermitted, graderoster_for_section, section, user, user)
Python
0
@@ -26,16 +26,50 @@ estCase%0A +from django.utils import timezone%0A from uw_ @@ -310,16 +310,70 @@ section%0A +from course_grader.models import SubmittedGradeRoster%0A from cou @@ -1055,146 +1055,1767 @@ -self.assertRaises(%0A ReceiptNotFound, graderoster_for_section, section, user, user,%0A submitted_graderosters_only=True +gr = graderoster_for_section(section, user, user)%0A self.assertEqual(len(gr.items), 1)%0A self.assertFalse(hasattr(gr, 'submission_id'))%0A%0A model = SubmittedGradeRoster(%0A section_id='2013,spring,TRAIN,101/B',%0A instructor_id='FBB38FE46A7C11D5A4AE0004AC494FFE',%0A term_id='2013,spring',%0A submitted_date=timezone.now(),%0A submitted_by='FBB38FE46A7C11D5A4AE0004AC494FFE',%0A accepted_date=timezone.now(),%0A status_code=200,%0A document=gr.xhtml()%0A )%0A model.save()%0A%0A gr = graderoster_for_section(section, user, user)%0A self.assertEqual(len(gr.items), 1)%0A self.assertFalse(hasattr(gr, 'submission_id'))%0A%0A def test_submitted_only_graderoster_for_section(self):%0A section = get_section_by_label('2013,spring,TRAIN,101/B')%0A user = PWS().get_person_by_regid('FBB38FE46A7C11D5A4AE0004AC494FFE')%0A%0A self.assertRaises(%0A ReceiptNotFound, graderoster_for_section, section, user, user,%0A submitted_graderosters_only=True)%0A%0A gr = graderoster_for_section(section, user, user)%0A%0A model = SubmittedGradeRoster(%0A section_id='2013,spring,TRAIN,101/B',%0A instructor_id='FBB38FE46A7C11D5A4AE0004AC494FFE',%0A term_id='2013,spring',%0A submitted_date=timezone.now(),%0A submitted_by='FBB38FE46A7C11D5A4AE0004AC494FFE',%0A accepted_date=timezone.now(),%0A status_code=200,%0A document=gr.xhtml()%0A )%0A model.save()%0A%0A gr = graderoster_for_section(%0A section, user, user, submitted_graderosters_only=True)%0A self.assertEqual(len(gr.items), 1)%0A self.assertEqual(gr.submission_id, 'B' )%0A%0A
0baa58ed1564934158d43b5f8556913014535a60
clean up: get.py imported modules
gdcmdtools/get.py
gdcmdtools/get.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from apiclient import errors from base import GDBase import logging logger = logging.getLogger() logger.setLevel(logging.ERROR) from gdcmdtools.auth import GDAuth from gdcmdtools.auth import SCOPE import requests from requests_oauthlib import OAuth2Session import re import os import json import io import sys import pprint export_format = { "application/vnd.google-apps.spreadsheet":["pdf", "ods", "xlsx"], "application/vnd.google-apps.document":["pdf", "docx", "rtf", "odt", "html", "txt"], "application/vnd.google-apps.presentation":["pdf", "pptx", "txt"], "application/vnd.google-apps.drawing":["png", "pdf", "jpeg", "svg"], "application/vnd.google-apps.script+json":["json"], } """ application/vnd.google-apps.script+json { "files": [ { "id": "bbda34aa-c700-48d9-88bd-ad2573a0620a", "name": "Code", "source": "FILE CONTENT", "type": "server_js" } ] } """ class GDGet: def __init__(self, file_id, format, save_as): # base auth = GDAuth() self.credentials = auth.get_credentials() if self.credentials == None: raise Exception("Failed to retrieve credentials") self.http = auth.get_authorized_http() base = GDBase() self.service = base.get_drive_service(self.http) self.file_id = file_id self.format = format if save_as == None: self.save_as = None else: self.save_as = os.path.abspath(save_as) def parse_gas_json(self, file_content, save_as): map_type_ext = {"server_js":"js", "html":"html"} try: jsons = json.loads(file_content) new_json = {"files":[]} for j in jsons["files"]: file_id = j["id"] file_name = j["name"] file_source = j["source"] file_type = j["type"] if file_type in map_type_ext.keys(): file_ext = map_type_ext[file_type] else: file_ext = file_type file_fullname = "%s.%s" % (file_name, file_ext) with open(file_fullname, 'wb+') as f: f.write(file_source) j.pop("source") new_json["files"].append(j) # save the project id, we need the id to upload project new_json["id"] = self.file_id with open(save_as, 'wb+') as f: f.write(json.dumps(new_json, indent=4)) except Exception, e: logger.error(e) raise def run(self): try: service_response = self.get() self.file_size = service_response.get('fileSize', None) result_title_format = self.get_title_format(service_response) logger.debug(result_title_format) title, return_format = result_title_format if self.format != "raw": title = title +"." +self.format if self.format not in return_format.keys(): raise Exception("The specified format \'%s\' is not allowed, available format are \"%s\", please correct option: --export_format" % (self.format, ', '.join(return_format.keys()))) if self.save_as == None: self.save_as = title if self.format == "json": file_content = self.get_by_format(return_format[self.format]) self.parse_gas_json(file_content, self.save_as) else: # FIXME: handle return value self.get_by_format(self.save_as, return_format[self.format]) except Exception, e: logger.error(e) raise return return_format def get(self): try: response = self.service.files().get(fileId=self.file_id).execute() logger.debug(pprint.pformat(response)) return response except errors.HttpError, error: logger.error('An error occurred: %s' % error) return None def get_title_format(self, service_response): export_links = service_response.get('exportLinks', None) return_format = {} title = service_response.get('title',None) logger.debug(title) logger.debug(export_links) if export_links == None: download_link = service_response.get(u'downloadUrl', None) return_format["raw"] = download_link else: export_link_values = export_links.values() if len(export_link_values) > 0 : for link in export_link_values: m = re.match(r'^.*[Ff]ormat=(.*)$',link) return_format[m.group(1)] = link return title, return_format def get_by_format(self, save_as, url): fd = io.FileIO(save_as, mode='wb') creds = self.credentials # refresh token? token = {"access_token":creds.access_token, "token_type":"Bearer"} session = OAuth2Session(creds.client_id, scope=SCOPE, token=token) #response = session.get(url, stream=True) with open(save_as, 'wb') as f: response = session.get(url, stream=True) total_length = self.file_size if total_length is None: # no content length header f.write(response.content) else: dl = 0 total_length = int(total_length) for data in response.iter_content(chunk_size=1024*1024): dl += len(data) f.write(data) done = int(50 * dl / total_length) sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50-done)) ) sys.stdout.flush() return
Python
0
@@ -43,269 +43,8 @@ -*-%0A -%0Afrom apiclient import errors%0Afrom base import GDBase%0Aimport logging%0Alogger = logging.getLogger()%0Alogger.setLevel(logging.ERROR)%0A%0Afrom gdcmdtools.auth import GDAuth%0Afrom gdcmdtools.auth import SCOPE%0A%0Aimport requests%0Afrom requests_oauthlib import OAuth2Session%0A%0A impo @@ -92,17 +92,16 @@ ort sys%0A -%0A import p @@ -105,16 +105,275 @@ t pprint +%0Aimport logging%0Alogger = logging.getLogger()%0Alogger.setLevel(logging.DEBUG)%0A%0Aimport requests%0Afrom requests_oauthlib import OAuth2Session%0Afrom apiclient import errors%0A%0Afrom gdcmdtools.auth import GDAuth%0Afrom gdcmdtools.auth import SCOPE%0Afrom base import GDBase %0A%0Aexport @@ -751,277 +751,15 @@ %22%5D,%0A + %7D%0A%0A -%22%22%22 application/vnd.google-apps.script+json%0A%7B %0A %22files%22: %5B %0A %7B %0A %22id%22: %22bbda34aa-c700-48d9-88bd-ad2573a0620a%22, %0A %22name%22: %22Code%22, %0A %22source%22: %22FILE CONTENT%22,%0A %22type%22: %22server_js%22%0A %7D%0A %5D%0A%7D%0A%0A%0A%22%22%22%0A%0A clas
69df5ba1baf92238e98065d60dac73d2214787d3
Fix usage of biobank_identifier parameter
import-imput.py
import-imput.py
from pymongo import MongoClient import csv import re import argparse def categorize_float(value): if value <= 0.001: return "[0,0.001]" elif value <= 0.01: return "(0.001,0.01]" elif value <= 0.1: return "(0.01,0.1]" elif value <= 0.2: return "(0.1,0.2]" elif value <= 0.3: return "(0.2,0.3]" elif value <= 0.4: return "(0.3,0.4]" elif value <= 0.5: return "(0.4,0.5]" else: raise ValueError parser = argparse.ArgumentParser() parser.add_argument("filename", help="Filename to tsv file") parser.add_argument("biobank_identifier", help="Identifier to distinguish data in database") args = parser.parse_args() mongo_client = MongoClient() db = mongo_client.gwasc all_positions = db.gwas.find() positions = {} for position in all_positions: positions["chr"+str(position["CHR_ID"])+":"+str(position["CHR_POS"])] = position print(len(positions)) counter = 0 with open(args.filename) as tsvfile: data = csv.reader(tsvfile, delimiter=' ') for line in data: chr_id = re.sub("^chr", "", line[0]) try: # some values are at X chromosome chr_id = int(chr_id) chr_pos = int(line[1]) _id = line[0] + ":" + line[1] if _id in positions: counter += 1 _imputed = { "REF": line[3], "ALT": line[4], # "ALT_Frq": categorize_float(float(line[5])), "MAF": categorize_float(float(line[6])), "AvgCall": float(line[7]), "Rsq": float(line[8]), "Genotyped": line[9] == "Genotyped", } if line[9] == "Genotyped": print("Genotyped", _id) # if line[10] != "-": # _imputed["LooRsq"] = float(line[10]) # if line[11] != "-": # _imputed["EmpR"] = float(line[11]) # if line[12] != "-": # _imputed["EmpRsq"] = float(line[12]) # if line[13] != "-": # _imputed["Dose0"] = float(line[13]) # if line[14] != "-": # _imputed["Dose1"] = float(line[14]) db.gwas.update_many({"CHR_ID": chr_id, "CHR_POS": chr_pos}, {"$set": {"imputed": {biobank_identifier: _imputed}}}) except: pass print(counter)
Python
0.00005
@@ -2446,16 +2446,21 @@
 uted%22: %7B
+args.
 biobank_
d166563f648d409f7039ce2df1149a8cb15b13ab
add sub-package: plot
gdpy3/__init__.py
gdpy3/__init__.py
__name__ = "gdpy3"
__doc__ = "gdpy3: Gyrokinetic Toroidal Code Data Processing tools"
__author__ = "shmilee"
__version__ = "0.0.1"
__status__ = "alpha"
__license__ = "MIT"
__email__ = "shmilee.zju@gmail.com"

__all__ = ['convert', 'read']
Python
0.999514
@@ -222,16 +222,24 @@
 onvert',
+ 'plot',
  'read'%5D
f276c840f2981ec2951e07c7b847f82811db0745
Remove unnecessary None handling
openstack/database/v1/user.py
openstack/database/v1/user.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.database import database_service
from openstack import resource


class User(resource.Resource):
    id_attribute = 'name'
    resource_key = 'user'
    resources_key = 'users'
    base_path = '/instances/%(instance_id)s/users'
    service = database_service.DatabaseService()

    # capabilities
    allow_create = True
    allow_delete = True
    allow_list = True

    # path args
    instance_id = resource.prop('instance_id')

    # Properties
    databases = resource.prop('databases')
    name = resource.prop('name')
    _password = resource.prop('password')

    @property
    def password(self):
        try:
            val = self._password
        except AttributeError:
            val = None
        return val

    @password.setter
    def password(self, val):
        self._password = val

    @classmethod
    def create_by_id(cls, session, attrs, r_id=None, path_args=None):
        url = cls._get_url(path_args)
        # Create expects an array of users
        body = {'users': [attrs]}
        resp = session.post(url, service=cls.service, json=body).body
        return resp
Python
0.000062
@@ -1083,17 +1083,16 @@
 e')%0A    
-_
 password
@@ -1125,246 +1125,8 @@
 ')%0A%0A
-    @property%0A    def password(self):%0A        try:%0A            val = self._password%0A        except AttributeError:%0A            val = None%0A        return val%0A%0A    @password.setter%0A    def password(self, val):%0A        self._password = val%0A%0A
     
d1596872f11f95e406a6a3a97222e499abf4f222
update plot_ts
examples/plot_ts.py
examples/plot_ts.py
""" ====================== Plotting a time series ====================== An example plot of `pyts.visualization.plot_ts` """ import numpy as np from scipy.stats import norm from pyts.visualization import plot_ts # Parameters n_samples = 100 n_features = 48 rng = np.random.RandomState(41) delta = 0.5 dt = 1 # Generate a toy dataset X = (norm.rvs(scale=delta**2 * dt, size=n_samples*n_features, random_state=rng) .reshape((n_samples, n_features))) X[:, 0] = 0 X = np.cumsum(X, axis=1) # Plot the first sample plot_ts(X[0])
Python
0.000001
@@ -71,55 +71,31 @@
 ==%0A%0A
-An example plot of %60pyts.visualization.plot_ts%60
+Plotting a time series.
 %0A%22%22%22
@@ -148,46 +148,39 @@
 orm%0A
-from pyts.visualization import plot_ts
+import matplotlib.pyplot as plt
 %0A%0A#
@@ -333,16 +333,21 @@
 *2 * dt,
+%0A    
  size=n_
@@ -353,17 +353,19 @@
 _samples
-*
+ * 
 n_featur
@@ -367,16 +367,21 @@
 eatures,
+%0A    
  random_
@@ -394,18 +394,8 @@
 rng)%0A
 .res
@@ -491,18 +491,30 @@
 ple%0A
+plt.
 plot
-_ts
 (X%5B0%5D)%0A
+plt.show()%0A
c9762ec881fc041eafb0b8906242cefcdbc4bf7f
Add UsageRecord class
accloudtant/__main__.py
accloudtant/__main__.py
import csv


def area(entry):
    if entry[" UsageType"].startswith("EUC1-"):
        return "EU (Frankfurt)"


def is_data_transfer(entry):
    if "DataTransfer" in entry[" UsageType"] or "CloudFront" in entry[" UsageType"]:
        return True
    return False


def get_areas(entries, resource_areas):
    areas = {}
    for entry in entries:
        area_name = area(entry)
        if area_name is None and entry[" Resource"] in resource_areas:
            area_name = resource_areas[entry[" Resource"]]
        if area_name not in areas:
            areas[area_name] = []
        areas[area_name].append(entry)
    return areas


def get_data_transfers(entries):
    return [entry for entry in entries if is_data_transfer(entry)]


def get_concepts(entries, omit=lambda x: False):
    concepts = {}
    for entry in entries:
        if not omit(entry):
            if entry[" UsageType"] not in concepts:
                concepts[entry[" UsageType"]] = []
            concepts[entry[" UsageType"]].append(entry)
    return concepts


def get_total(entries):
    if entries[0][" UsageType"].endswith("ByteHrs"):
        totals = {}
        for entry in entries:
            if entry[" UsageValue"] not in totals:
                totals[entry[" UsageValue"]] = []
            totals[entry[" UsageValue"]].append(entry)
        total = 0
        for value, values in totals.items():
            total += int(value) * len(values) / 24
        return total / 1073741824 / len(entries)
    elif entries[0][" UsageType"].endswith("Bytes"):
        return sum([int(entry[" UsageValue"]) for entry in entries]) / 1073741824
    return sum([int(entry[" UsageValue"]) for entry in entries])


def unit(concept):
    if concept.endswith("ByteHrs"):
        return "GB-Mo"
    elif concept.endswith("Bytes"):
        return "GB"
    return "Requests"


if __name__ == "__main__":
    usage = []
    resource_areas = {}
    with open("tests/fixtures/2021/03/S3.csv") as f:
        reader = csv.DictReader(f)
        for row in reader:
            usage.append(row)
            if area(row) is not None:
                resource_areas[row[" Resource"]] = area(row)
    print("Simple Storage Service")
    for area_name, entries in get_areas(usage, resource_areas).items():
        print("\t", area_name)
        for concept, records in get_concepts(entries, omit=lambda x: is_data_transfer(x) or x[" UsageType"] == "StorageObjectCount").items():
            total = get_total(records)
            print("\t\t", concept, "\t{:.3f}".format(total), unit(concept))
    data_transfers = get_data_transfers(usage)
    if len(data_transfers) > 0:
        print("Data Transfer")
        for area_name, entries in get_areas(data_transfers, resource_areas).items():
            print("\t", area_name)
            for concept, records in get_concepts(entries, omit=lambda x: not is_data_transfer(x)).items():
                total = get_total(records)
                print("\t\t", concept, "\t{:.3f}".format(total), unit(concept))
Python
0
@@ -6,16 +6,165 @@
 t csv%0A%0A%0A
+class UsageRecord(object):%0A    def __init__(self, data):%0A        self._data = data%0A%0A    def __getitem__(self, key):%0A        return self._data%5Bkey%5D%0A%0A%0A
 def area
@@ -2173,16 +2173,53 @@
 reader:%0A
+            entry = UsageRecord(row)%0A
         
@@ -2235,19 +2235,21 @@
 .append(
-row
+entry
 )%0A
@@ -2262,19 +2262,21 @@
 if area(
-row
+entry
 ) is not
@@ -2313,19 +2313,21 @@
 e_areas%5B
-row
+entry
 %5B%22 Resou
@@ -2340,19 +2340,21 @@
 = area(
-row
+entry
 )%0A%0A    p
78deea6602671917c5cc78de8ed20e6825179948
add PATCH support
coyote_framework/requests/requestdriver.py
coyote_framework/requests/requestdriver.py
""" Wrapper for python's "requests" library that has options to maintain session and keep a history of responses in a queue """ import requests from collections import deque class RequestDriver(object): GET = 'GET' POST = 'POST' PUT = 'PUT' DELETE = 'DELETE' session = None responses = deque([]) max_response_history = 100 def __init__(self, persist_session_between_requests=True, max_response_history=None, verify_certificates=False): self.persist_session_between_requests = persist_session_between_requests self.verify_certificates = verify_certificates if max_response_history is not None: if not max_response_history >= 0: raise ValueError('You must specify a positive integer as a max number of past requests to store') if self.persist_session_between_requests: self.session = requests.Session() else: self.session = requests def request(self, uri, method=GET, headers=None, cookies=None, params=None, data=None, post_files=None,**kwargs): """Makes a request using requests @param uri: The uri to send request @param method: Method to use to send request @param headers: Any headers to send with request @param cookies: Request cookies (in addition to session cookies) @param params: Request parameters @param data: Request data @param kwargs: other options to pass to underlying request @rtype: requests.Response @return: The response """ coyote_args = { 'headers': headers, 'cookies': cookies, 'params': params, 'files': post_files, 'data': data, 'verify': self.verify_certificates, } coyote_args.update(kwargs) if method == self.POST: response = self.session.post(uri, **coyote_args) elif method == self.PUT: response = self.session.put(uri, **coyote_args) elif method == self.DELETE: response = self.session.delete(uri, **coyote_args) else: # Default to GET response = self.session.get(uri, **coyote_args) self.responses.append(response) while len(self.responses) > self.max_response_history: self.responses.popleft() return response def get_last_response(self): """Convenience method for retrieving the last response""" try: return self.responses[-1] except IndexError: return None def wipe_session(self): """Sets the driver's session to a new request session @return: None """ self.session = requests.Session() def save_last_response_to_file(self, filename): """Saves the body of the last response to a file @param filename: Filename to save to @return: Returns False if there is an OS error, True if successful """ response = self.get_last_response() return self.save_response_to_file(response, filename) def save_response_to_file(self, response, filename): """Saves the body of the last response to a file @param filename: Filename to save to @return: Returns False if there is an OS error, True if successful """ try: last_response = self.get_last_response() with open(filename, 'w') as f: f.write(last_response.content) except OSError, e: return False return True
Python
0
@@ -250,16 +250,36 @@
  = 'PUT'%0A
+    PATCH = 'PATCH'%0A
     DELE
@@ -2037,32 +2037,130 @@
 **coyote_args)%0A%0A
+        elif method == self.PATCH:%0A            response = self.session.patch(uri, **coyote_args)%0A%0A
         elif met
28e64a576a25b7fb41997da8ecfb4472d9adee38
simplify main greenlet caching
greenhouse/compat.py
greenhouse/compat.py
import os
import sys

try:
    from greenlet import greenlet, GreenletExit
except ImportError, error:
    try:
        from py.magic import greenlet
        GreenletExit = greenlet.GreenletExit
    except ImportError:
        # suggest standalone greenlet, not the old py.magic.greenlet
        raise error

__all__ = ["main_greenlet", "GreenletExit"]

# it's conceivable that we might not be in the main greenlet at import time,
# so chase the parent tree until we get to it
def _find_main():
    glet = greenlet.getcurrent()
    while glet.parent:
        glet = glet.parent
    return glet
main_greenlet = _find_main()
Python
0.000269
@@ -473,45 +473,21 @@
 it%0A
-def _find_main():%0A    glet = 
+main_
 greenlet
 .get
@@ -482,17 +482,16 @@
 greenlet
-.
+ = 
 getcurre
@@ -502,47 +502,24 @@
 t()%0A
-    
 while 
-glet.parent:%0A        glet = g
+main_green
 let.
@@ -528,25 +528,14 @@
 rent
+:
 %0A
-    return glet%0A
+    
 main
@@ -550,17 +550,25 @@
 t = 
-_find_main()
+main_greenlet.parent
 %0A
ced03203126b9ad47a74d173627f4db8ed7c2e92
change names
model_flow/model_trainer.py
model_flow/model_trainer.py
import tensorflow as tf


def average_gradients(grads_list, loss_list):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        grads_list: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been
        averaged across all towers.
    """
    with tf.variable_scope('average_grads'):
        average_loss = tf.reduce_mean(loss_list)
        average_grads = []
        for grad_and_vars in zip(*grads_list):
            # Note that each grad_and_vars looks like the following:
            #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
            grads = []
            for g, _ in grad_and_vars:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g, 0)

                # Append on a 'tower' dimension which we will average over below.
                grads.append(expanded_g)

            # Average over the 'tower' dimension.
            grad = tf.concat(grads, 0)
            grad = tf.reduce_mean(grad, 0)

            # Keep in mind that the Variables are redundant because they are shared
            # across towers. So .. we will just return the first tower's pointer to
            # the Variable.
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
    return average_loss, average_grads


def var_getter(device):
    def custom_getter(getter, name, *args, **kwargs):
        with tf.device(device):
            return getter(name, *args, **kwargs)


def single_grad(model, opt, batch_data, is_train, scope, reuse):
    custom_getter = var_getter('/cpu:0')
    with tf.name_scope(scope), tf.variable_scope('', custom_getter=custom_getter, reuse=reuse):
        with tf.variable_scope('network'):
            output = model.model_infer(batch_data, is_train)
            _ = model.model_loss(batch_data, output)
        loss = tf.add_n(tf.get_collection("losses", scope), name="total_loss")
        if is_train:
            grads = opt.compute_gradients(loss)
        else:
            grads = None
    return loss, grads


def multi_grads(model, num_gpus, train_input=None, test_input=None):
    grads_list = list()
    loss_list = list()
    opt = model.model_optimizer()
    scope = model.scope
    grads = None
    loss = None
    test_loss = None
    is_train = False
    if train_input is not None:
        is_train = True
        if num_gpus >= 1:
            for i in xrange(num_gpus):
                with tf.device('/gpu:%d' % i):
                    loss, grads = single_grad(model, opt, train_input[i], is_train, "%s_train_%i"%(scope,i), reuse=(i>0))
                    loss_list.append(loss)
                    grads_list.append(grads)
        else:
            with tf.device('/cpu'):
                loss, grads = single_grad(model, opt, train_input, is_train, scope, reuse=False)
                grads_list.append(grads)
                loss_list.append(loss)
        loss, grads = average_gradients(grads_list, loss_list)

    if test_input is not None:
        reuse = is_train
        if num_gpus >= 1:
            with tf.device('/gpu:%d' % 0):
                test_loss, test_grads = single_grad(model, opt, test_input[i], False, "%s_test"%(scope), reuse)
        else:
            with tf.device('/cpu:0'):
                test_loss, _ = single_grad(model, opt, test_input, False, "%s_test"%(scope), reuse)
    return loss, grads, test_loss


def model_trainer(model, num_gpus, train_input=None, test_input=None):
    """ Function for training the model.
    """
    with tf.device('/cpu:0'):
        train_op = None
        loss = None
        test_loss = None
        loss, grads, test_loss = multi_grads(model, num_gpus, train_input, test_input)
        if grads is not None:
            opt = model.model_optimizer()
            with tf.variable_scope('trainer'):
                global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
                apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
                variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
                variables_averages_op = variable_averages.apply(tf.trainable_variables())
                train_op = tf.group(apply_gradient_op, variables_averages_op)
                train_op = apply_gradient_op
    return train_op, loss, test_loss
Python
0.000075
@@ -2173,17 +2173,26 @@
         
-_
+model_loss
  = model
@@ -2219,24 +2219,132 @@
 ta, output)%0A
+            tf.add_to_collection(%22losses%22, model_loss)%0A%0A        # The losses will also store weight decay loss.%0A
 loss
da8aa20292e0df938f0e49884b21f08034b9f441
fix typo
google_auth/authentication.py
google_auth/authentication.py
# -*- coding: utf-8 -*-
import requests
import httplib2
from datetime import datetime, timedelta

from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.timezone import make_aware

from rest_framework import exceptions, authentication, HTTP_HEADER_ENCODING
from rest_framework.authentication import BaseAuthentication, get_authorization_header

from .models import get_users_by_email, get_google_auth_user_by_app_token

token_verification_url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token={}'
refresh_token_url = 'https://www.googleapis.com/oauth2/v4/token'
client_id = getattr(settings, 'GOOGLE_AUTH_CLIENT_ID', '')
client_secret = getattr(settings, 'GOOGLE_AUTH_CLIENT_SECRET', '')


class GoogleAuthBackend(object):
    """
    Authenticate all requests against google, so that when the access token
    is revoked, the user will lose access immediately.
    Pass the access token in the header of the request, like:
    Authentication: token TOKEN
    """
    def authenticate(self, request, token=None):
        r = requests.get(token_verification_url.format(token))
        if r.status_code != 200:
            return None
        acc_info = r.json()
        email = acc_info.get('email','')
        user = get_users_by_email(email)
        return user

    def get_user(self, user_id):
        try:
            return get_user_model.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None


class GoogleAuthAuthentication(BaseAuthentication):
    """
    Returns two-tuple of (user, token) if authentication succeeds,
    or None otherwise.
    """
    def authenticate(self, request):
        auth_header = get_authorization_header(request).decode(HTTP_HEADER_ENCODING)
        auth = auth_header.split()
        if not auth or auth[0].lower() != 'token':
            return None
        if len(auth)!=2:
            msg = 'Invalid authorization header.'
            raise exceptions.AuthenticationFailed(msg)
        app_token = auth[1]
        google_auth_user = get_google_auth_user_by_app_token(app_token)
        if not google_auth_user:
            return None
        if google_auth_user.token_expiry < make_aware(datetime.now()):
            google_auth_use = refresh_access_token(google_auth_user)
        token = google_auth_user.access_token
        r = requests.get(token_verification_url.format(token))
        if r.status_code != 200:
            return None
        acc_info = r.json()
        email = acc_info.get('email','')
        user = get_users_by_email(email)
        return user, token


def refresh_access_token(google_auth_user):
    r = requests.post(refresh_token_url, data = {'client_id':client_id, 'client_secret':client_secret, 'refresh_token':google_auth_user.refresh_token, 'grant_type':'refresh_token'})
    if r.status_code != 200:
        raise Exception('user google auth token is expired and unnable to be refreshed')
    res = r.json()
    google_auth_user.access_token = res.get('access_token','')
    google_auth_user.token_expiry = make_aware( datetime.now() + timedelta(seconds= res.get('expires_in',0) - 10 ))
    google_auth_user.save()
    return google_auth_user
Python
0.999991
@@ -2247,16 +2247,17 @@
 auth_use
+r
  = refre
512f8a09798eb79528ea6f8a59ec752f2c4ba879
missing output value
lib/svtplay_dl/service/oppetarkiv.py
lib/svtplay_dl/service/oppetarkiv.py
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import copy
import hashlib
from urllib.parse import urlparse, parse_qs

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.utils.text import ensure_unicode, decode_html_entities
from svtplay_dl.subtitle import subtitle


class OppetArkiv(Service, OpenGraphThumbMixin):
    supported_domains = ['oppetarkiv.se']

    def get(self):
        vid = self.find_video_id()
        if vid is None:
            yield ServiceError("Cant find video id for this video")
            return

        url = "http://api.svt.se/videoplayer-api/video/{0}".format(vid)
        data = self.http.request("get", url)
        if data.status_code == 404:
            yield ServiceError("Can't get the json file for {0}".format(url))
            return

        data = data.json()
        if "live" in data:
            self.config.set("live", data["live"])

        self.outputfilename(data)

        if "subtitleReferences" in data:
            for i in data["subtitleReferences"]:
                if i["format"] == "websrt":
                    yield subtitle(copy.copy(self.config), "wrst", i["url"])

        if len(data["videoReferences"]) == 0:
            yield ServiceError("Media doesn't have any associated videos (yet?)")
            return

        for i in data["videoReferences"]:
            parse = urlparse(i["url"])
            query = parse_qs(parse.query)
            if i["format"] == "hls" or i["format"] == "ios":
                streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = hlsparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]
            if i["format"] == "hds" or i["format"] == "flash":
                match = re.search(r"\/se\/secure\/", i["url"])
                if not match:
                    streams = hdsparse(self.config, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}), i["url"], output=self.output)
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
                    if "alt" in query and len(query["alt"]) > 0:
                        alt = self.http.get(query["alt"][0])
                        if alt:
                            streams = hdsparse(self.config, self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}), alt.request.url, output=self.output)
                            if streams:
                                for n in list(streams.keys()):
                                    yield streams[n]
            if i["format"] == "dash264" or i["format"] == "dashhbbtv":
                streams = dashparse(self.config, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = dashparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]

    def find_video_id(self):
        match = re.search('data-video-id="([^"]+)"', self.get_urldata())
        if match:
            return match.group(1)
        return None

    def find_all_episodes(self, config):
        page = 1
        data = self.get_urldata()
        match = re.search(r'"/etikett/titel/([^"/]+)', data)
        if match is None:
            match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
            if match is None:
                log.error("Couldn't find title")
                return
        program = match.group(1)
        episodes = []

        n = 0
        if self.config.get("all_last") > 0:
            sort = "tid_fallande"
        else:
            sort = "tid_stigande"

        while True:
            url = "http://www.oppetarkiv.se/etikett/titel/{0}/?sida={1}&sort={2}&embed=true".format(program, page, sort)
            data = self.http.request("get", url)
            if data.status_code == 404:
                break

            data = data.text
            regex = re.compile(r'href="(/video/[^"]+)"')
            for match in regex.finditer(data):
                if n == self.config.get("all_last"):
                    break
                episodes.append("http://www.oppetarkiv.se{0}".format(match.group(1)))
                n += 1
            page += 1

        return episodes

    def outputfilename(self, data):
        id = hashlib.sha256(data["programVersionId"].encode("utf-8")).hexdigest()[:7]
        self.output["id"] = id

        datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
        if not datatitle:
            return None
        datat = decode_html_entities(datatitle.group(1))
        self.output["title"] = self.name(datat)
        self.seasoninfo(datat)

    def seasoninfo(self, data):
        match = re.search("S.song (\d+) - Avsnitt (\d+)", data)
        if match:
            self.output["season"] = int(match.group(1))
            self.output["episode"] = int(match.group(2))
        else:
            match = re.search("Avsnitt (\d+)", data)
            if match:
                self.output["episode"] = int(match.group(1))

    def name(selfs, data):
        if data.find(" - S.song") > 0:
            title = data[:data.find(" - S.song")]
        else:
            if data.find(" - Avsnitt") > 0:
                title = data[:data.find(" - Avsnitt")]
            else:
                title = data
        return title
Python
0.999983
@@ -1780,102 +1780,8 @@
 s%22:%0A
-                streams = hlsparse(self.config, self.http.request(%22get%22, i%5B%22url%22%5D), i%5B%22url%22%5D)%0A
     
@@ -1888,32 +1888,146 @@
 ield streams%5Bn%5D%0A
+                streams = hlsparse(self.config, self.http.request(%22get%22, i%5B%22url%22%5D), i%5B%22url%22%5D, output=self.output)%0A
                 
38989bf6e449bf2ada1ac4729564d9feacbc7b90
use parseargs for cli processing
activity/activitylog.py
activity/activitylog.py
from peewee import *
from pprint import pprint
from copy import copy
import sys, getopt, os, inspect

db = SqliteDatabase('activitylog.db')

################
# Model classes
################
class BaseModel(Model):
    is_abstract = BooleanField(default=False)

    class Meta:
        database = db

class NamedModel(BaseModel):
    name = CharField(primary_key=True)

class Person(NamedModel):
    first = CharField()
    last = CharField()
    born = DateField()

class ActivityType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class MeasurementType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class Location(NamedModel):
    address = CharField()

class Entry(BaseModel):
    person = ForeignKeyField(Person)
    location = ForeignKeyField(Location)
    props = CharField(null=True)

class Activity(Entry):
    start = DateTimeField()
    end = DateTimeField()
    activityType = ForeignKeyField(ActivityType)
    distance = IntegerField(default=0)

class Measurement(Entry):
    time = DateTimeField()
    measurementType = ForeignKeyField(MeasurementType)
    value = DecimalField()

############
# Functions
############
def main(argv):
    try:
        opts, args = getopt.getopt(argv, "", ["list=", "list-all"])
        if not opts:
            usage()
    except getopt.GetoptError:
        usage()

    for opt, arg in opts:
        if opt == '--list':
            lsModel(arg)
        elif opt == '--list-all':
            for table in db.get_tables():
                print table.title()
        else:
            usage()

def usage():
    script = os.path.basename(__file__)
    print "%s --ls <modelClass>" % script
    sys.exit(2)

def lsModel(clazzStr):
    clazz = globals()[clazzStr]

    for item in clazz.select():
        if item.is_abstract == False:
            attrs = copy(vars(item)['_data'])
            del(attrs['is_abstract'])
            pprint(attrs)

if __name__ == '__main__':
    main(sys.argv[1:])
Python
0
@@ -77,16 +77,8 @@
 sys, 
-getopt, 
 os,
@@ -85,16 +85,48 @@
  inspect
+%0Aimport optparse%0Aimport argparse
 %0A%0Adb = S
@@ -1256,80 +1256,100 @@
 
-try:%0A        opts, args = getopt.getopt(argv, %22%22, %5B%22list=%22, %22
+args = parse_args();%0A%0A    if args.list:%0A        lsModel(args.list)%0A    elif (args.
 list
--
+_
 all
-%22%5D
 )
+:
 %0A
@@ -1357,279 +1357,477 @@
 
-if not opts:%0A            usage()%0A    except getopt.GetoptError:%0A        usage(
+for table in db.get_tables():%0A            print table.title()%0A    else:%0A        script = os.path.basename(__file__
 )%0A
-%0A    for opt, arg in opts:%0A        if opt == '--list':%0A            lsModel(arg)%0A        elif opt == '--list-all':%0A            for table in db.get_tables():%0A                print table.title()
+        print %22%25s: you must specify an option%22 %25 script%0A        exit(2)%0A%0Adef parse_args():%0A    parser = argparse.ArgumentParser()%0A    parser.add_argument(%22--list%22, metavar='%3Cmodel-class%3E', dest='list',%0A                        help='List model objects for the specified class')%0A    parser.add_argument('--list-all', dest='list_all', action='store_true', %0A
@@ -1827,30 +1827,24 @@
 ', %0A
-        else:%0A
 
@@ -1847,126 +1847,73 @@
 
-usage()%0A%0Adef usage():%0A    script = os.path.basename(__file__)%0A    print %22%25s --ls %3C
+                        help='List all 
 model
-C
+ c
 lass
-%3E%22 %25 script%0A    sys.exit(2
+es')%0A%0A    return parser.parse_args(
 )%0A%0Ad
4e7c07d1cf1c00a0ff166fb8f314b684bbefc339
Add more customizations
graphics/wheel.py
graphics/wheel.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

"""
Wheel plot that shows continuous data in radial axes.
"""

import numpy as np
import sys

from collections import OrderedDict
from itertools import groupby

from jcvi.graphics.base import plt, savefig, normalize_axes, set2
from jcvi.apps.base import OptionParser, ActionDispatcher


R = 30


def main():

    actions = (
        ('wheel', 'wheel plot that shows continuous data in radial axes'),
            )
    p = ActionDispatcher(actions)
    p.dispatch(globals())


def closed_plot(ax, theta, r, *args, **kwargs):
    theta = list(theta) + [theta[0]]
    r = list(r) + [r[0]]
    ax.plot(theta, r, *args, **kwargs)


def sector(ax, theta_min, theta_max, theta_pad, r, *args, **kwargs):
    theta = np.linspace(theta_min - theta_pad, theta_max + theta_pad, num=100)
    r = len(theta) * [r]
    theta = list(theta) + [0]
    r = list(r) + [-R]
    closed_plot(ax, theta, r, *args, **kwargs)


def parse_data(datafile):
    data = {}
    fp = open(datafile)
    for row in fp:
        label, score, percentile = row.split(",")
        label = label.strip()
        score = float(score.strip())
        data[label] = score
    return data


def parse_groups(groupsfile):
    groups = OrderedDict()
    fp = open(groupsfile)
    for row in fp:
        group, label = row.split(",")
        group = group.strip()
        label = label.strip()
        groups[label] = group
    return groups


def wheel(args):
    """
    %prog wheel datafile.csv groups.csv

    Wheel plot that shows continous data in radial axes.
    """
    p = OptionParser(wheel.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="10x10")

    if len(args) != 2:
        sys.exit(not p.print_help())

    datafile, groupsfile = args
    df = parse_data(datafile)
    groups = parse_groups(groupsfile)
    labels = [g for g in groups if g in df]
    print labels
    df = [df[g] for g in labels]
    print df
    groups = [groups[g] for g in labels]
    print groups

    pf = datafile.rsplit(".", 1)[0]
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])

    categories = len(df)
    ax = plt.subplot(111, projection='polar')

    brewer = ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"]
    # Baseline
    theta = np.linspace(0, 2 * np.pi, endpoint=False, num=categories)
    _theta = np.linspace(0, 2 * np.pi)
    _r = len(_theta) * [0]
    closed_plot(ax, _theta, _r, "k:", lw=2)

    # Grid
    for t in theta:
        ax.plot([t, t], [-R, R], color="gainsboro")
    ax.axis('off')

    # Sectors (groupings)
    gg = {}
    for group, c in groupby(enumerate(groups), lambda x: x[1]):
        c = [x[0] for x in list(c)]
        gg[group] = c
    print gg

    theta_interval = 2 * np.pi / categories
    theta_pad = theta_interval / 2 * .9
    for color, group in zip(brewer, gg.values()):
        tmin, tmax = min(group), max(group)
        sector(ax, theta[tmin], theta[tmax], theta_pad, R * .95, "-", color=color)

    # Data
    r = df
    closed_plot(ax, theta, r, color="lightslategray")
    for color, group in zip(brewer, gg.values()):
        color_theta = [theta[x] for x in group]
        color_r = [r[x] for x in group]
        ax.plot(color_theta, color_r, "o")

    ax.set_rmin(-R)
    ax.set_rmax(R)

    normalize_axes(root)

    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)


if __name__ == '__main__':
    main()
Python
0
@@ -264,14 +264,8 @@
 axes
-, set2
 %0Afro
@@ -1661,13 +1661,11 @@
 ze=%22
-10x10
+5x5
 %22)%0A%0A
@@ -2355,16 +2355,17 @@
 15928%22%5D%0A
+%0A
     # Ba
@@ -2681,16 +2681,42 @@
 upings)%0A
+    collapsed_groups = %5B%5D%0A
     gg =
@@ -2716,18 +2716,18 @@
    gg = 
-%7B%7D
+%5B%5D
 %0A    for
@@ -2831,21 +2831,59 @@
         
-gg%5Bgroup%5D = c
+collapsed_groups.append(group)%0A        gg.append(c)
 %0A
@@ -3011,33 +3011,24 @@
 p(brewer, gg
-.values()
 ):%0A        t
@@ -3162,16 +3162,22 @@
 or=color
+, lw=2
 )%0A%0A    #
@@ -3289,17 +3289,8 @@
 , gg
-.values()
 ):%0A
@@ -3417,16 +3417,29 @@
 r_r, %22o%22
+, color=color
 )%0A%0A    a
@@ -3473,16 +3473,260 @@
 max(R)%0A%0A
+    # Labels%0A    from math import cos, sin%0A    for i, label in enumerate(labels):%0A        tl = theta%5Bi%5D%0A        x, y = .5 + .5 * cos(tl), .5 + .5 * sin(tl)%0A        root.text(x, y, label)%0A        print x, y, label%0A%0A    root.text(.5, .5, %22TEST%22)%0A
     norm
4565c961eca6f0d904d010cbdbf3d42fe2a6080b
Add persistence test
test/rlite.py
test/rlite.py
# coding=utf-8
from unittest import *
import hirlite
import sys

class RliteTest(TestCase):
    def setUp(self):
        self.rlite = hirlite.Rlite()

    def test_none(self):
        self.assertEquals(None, self.rlite.command('get', 'hello'))

    def test_ok(self):
        self.assertEquals(True, self.rlite.command('set', 'hello', 'world'))

    def test_string(self):
        self.rlite.command('set', 'hello', 'world')
        self.assertEquals('world', self.rlite.command('get', 'hello'))

    def test_integer(self):
        self.assertEquals(2, self.rlite.command('lpush', 'list', 'value', 'other value'))

    def test_error(self):
        self.assertIsInstance(self.rlite.command('set', 'key'), hirlite.HirliteError)

    def test_array(self):
        self.rlite.command('rpush', 'mylist', '1', '2', '3')
        self.assertEquals(self.rlite.command('lrange', 'mylist', 0, -1), ['1', '2', '3'])
Python
0.00001
@@ -38,23 +38,23 @@
 %0Aimport 
-hirlite
+os.path
 %0Aimport
@@ -58,16 +58,32 @@
 rt sys%0A%0A
+import hirlite%0A%0A
 %0Aclass R
@@ -916,8 +916,590 @@
 , '3'%5D)%0A
+%0A%0Aclass PersistentTest(TestCase):%0A    PATH = 'rlite.rld'%0A    def setUp(self):%0A        if os.path.exists(PersistentTest.PATH):%0A            os.unlink(PersistentTest.PATH)%0A        self.rlite = hirlite.Rlite(PersistentTest.PATH)%0A%0A    def tearDown(self):%0A        if os.path.exists(PersistentTest.PATH):%0A            os.unlink(PersistentTest.PATH)%0A%0A    def test_write_close_open(self):%0A        self.rlite.command('set', 'key', 'value')%0A        self.rlite = hirlite.Rlite(PersistentTest.PATH) # close db, open a new one%0A        self.assertEquals('value', self.rlite.command('get', 'key'))%0A
7c24e0c3670a9d63478d0c8a095eef78f2a75142
Print output of git.pull()
clowder/utility/git_utilities.py
clowder/utility/git_utilities.py
"""Git utilities""" import os, sys from git import Repo from termcolor import colored, cprint # Disable errors shown by pylint for sh.git # pylint: disable=E1101 def git_clone_url_at_path(url, repo_path): """Clone git repo from url at path""" if not os.path.isdir(os.path.join(repo_path, '.git')): if not os.path.isdir(repo_path): os.makedirs(repo_path) repo_path_output = colored(repo_path, 'cyan') print(' - Cloning repo at: ' + repo_path_output) repo = Repo.init(repo_path) origin = repo.create_remote('origin', url) origin.fetch() master_branch = repo.create_head('master', origin.refs.master) master_branch.set_tracking_branch(origin.refs.master) master_branch.checkout() def git_current_branch(repo_path): """Return currently checked out branch of project""" repo = Repo(repo_path) git = repo.git return str(git.rev_parse('--abbrev-ref', 'HEAD')).rstrip('\n') def git_current_ref(repo_path): """Return current ref of project""" repo = Repo(repo_path) if repo.head.is_detached: return git_current_sha(repo_path) else: return git_current_sha(repo_path) def git_current_sha(repo_path): """Return current git sha for checked out commit""" repo = Repo(repo_path) git = repo.git return str(git.rev_parse('HEAD')).rstrip('\n') def git_fix(repo_path): """Commit new main clowder.yaml from current changes""" repo = Repo(repo_path) git = repo.git git.add('clowder.yaml') git.commit('-m', 'Update clowder.yaml') git.pull() git.push() def git_fix_version(repo_path, version): """Commit fixed version of clowder.yaml based on current branches""" repo = Repo(repo_path) git = repo.git git.add('versions') git.commit('-m', 'Fix versions/' + version + '/clowder.yaml') git.pull() git.push() def git_groom(repo_path): """Sync clowder repo with current branch""" repo = Repo(repo_path) git = repo.git git.fetch('--all', '--prune', '--tags') if not git_is_detached(repo_path): print(' - Pulling latest changes') git.pull() else: print(' - HEAD is detached, nothing to pull') def git_herd(repo_path, ref): """Sync git repo with default branch""" repo = Repo(repo_path) git = repo.git git.fetch('--all', '--prune', '--tags') project_ref = git_truncate_ref(ref) if git_current_branch(repo_path) != project_ref: project_output = colored(project_ref, 'magenta') print(' - Not on default branch, checking out ' + project_output) git.checkout(project_ref) print(' - Pulling latest changes') git.pull() def git_herd_version(repo_path, version, ref): """Sync fixed version of repo at path""" repo = Repo(repo_path) git = repo.git fix_branch = 'clowder-fix/' + version branch_output = colored(fix_branch, 'magenta') try: if repo.heads[fix_branch]: if repo.active_branch != repo.heads[fix_branch]: print(' - Checking out existing branch: ' + branch_output) git.checkout(fix_branch) except: print(' - No existing branch, checking out: ' + branch_output) git.checkout('-b', fix_branch, ref) def git_is_detached(repo_path): """Check if HEAD is detached""" repo = Repo(repo_path) return repo.head.is_detached def git_is_dirty(repo_path): """Check if repo is dirty""" repo = Repo(repo_path) return repo.is_dirty() def git_litter(repo_path): """Discard current changes in repository""" repo = Repo(repo_path) if repo.is_dirty(): print(' - Discarding current changes') repo.head.reset(index=True, working_tree=True) else: print(' - No changes to discard') def git_stash(repo_path): """Stash current changes in repository""" repo = Repo(repo_path) if repo.is_dirty(): print(' - Stashing current changes') repo.git.stash() else: print(' - No changes to stash') def 
git_truncate_ref(ref): """Return bare branch, tag, or sha""" git_branch = "refs/heads/" git_tag = "refs/tags/" if ref.startswith(git_branch): length = len(git_branch) elif ref.startswith(git_tag): length = len(git_tag) else: length = 0 return ref[length:] def git_validate_repo_state(repo_path): """Validate repo state""" git_path = os.path.join(repo_path, '.git') if not os.path.isdir(git_path): return if git_is_dirty(repo_path): repo_output = colored(repo_path, 'cyan') print(repo_output + ' is dirty') print('Please stash, commit, or discard your changes before running clowder') print('') cprint('Exiting...', 'red') print('') sys.exit() # if git_untracked_files(repo_path): # print(repo_path + ' has untracked files.') # print('Please remove these files or add to .gitignore') # print('') # cprint('Exiting...', 'red') # print('') # sys.exit() # if git_is_detached(repo_path): # repo_output = colored(repo_path, 'cyan') # print(repo_output + ' HEAD is detached') # print('Please point your HEAD to a branch before running clowder') # print('') # cprint('Exiting...', 'red') # print('') # sys.exit() def git_untracked_files(repo_path): """Check if there are untracked files""" repo = Repo(repo_path) if repo.untracked_files: return True else: return False def process_output(line): """Utility function for command output callbacks""" stripped_line = str(line).rstrip('\n') print(stripped_line)
Python
0.99994
@@ -2145,24 +2145,30 @@
 s')%0A        
+print(
 git.pull()%0A
@@ -2161,24 +2161,25 @@
 t(git.pull()
+)
 %0A    else:%0A
@@ -2693,24 +2693,30 @@
 anges')%0A    
+print(
 git.pull()%0A%0A
@@ -2713,16 +2713,17 @@
 t.pull()
+)
 %0A%0Adef gi