Dataset schema (each record below lists these fields in this order):

  commit        string, length 40..40 (SHA-1 hash)
  subject       string, length 1..3.25k (commit message subject)
  old_file      string, length 4..311 (file path before the change)
  new_file      string, length 4..311 (file path after the change)
  old_contents  string, length 0..26.3k (file contents before the change)
  lang          class label, 3 values
  proba         float64, range 0..1
  diff          string, length 0..7.82k (character-offset diff)
4a1b670cd49f458c44bed638e2f9ecace211883a
fix and update first user as admin
websitemixer/plugins/Install/Setup.py
websitemixer/plugins/Install/Setup.py
import os
from flask import render_template, request, redirect
from websitemixer import app, db, models


@app.route('/setup/step1/')
def setup1():
    return render_template("Install/step1.html")


@app.route('/setup/step2/',methods=['POST'])
def setup2():
    secretkey = os.urandom(24).encode('hex')
    appname = request.form['appname']
    dbname = request.form['dbname']
    dbuser = request.form['dbuser']
    dbpwd = request.form['dbpwd']
    dbsrv = request.form['dbsrv']
    with open('config.py', 'w') as file:
        file.seek(0)
        file.truncate()
        file.write("import os\n")
        file.write("basedir = os.path.abspath(os.path.dirname(__file__))\n\n")
        file.write("SECRET_KEY = '"+secretkey+"'\n")
        file.write("UPLOAD_FOLDER = basedir+'/websitemixer/static/upload/'\n")
        file.write("ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'zip'])\n\n")
        if request.form['dbmeth'] == 'mysql':
            file.write("SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://"+dbuser+":"+dbpwd+"@"+dbsrv+":3306/"+dbname+"'\n")
        elif request.form['dbmeth'] == 'postgres':
            file.write("SQLALCHEMY_DATABASE_URI = 'postgresql://"+dbuser+":"+dbpwd+"@"+dbsrv+":5432/"+dbname+"'\n")
        else:
            file.write("SQLALCHEMY_DATABASE_URI = 'sqlite:///'+os.path.join(basedir,'"+appname+".db')\n")
        file.close()
    return render_template("Install/step2.html")


@app.route('/setup/step3/',methods=['POST'])
def setup3():
    db.drop_all()
    db.create_all()
    sitename = request.form['sitename']
    sitedesc = request.form['sitedesc']
    admuser = request.form['admuser']
    admpwd1 = request.form['admpwd1']
    admpwd2 = request.form['admpwd2']
    admemail = request.form['admemail']
    a = models.User(admuser, admpwd1, admemail)
    db.session.add(a)
    update = Setting.query.filter_by(username=admuser).update(dict(is_admin=1))
    a = models.Setting('siteName',sitename)
    db.session.add(a)
    a = models.Setting('siteSubheading',sitedesc)
    db.session.add(a)
    a = models.Setting('theme','Base')
    db.session.add(a)
    a = models.Post(admuser, 'Hello World!', '/hello-world/', '<p>This is your first post! You can delete this and start posting!</p>', '', '', 'Hello World, Welcome')
    db.session.add(a)
    a = models.Page('About', '/about/', '<p>It\'s an about page!</p>', '', '')
    db.session.add(a)
    a = models.Page('Contact', '/contact/', '<p>It\'s a contact page!</p>', '', '')
    db.session.add(a)
    db.session.commit()
    return redirect('/')
Python
0
@@ -1852,23 +1852,27 @@ pdate = -Setting +models.User .query.f @@ -1914,11 +1914,8 @@ ict( -is_ admi
9582eefa22c5d1e5b8a71154981ca8e956bfa6f6
throw error on missing from clause
active_data/actions/__init__.py
active_data/actions/__init__.py
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals

import flask
from flask import Response

from active_data import record_request
from active_data.actions import save_query
from jx_base import container
from jx_elasticsearch import meta
from jx_elasticsearch.meta import ElasticsearchMetadata
from jx_python.containers.list_usingPythonList import ListContainer
from mo_dots import is_container
from mo_dots import is_data, set_default, split_field
from mo_future import is_text, first
from mo_json import STRUCT, value2json
from mo_logs import Log
from mo_threads import Till
from mo_times import Timer
from mo_times.dates import Date
from mo_times.durations import MINUTE

DEBUG = True

QUERY_TOO_LARGE = "Query is too large"


def send_error(active_data_timer, body, e):
    status = 400
    if QUERY_TOO_LARGE in e:
        status = 413
    record_request(flask.request, None, body, e)
    Log.warning("Could not process\n{{body}}", body=body.decode("latin1"), cause=e)
    e = e.__data__()
    e.meta.timing.total = active_data_timer.duration.seconds

    # REMOVE TRACES, BECAUSE NICER TO HUMANS
    # def remove_trace(e):
    #     e.trace = e.trace[0:1:]
    #     for c in listwrap(e.cause):
    #         remove_trace(c)
    # remove_trace(e)

    return Response(value2json(e).encode("utf8"), status=status)


def test_mode_wait(query):
    """
    WAIT FOR METADATA TO ARRIVE ON INDEX
    :param query: dict() OF REQUEST BODY
    :return: nothing
    """
    if not query["from"]:
        return

    try:
        if query["from"].startswith("meta."):
            return

        alias = split_field(query["from"])[0]
        after = Date.now()
        require_cardinality = meta.ENABLE_META_SCAN
        with Timer(
            "Get columns for {{table}} after {{after}}",
            {"table": alias, "after": after},
            verbose=DEBUG,
        ):
            metadata_manager = find_container(alias, after=after).namespace
            timeout = Till(seconds=MINUTE.seconds)
            while not timeout:
                # GET FRESH VERSIONS
                cols = metadata_manager.get_columns(
                    table_name=alias, after=after, timeout=timeout
                )
                not_ready = [
                    c
                    for c in cols
                    if c.jx_type not in STRUCT
                    and (
                        after >= c.last_updated
                        or (require_cardinality and c.cardinality == None)
                    )
                ]
                if not_ready:
                    Log.note(
                        "wait for column (table={{col.es_index}}, name={{col.es_column}}, cardinality={{col.cardinality|json}}, last_updated={{col.last_updated|datetime}}) metadata to arrive",
                        col=first(not_ready),
                    )
                else:
                    break
                Till(seconds=1).wait()
    except Exception as e:
        Log.warning("could not pickup columns", cause=e)


namespace = None
# TODO: The container cache is a hack until a global namespace/container is built
container_cache = {}  # MAP NAME TO Container OBJECT


def find_container(frum, after):
    """
    :param frum:
    :return:
    """
    global namespace
    if not namespace:
        if not container.config.default.settings:
            Log.error(
                "expecting jx_base.container.config.default.settings to contain default elasticsearch connection info"
            )
        namespace = ElasticsearchMetadata(container.config.default.settings)
    cs = namespace.get_columns(frum, after=after)  # FORCE A RELOAD

    if is_text(frum):
        if frum in container_cache:
            return container_cache[frum]

        path = split_field(frum)
        if path[0] == "meta":
            if path[1] == "columns":
                return namespace.meta.columns.denormalized()
            elif path[1] == "tables":
                return namespace.meta.tables
            else:
                Log.error("{{name}} not a recognized table", name=frum)

        type_ = container.config.default.type
        fact_table_name = path[0]

        settings = set_default(
            {"alias": fact_table_name, "name": frum, "exists": True},
            container.config.default.settings,
        )
        settings.type = None

        output = container.type2container[type_](settings)
        container_cache[frum] = output
        return output
    elif is_data(frum) and frum.type and container.type2container[frum.type]:
        # TODO: Ensure the frum.name is set, so we capture the deep queries
        if not frum.type:
            Log.error("Expecting from clause to have a 'type' property")
        return container.type2container[frum.type](frum.settings)
    elif is_data(frum) and (frum["from"] or is_container(frum["from"])):
        from jx_base.query import QueryOp

        return QueryOp.wrap(frum)
    elif is_container(frum):
        return ListContainer("test_list", frum)
    else:
        return frum
Python
0.000001
@@ -3912,12 +3912,116 @@ -cs = +if not frum:%0A Log.error(%22expecting json query expression with from clause%22)%0A%0A # FORCE A RELOAD%0A nam @@ -4057,34 +4057,16 @@ r=after) - # FORCE A RELOAD %0A%0A if
9fec79f71f6dbf80d11989fbbfc2bed43668b75d
Use skimage for circle definition
python/thunder/extraction/feature/methods/localmax.py
python/thunder/extraction/feature/methods/localmax.py
from numpy import cos, sin, pi, array, sqrt

from thunder.extraction.feature.base import FeatureMethod, FeatureAlgorithm
from thunder.extraction.feature.creators import MeanFeatureCreator
from thunder.extraction.source import SourceModel, Source


class LocalMax(FeatureMethod):

    def __init__(self, **kwargs):
        algorithm = LocalMaxFeatureAlgorithm(**kwargs)
        creator = MeanFeatureCreator()
        super(self.__class__, self).__init__(algorithm, creator, **kwargs)


class LocalMaxFeatureAlgorithm(FeatureAlgorithm):
    """
    Find sources by identifying local maxima in an array.

    Will first find source centers, and then automatically define a circle
    around each center using the specified radius and resolution

    Parameters
    ----------
    minDistance : int, optional, default = 10
        Minimum distance between source centers

    maxSources : int, optional, deafut = None
        Maximum number of sources

    radius : scalar, optional, default=5
        Radius of circles defined around centers

    res : scalar, optional, deafult=10
        Number of points to use to define circles around centers
    """
    def __init__(self, minDistance=10, maxSources=None, radius=5, res=10, **extra):
        self.minDistance = minDistance
        if self.minDistance < 1:
            raise Exception("Cannot set minDistance less than 1, got %s" % minDistance)
        self.maxSources = maxSources
        self.radius = radius
        self.res = res

    def extract(self, im):
        """
        Extract sources from an image by finding local maxima.

        Parameters
        ----------
        im : ndarray
            The image or volume

        Returns
        -------
        A SourceModel with circular regions.
        """
        from numpy import ones, concatenate
        from skimage.feature import peak_local_max

        # extract local peaks
        if im.ndim == 2:
            peaks = peak_local_max(im, min_distance=self.minDistance, num_peaks=self.maxSources).tolist()
        else:
            peaks = []
            for i in range(0, im.shape[2]):
                tmp = peak_local_max(im[:, :, i], min_distance=self.minDistance, num_peaks=self.maxSources)
                peaks = peaks.append(concatenate((tmp, ones((len(tmp), 1)) * i), axis=1))

        # estimate circular regions from peak points
        def pointToCircle(center, radius):
            ccol = center[0]
            crow = center[1]
            r2 = radius * radius
            colrange = range(center[0] - radius + 1, center[0] + radius)
            rowrange = range(center[1] - radius + 1, center[1] + radius)
            pts = [[([c, r], radius - sqrt((c - ccol) ** 2 + (r - crow) ** 2))
                    for c in colrange if ((c - ccol) ** 2 + (r - crow) ** 2 < r2)] for r in rowrange]
            pts = concatenate(array(pts))
            k = map(lambda p: p[0], pts)
            v = map(lambda p: p[1], pts)
            return k, v

        sources = [pointToCircle(p, self.radius) for p in peaks]

        return SourceModel([Source(s[0], s[1]) for s in sources])
Python
0
@@ -1510,265 +1510,8 @@ m):%0A - %22%22%22%0A Extract sources from an image by finding local maxima.%0A%0A Parameters%0A ----------%0A im : ndarray%0A The image or volume%0A%0A Returns%0A -------%0A A SourceModel with circular regions.%0A %22%22%22%0A @@ -1600,16 +1600,56 @@ ocal_max +%0A from skimage.draw import circle %0A%0A @@ -2097,16 +2097,17 @@ # -estimate +construct cir @@ -2196,138 +2196,33 @@ -ccol = center%5B0%5D%0A crow = center%5B1%5D%0A r2 = radius * radius%0A colrange = range(center%5B0%5D - radius + 1 +rr, cc = circle(center%5B0%5D , ce @@ -2230,12 +2230,11 @@ ter%5B -0%5D + +1%5D, rad @@ -2255,397 +2255,69 @@ r -owrange = range(center%5B1%5D - radius + 1, center%5B1%5D + radius)%0A pts = %5B%5B(%5Bc, r%5D, radius - sqrt((c - ccol) ** 2 + (r - crow) ** 2))%0A for c in colrange if ((c - ccol) ** 2 + (r - crow) ** 2 %3C r2)%5D for r in rowrange%5D%0A pts = concatenate(array(pts))%0A k = map(lambda p: p%5B0%5D, pts)%0A v = map(lambda p: p%5B1%5D, pts)%0A return k, v%0A +eturn array(zip(rr, cc))%0A%0A # return circles as sources %0A @@ -2321,21 +2321,21 @@ -sou +ci rc +l es = %5Bpo @@ -2417,34 +2417,25 @@ rce( -s%5B0%5D, s%5B1%5D +c ) for -s +c in -sou +ci rc +l es%5D)
40fe16d058d18d2384be464ecefed1028edace17
Fix error on SASL PLAIN authentication
txircd/modules/ircv3_sasl_plain.py
txircd/modules/ircv3_sasl_plain.py
from txircd.modbase import Module
from base64 import b64decode

class SaslPlainMechanism(Module):
	def authenticate(self, user, authentication):
		try:
			authenticationID, authorizationID, password = b64decode(authentication[0]).split("\0")
		except TypeError:
			user.sendMessage(irc.ERR_SASLFAILED, ":SASL authentication failed")
			return False
		except ValueError:
			user.sendMessage(irc.ERR_SASLFAILED, ":SASL authentication failed")
			return False
		if self.ircd.servconfig["server_sasl_agent"] == "":
			if "sasl_agent" not in self.ircd.module_data_cache:
				user.sendMessage(irc.ERR_SASLFAILED, ":SASL authentication failed")
				return False
			return self.ircd.module_data_cache["sasl_agent"].authenticate(user, authenticationid=authenticationID, authorizationid=authorizationID, password=password)
		# TODO: The rest of this doesn't really make sense until s2s, but we'll return false for now since it's failing
		return False
	
	def bindSaslResult(self, user, successFunction, failureFunction):
		if self.ircd.servconfig["server_sasl_agent"] == "":
			if "sasl_agent" not in self.ircd.module_data_cache:
				user.sendMessage(irc.ERR_SASLFAILED, ":SASL authentication failed")
			return self.ircd.module_data_cache["sasl_agent"].bindSaslResult(user, successFunction, failureFunction)
		# TODO: server_sasl_agent stuff when s2s

class Spawner(object):
	def __init__(self, ircd):
		self.ircd = ircd
	
	def spawn(self):
		if "sasl_mechanisms" not in self.ircd.module_data_cache:
			self.ircd.module_data_cache["sasl_mechanisms"] = {}
		self.ircd.module_data_cache["sasl_mechanisms"]["PLAIN"] = SaslPlainMechanism().hook(self.ircd)
		return {}
	
	def cleanup(self):
		del self.ircd.module_data_cache["sasl_mechanisms"]["PLAIN"]
Python
0.000003
@@ -451,24 +451,75 @@ False%0A%09%09if +%22server_sasl_agent%22 not in self.ircd.servconfig or self.ircd.se @@ -1060,16 +1060,67 @@ ):%0A%09%09if +%22server_sasl_agent%22 not in self.ircd.servconfig or self.irc
026db0e635f0c82e1b24884cb768d53b7fadfc0c
use lots of connections for the pool
feedly/storage/cassandra/connection.py
feedly/storage/cassandra/connection.py
from pycassa.pool import ConnectionPool


def get_cassandra_connection(keyspace_name, hosts):
    if get_cassandra_connection._connection is None:
        get_cassandra_connection._connection = ConnectionPool(
            keyspace_name, hosts)

    return get_cassandra_connection._connection

get_cassandra_connection._connection = None
Python
0
@@ -235,16 +235,80 @@ e, hosts +, pool_size=len(hosts)*24,%0A prefill=False, timeout=10 )%0A re
4e2affde042fab083ec24ec8d6e04ba2f45d1f7d
add utcnow to if conditional evaluation
flexget/plugins/filter/if_condition.py
flexget/plugins/filter/if_condition.py
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # noqa pylint: disable=unused-import, redefined-builtin
from future.moves import builtins

import logging
import datetime
from copy import copy

from jinja2 import UndefinedError

from flexget import plugin
from flexget.event import event
from flexget.task import Task
from flexget.entry import Entry
from flexget.utils.template import evaluate_expression

log = logging.getLogger('if')


class FilterIf(object):
    """Can run actions on entries that satisfy a given condition.

    Actions include accept, reject, and fail, as well as the ability to run
    other filter plugins on the entries."""

    schema = {
        'type': 'array',
        'items': {
            'type': 'object',
            'additionalProperties': {
                'anyOf': [
                    {'$ref': '/schema/plugins'},
                    {'enum': ['accept', 'reject', 'fail']}
                ]
            }
        }
    }

    def check_condition(self, condition, entry):
        """Checks if a given `entry` passes `condition`"""
        # Make entry fields and other utilities available in the eval namespace
        # We need our namespace to be an Entry instance for lazy loading to work
        eval_locals = copy(entry)
        eval_locals.update({'has_field': lambda f: f in entry,
                            'timedelta': datetime.timedelta,
                            'now': datetime.datetime.now()})
        try:
            # Restrict eval namespace to have no globals and locals only from eval_locals
            passed = evaluate_expression(condition, eval_locals)
            if passed:
                log.debug('%s matched requirement %s' % (entry['title'], condition))
            return passed
        except UndefinedError as e:
            # Extract the name that did not exist
            missing_field = e.args[0].split('\'')[1]
            log.debug('%s does not contain the field %s' % (entry['title'], missing_field))
        except Exception as e:
            log.error('Error occurred while evaluating statement `%s`. (%s)' % (condition, e))

    def __getattr__(self, item):
        """Provides handlers for all phases."""
        for phase, method in plugin.phase_methods.items():
            if item == method and phase not in ['accept', 'reject', 'fail', 'input']:
                break
        else:
            raise AttributeError(item)

        def handle_phase(task, config):
            entry_actions = {
                'accept': Entry.accept,
                'reject': Entry.reject,
                'fail': Entry.fail}
            for item in config:
                requirement, action = list(item.items())[0]
                passed_entries = (e for e in task.entries if self.check_condition(requirement, e))
                if isinstance(action, str):
                    if not phase == 'filter':
                        continue
                    # Simple entry action (accept, reject or fail) was specified as a string
                    for entry in passed_entries:
                        entry_actions[action](entry, 'Matched requirement: %s' % requirement)
                else:
                    # Other plugins were specified to run on this entry
                    fake_task = Task(task.manager, task.name, config=action, options=task.options)
                    fake_task.session = task.session
                    # This entry still belongs to our feed, accept/reject etc. will carry through.
                    fake_task.all_entries[:] = passed_entries

                    methods = {}
                    for plugin_name, plugin_config in action.items():
                        p = plugin.get_plugin_by_name(plugin_name)
                        method = p.phase_handlers.get(phase)
                        if method:
                            methods[method] = (fake_task, plugin_config)
                    # Run the methods in priority order
                    for method in sorted(methods, reverse=True):
                        method(*methods[method])

        handle_phase.priority = 80
        return handle_phase


@event('plugin.register')
def register_plugin():
    plugin.register(FilterIf, 'if', api_ver=2)
Python
0.000001
@@ -1419,16 +1419,82 @@ edelta,%0A + 'utcnow': datetime.datetime.utcnow(),%0A
fb4b9e4570c4053204304fc934d0fe816d4c056d
add new split dictionary and dependencies
tests/resources/dictionaries/transaction_dictionary.py
tests/resources/dictionaries/transaction_dictionary.py
# -*- coding: utf-8 -*-
from tests.resources.dictionaries import card_dictionary
from tests.resources.dictionaries import customer_dictionary
from tests.resources import pagarme_test

BOLETO_TRANSACTION = {'amount': '10000', 'payment_method': 'boleto'}

CALCULATE_INTALLMENTS_AMOUNT = {'amount': '10000', 'free_installments': "1", 'interest_rate': '13',
                                'max_installments': '12'}

PAY_BOLETO = {'status':'paid'}

REFUNDED_OR_CAPTURE_TRANSACTION = {'amount':'10000'}

INVALID_CREDIT_CARD_TRANSACTION_DICTIONARY = {'amount':'10000',
'card_number':card_dictionary.INVALID_CARD_DICTIONARY['card_number'],
'card_holder_name': card_dictionary.INVALID_CARD_DICTIONARY['card_holder_name'],
'card_cvv':card_dictionary.INVALID_CARD_DICTIONARY['card_cvv'],
'card_expiration_date':card_dictionary.INVALID_CARD_DICTIONARY['card_expiration_date'],
'customer': customer_dictionary.CUSTOMER_DICTIONARY}

VALID_CREDIT_CARD_TRANSACTION_CAPTURE_FALSE_DICTIONARY = {'amount':'10000', 'capture':'false',
'card_number':card_dictionary.VALID_CARD_DICTIONARY['card_number'],
'card_holder_name':card_dictionary.VALID_CARD_DICTIONARY['card_holder_name'],
'card_cvv':card_dictionary.VALID_CARD_DICTIONARY['card_cvv'],
'card_expiration_date':card_dictionary.VALID_CARD_DICTIONARY['card_expiration_date'],
'customer': customer_dictionary.CUSTOMER_DICTIONARY}

VALID_CREDIT_CARD_TRANSACTION_DICTIONARY = {'amount':'10000',
'card_number':card_dictionary.VALID_CARD_DICTIONARY['card_number'],
'card_holder_name': card_dictionary.VALID_CARD_DICTIONARY['card_holder_name'],
'card_cvv':card_dictionary.VALID_CARD_DICTIONARY['card_cvv'],
'card_expiration_date':card_dictionary.VALID_CARD_DICTIONARY['card_expiration_date'],
'customer': customer_dictionary.CUSTOMER_DICTIONARY}

VALID_CREDIT_CARD_TRANSACTION__WITH_POSTBACK_DICTIONARY = {'amount':'10000',
'card_number':card_dictionary.VALID_CARD_DICTIONARY['card_number'],
'postback_url':pagarme_test.create_postback_url(),
'card_holder_name': card_dictionary.VALID_CARD_DICTIONARY['card_holder_name'],
'card_cvv':card_dictionary.VALID_CARD_DICTIONARY['card_cvv'],
'card_expiration_date':card_dictionary.VALID_CARD_DICTIONARY['card_expiration_date'],
'customer': customer_dictionary.CUSTOMER_DICTIONARY}
Python
0
@@ -160,27 +160,119 @@ rces - import pagarme_tes +.dictionaries import recipient_dictionary%0Afrom tests.resources import pagarme_test%0Afrom pagarme import recipien t%0A%0AB @@ -440,16 +440,48 @@ : '13',%0A + 'max_ins @@ -522,16 +522,17 @@ status': + 'paid'%7D%0A @@ -572,24 +572,25 @@ = %7B'amount': + '10000'%7D%0A%0AIN @@ -587,16 +587,435 @@ 0000'%7D%0A%0A +RECIPIENT = recipient.create(recipient_dictionary.RECIPIENT_DICTIONARY)%0A%0ASPLIT_RULE_PERCENTAGE = %7B'recipient_id': RECIPIENT%5B'id'%5D, 'percentage': 100, 'liable': 'true',%0A 'charge_processing_fee': 'true'%7D%0A%0ABOLETO_TRANSACTION_SPLIT = %7B'amount': BOLETO_TRANSACTION%5B'amount'%5D, 'payment_method': BOLETO_TRANSACTION%5B'payment_method'%5D,%0A 'split_rules':%5BSPLIT_RULE_PERCENTAGE%5D%7D%0A%0A INVALID_ @@ -1053,32 +1053,33 @@ ARY = %7B'amount': + '10000',%0A'card_n @@ -1077,32 +1077,33 @@ ,%0A'card_number': + card_dictionary. @@ -1226,32 +1226,33 @@ e'%5D,%0A'card_cvv': + card_dictionary. @@ -1303,32 +1303,33 @@ xpiration_date': + card_dictionary. @@ -1490,32 +1490,33 @@ ARY = %7B'amount': + '10000', 'captur @@ -1518,16 +1518,17 @@ apture': + 'false', @@ -1534,32 +1534,33 @@ ,%0A'card_number': + card_dictionary. @@ -1616,16 +1616,17 @@ r_name': + card_dic @@ -1679,32 +1679,33 @@ e'%5D,%0A'card_cvv': + card_dictionary. @@ -1754,32 +1754,33 @@ xpiration_date': + card_dictionary. @@ -1925,32 +1925,33 @@ ARY = %7B'amount': + '10000',%0A'card_n @@ -1949,32 +1949,33 @@ ,%0A'card_number': + card_dictionary. @@ -2094,32 +2094,33 @@ e'%5D,%0A'card_cvv': + card_dictionary. @@ -2169,32 +2169,33 @@ xpiration_date': + card_dictionary. @@ -2363,16 +2363,17 @@ amount': + '10000', @@ -2387,16 +2387,17 @@ number': + card_dic @@ -2457,16 +2457,17 @@ ck_url': + pagarme_ @@ -2584,16 +2584,17 @@ rd_cvv': + card_dic @@ -2659,16 +2659,17 @@ n_date': + card_dic @@ -2755,28 +2755,29 @@ tionary.CUSTOMER_DICTIONARY%7D +%0A
fa23d59a66cfc192bcfed6cdbb8426479487ccca
Add unit tests
tests/unit/synapseutils/unit_test_synapseutils_walk.py
tests/unit/synapseutils/unit_test_synapseutils_walk.py
import json
import uuid

import pytest
from unittest.mock import patch, call

import synapseclient
import synapseutils.walk_functions


def test_helpWalk_not_container(syn):
    """Test if entry entity isn't a container"""
    entity = {"id": "syn123", "concreteType": "File"}
    with patch.object(syn, "get", return_value=entity):
        result = synapseutils.walk_functions._helpWalk(syn=syn, synId="syn123", includeTypes=["folder", "file"])
        # Execute generator
        gen_result = list(result)
        assert gen_result == []


def test_helpWalk_one_child_file(syn):
    """Test if there is one file in parent directory"""
    entity = {"id": "syn123", "concreteType": "org.sagebionetworks.repo.model.Project", "name": "parent_folder"}
    child = [{"id": "syn2222", "conreteType": "File", "name": "test_file"}]
    expected = [
        (('parent_folder', 'syn123'), [], [('test_file', 'syn2222')])
    ]
    with patch.object(syn, "get", return_value=entity),\
         patch.object(syn, "getChildren", return_value=child):
        result = synapseutils.walk_functions._helpWalk(syn=syn, synId="syn123", includeTypes=["folder", "file"])
        # Execute generator
        gen_result = list(result)
        assert gen_result == expected


def test_helpWalk_directory(syn):
    """Test recursive functionality"""
    entity_list = [
        {"id": "syn123", "concreteType": "org.sagebionetworks.repo.model.Project", "name": "parent_folder"},
        {"id": "syn124", "concreteType": "org.sagebionetworks.repo.model.Folder", "name": "test_folder"}
    ]
    child_list = [
        [{"id": "syn2222", "concreteType": "File", "name": "test_file"},
         {"id": "syn124", "concreteType": "org.sagebionetworks.repo.model.Folder", "name": "test_folder"}],
        [{"id": "syn22223", "conreteType": "File", "name": "test_file_2"}]
    ]
    expected = [
        (('parent_folder', 'syn123'), [('test_folder', 'syn124')], [('test_file', 'syn2222')]),
        (('parent_folder/test_folder', 'syn124'), [], [('test_file_2', 'syn22223')])
    ]
    with patch.object(syn, "get", side_effect=entity_list),\
         patch.object(syn, "getChildren", side_effect=child_list):
        result = synapseutils.walk_functions._helpWalk(syn=syn, synId="syn123", includeTypes=["folder", "file"])
        # Execute generator
        gen_result = list(result)
        assert gen_result == expected


# def test_helpWalk_not_container(syn):
#     entity = {"id": "syn123", "concreteType": "File"}
#     with patch.object(syn, "get", return_value=entity),\
#         patch.object(syn, "getChildren", return_value=None):
#         synapseutils.walk._helpWalk(syn, "syn123", "syn456", updateLinks=False)
Python
0.000001
@@ -962,16 +962,32 @@ =entity) + as mock_syn_get ,%5C%0A @@ -1042,16 +1042,34 @@ e=child) + as mock_get_child :%0A @@ -1195,32 +1195,32 @@ ecute generator%0A - gen_resu @@ -1229,32 +1229,184 @@ = list(result)%0A + mock_syn_get.assert_called_once_with(%22syn123%22, downloadFile=False)%0A mock_get_child.assert_called_once_with(%22syn123%22, %5B%22folder%22, %22file%22%5D)%0A assert gen_r @@ -2564,305 +2564,4 @@ ted%0A -# def test_helpWalk_not_container(syn):%0A# entity = %7B%22id%22: %22syn123%22, %22concreteType%22: %22File%22%7D%0A# with patch.object(syn, %22get%22, return_value=entity),%5C%0A# patch.object(syn, %22getChildren%22, return_value=None):%0A# synapseutils.walk._helpWalk(syn, %22syn123%22, %22syn456%22, updateLinks=False)%0A
ac21a23540f84e72931e2a82afc57aff420a2151
Move speech_services_speech_tests back to qemu.
tools/android/emulated_devices/macro/emulator_info.bzl
tools/android/emulated_devices/macro/emulator_info.bzl
"""Defines the emulators and their supported configs in android_test_support.""" load( "//tools/android/emulated_devices:macro/emulator.bzl", "new_emulator", "emulator_type", "emulator_files", ) load("//tools/android/emulated_devices:macro/image.bzl", "image_files") load("//tools/android/emulated_devices:macro/props.bzl", "new_props") _EMULATOR_TYPE_PROP = "ro.mobile_ninjas.emulator_type" # QEMU1 is the legacy emulator. It is also currently our default emulator. # Most of android-emulator's team development work focuses on QEMU2, we're # actively migrating to it. QEMU = new_emulator( "qemu", props = new_props(boot_properties = {_EMULATOR_TYPE_PROP: "qemu"}), supports = { "x86": [ 10, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ], "arm": [ 10, 15, 16, 17, 18, 19, ], }, # We're phasing out qemu1, so only whitelisted projects can depend on # explicit qemu1 device targets. default_visibility = [ "//googlex/gcam/hdrplus:__subpackages__", "//gws/fruit:__subpackages__", "//java/com/google/android/apps/auto:__subpackages__", "//java/com/google/corp/android/apps/campus:__subpackages__", "//java/com/google/android/apps/auth/test/support:__subpackages__", "//java/com/google/android/apps/calendar/calendar:__subpackages__", "//java/com/google/android/apps/cardviewer:__subpackages__", "//java/com/google/android/apps/nbu/freighter:__subpackages__", "//java/com/google/android/apps/shopping/express:__subpackages__", "//java/com/google/android/apps/youtube/app:__subpackages__", "//java/com/google/android/clockwork/home:__subpackages__", "//java/com/google/gws/tools/carddevserver:__subpackages__", "//java/com/google/testing/screendiffing/android/simpleactivity:__subpackages__", "//javatests/com/google/android/apps/auto/launcher/vanagon/scuba:__subpackages__", "//javatests/com/google/android/apps/calendar/uitest/groomcardscuba:__subpackages__", "//javatests/com/google/android/apps/calendar/uitest/timelinechipscuba:__subpackages__", "//javatests/com/google/android/apps/common/testing/services/location:__subpackages__", "//javatests/com/google/android/apps/contacts/integration/uidatatests/mocktests:__subpackages__", "//javatests/com/google/android/apps/inputmethod:__subpackages__", "//javatests/com/google/android/apps/nbu/freighter/integration:__subpackages__", "//javatests/com/google/android/apps/playconsole/instrumentation:__subpackages__", "//javatests/com/google/android/apps/shopping/express/shoppinghistory:__subpackages__", "//javatests/com/google/android/apps/youtube/app/functional/fakes/social:__subpackages__", "//javatests/com/google/android/clockwork/home/espresso/quicksettings:__subpackages__", "//javatests/com/google/android/gmscore:__subpackages__", "//javatests/com/google/android/libraries/analytics/testing/seaworld:__subpackages__", "//javatests/com/google/corp/android/apps/campus:__subpackages__", "//javatests/com/google/testing/screendiffing/android/simpleactivity:__subpackages__", "//third_party/java_src/android_libs/aosp_calendar/tests/espresso:__subpackages__", ] ) QEMU2_APIS = [ 10, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, ] # QEMU2 is the new hotness. It requires a different kernel to work. We're # backporting support to older api levels, but it is slow going. 
QEMU2 = new_emulator( "qemu2", extra_files = [ "@androidsdk//:qemu2_x86", ], props = new_props(boot_properties = {_EMULATOR_TYPE_PROP: "qemu2"}), supports = {"x86": QEMU2_APIS}, ) def _t2e(): t2e = dict() for e in [QEMU, QEMU2]: t2e[emulator_type(e)] = e return t2e TYPE_TO_EMULATOR = _t2e() def extra_system_image_contents(emulator, image): """Returns a list of targets to include the the system image filegroup. Mostly this is figured out by information stored in the emulator and image objects. For QEMU2 we have to add an extra target to get the ranchu kernel. Arguments: emulator: an emulator image: an image Returns: a list of srcs to put in the file system image filegroup. """ contents = [image_files(image)] contents += emulator_files(emulator) if emulator_type(emulator) == emulator_type(QEMU2): maybe_extra_kernel_target = "%s_qemu2_extra" % image_files(image) contents.append(maybe_extra_kernel_target) return contents
Python
0
@@ -1629,32 +1629,118 @@ subpackages__%22,%0A + %22//java/com/google/android/apps/gsa/binaries/speechservices:__subpackages__%22,%0A %22//java/ @@ -2643,32 +2643,99 @@ subpackages__%22,%0A + %22//javatests/com/google/android/apps/gsa:__subpackages__%22,%0A %22//javat
d2fe267359feec48888469909bec3b432d1f4a93
Fix `BundleIntegrationTest`. (#4953)
tests/python/pants_test/engine/legacy/test_bundle_integration.py
tests/python/pants_test/engine/legacy/test_bundle_integration.py
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
from contextlib import contextmanager

from pants.base.deprecated import deprecated_conditional
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine


class BundleIntegrationTest(PantsRunIntegrationTest):

  TARGET_PATH = 'testprojects/src/java/org/pantsbuild/testproject/bundle'

  def test_bundle_basic(self):
    args = ['-q', 'bundle', self.TARGET_PATH]
    self.do_command(*args, success=True, enable_v2_engine=True)

  @contextmanager
  def bundled(self, target_name):
    with temporary_dir() as temp_distdir:
      with self.pants_results(
          ['-q', '--pants-distdir={}'.format(temp_distdir), 'bundle',
           '{}:{}'.format(self.TARGET_PATH, target_name)]) as pants_run:
        self.assert_success(pants_run)
        yield os.path.join(temp_distdir,
                           '{}.{}-bundle'.format(self.TARGET_PATH.replace('/', '.'), target_name))

  @ensure_engine
  def test_bundle_mapper(self):
    with self.bundled('mapper') as bundle_dir:
      self.assertTrue(os.path.isfile(os.path.join(bundle_dir, 'bundle_files/file1.txt')))

  @ensure_engine
  def test_bundle_relative_to(self):
    with self.bundled('relative_to') as bundle_dir:
      self.assertTrue(os.path.isfile(os.path.join(bundle_dir, 'b/file1.txt')))

  @ensure_engine
  def test_bundle_rel_path(self):
    with self.bundled('rel_path') as bundle_dir:
      self.assertTrue(os.path.isfile(os.path.join(bundle_dir, 'b/file1.txt')))

  @ensure_engine
  def test_bundle_directory(self):
    with self.bundled('directory') as bundle_dir:
      root = os.path.join(bundle_dir, 'a/b')
      self.assertTrue(os.path.isdir(root))
      # NB: The behaviour of this test will change with the relevant deprecation
      # in `pants.backend.jvm.tasks.bundle_create`, because the parent directory
      # will not be symlinked.
      deprecated_conditional(
        lambda: os.path.isfile(os.path.join(root, 'file1.txt')),
        '1.5.0.dev0',
        'default recursive inclusion of files in directory',
        'A non-recursive/literal glob should no longer include child paths.'
      )

  def test_bundle_explicit_recursion(self):
    with self.bundled('explicit_recursion') as bundle_dir:
      root = os.path.join(bundle_dir, 'a/b')
      self.assertTrue(os.path.isdir(root))
      self.assertTrue(os.path.isfile(os.path.join(root, 'file1.txt')))

  @ensure_engine
  def test_bundle_resource_ordering(self):
    """Ensures that `resources=` ordering is respected."""
    pants_run = self.run_pants(
      ['-q', 'run',
       'testprojects/src/java/org/pantsbuild/testproject/bundle:bundle-resource-ordering']
    )
    self.assert_success(pants_run)
    self.assertEquals(pants_run.stdout_data, 'Hello world from Foo\n\n')
Python
0
@@ -3105,16 +3105,24 @@ out_data +.strip() , 'Hello @@ -3140,11 +3140,7 @@ Foo -%5Cn%5Cn ')%0A
f48432a61ab6d3d97c4a93dc9c2178a9b01695a9
Add now-needed response_type param to login urls
linode/login_client.py
linode/login_client.py
import requests
from enum import Enum

from linode.errors import ApiError

try:
    from urllib.parse import urlparse
    from urllib.parse import urlencode
    from urllib.parse import urlunparse
except ImportError:
    from urlparse import urlparse
    from urllib import urlencode
    from urlparse import urlunparse

class AllWrapper():
    def __repr__(self):
        return '*'

class OAuthScopes:
    all = AllWrapper()

    class Linodes(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "linodes:*"
            return "linodes:{}".format(self.name)

    class Domains(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "domains:*"
            return "domains:{}".format(self.name)

    class StackScripts(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "stackscripts:*"
            return "stackscripts:{}".format(self.name)

    class Users(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "users:*"
            return "users:{}".format(self.name)

    class Tokens(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "tokens:*"
            return "tokens:{}".format(self.name)

    class IPs(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "ips:*"
            return "ips:{}".format(self.name)

    class Tickets(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "tickets:*"
            return "tickets:{}".format(self.name)

    class Clients(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "clients:*"
            return "clients:{}".format(self.name)

    class Account(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "account:*"
            return "account:{}".format(self.name)

    class Events(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "events:*"
            return "events:{}".format(self.name)

    class Volumes(Enum):
        view = 0
        create = 1
        modify = 2
        delete = 3
        all = 4

        def __repr__(self):
            if(self.name == 'all'):
                return "volumes:*"
            return "volumes:{}".format(self.name)

    _scope_families = {
        'linodes': Linodes,
        'domains': Domains,
        'stackscripts': StackScripts,
        'users': Users,
        'tokens': Tokens,
    }

    @staticmethod
    def parse(scopes):
        ret = []

        # special all-scope case
        if scopes == '*':
            return [ getattr(OAuthScopes._scope_families[s], 'all')
                     for s in OAuthScopes._scope_families ]

        for scope in scopes.split(','):
            resource = access = None
            if ':' in scope:
                resource, access = scope.split(':')
            else:
                resource = scope
                access = '*'

            parsed_scope = OAuthScopes._get_parsed_scope(resource, access)
            if parsed_scope:
                ret.append(parsed_scope)

        return ret

    @staticmethod
    def _get_parsed_scope(resource, access):
        resource = resource.lower()
        access = access.lower()
        if resource in OAuthScopes._scope_families:
            if access == '*':
                access = 'delete'
            if hasattr(OAuthScopes._scope_families[resource], access):
                return getattr(OAuthScopes._scope_families[resource], access)

        return None

    @staticmethod
    def serialize(scopes):
        ret = ''
        if not type(scopes) is list:
            scopes = [ scopes ]
        for scope in scopes:
            ret += "{},".format(repr(scope))
        if ret:
            ret = ret[:-1]

        return ret

class LinodeLoginClient:
    def __init__(self, client_id, client_secret, base_url="https://login.linode.com"):
        self.base_url = base_url
        self.client_id = client_id
        self.client_secret = client_secret

    def _login_uri(self, path):
        return "{}{}".format(self.base_url, path)

    def generate_login_url(self, scopes=None, redirect_uri=None):
        url = self.base_url + "/oauth/authorize"
        split = list(urlparse(url))
        params = {
            "client_id": self.client_id,
        }
        if scopes:
            params["scopes"] = OAuthScopes.serialize(scopes)
        if redirect_uri:
            params["redirect_uri"] = redirect_uri
        split[4] = urlencode(params)
        return urlunparse(split)

    def finish_oauth(self, code):
        r = requests.post(self._login_uri("/oauth/token"), data={
                "code": code,
                "client_id": self.client_id,
                "client_secret": self.client_secret
            })
        if r.status_code != 200:
            raise ApiError("OAuth token exchange failed", r)
        token = r.json()["access_token"]
        scopes = OAuthScopes.parse(r.json()["scopes"])
        return token, scopes

    def expire_token(self, token):
        r = requests.post(self._login_uri("/oauth/token/expire"),
            data={
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "token": token,
            })
        if r.status_code != 200:
            raise ApiError("Failed to expire token!", r)
        return True
Python
0
@@ -5372,32 +5372,93 @@ self.client_id,%0A + %22response_type%22: %22code%22, # needed for all logins%0A %7D%0A
1bccf48e6e142e6c62374dd9d7dc94330f15c650
Update ipc_lista1.3.py
lista1/ipc_lista1.3.py
lista1/ipc_lista1.3.py
#ipc_lista1.3
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça dois números e imprima a soma.

number1 = input("Digite o primeiro: ")
number2 = input("Digite o segundo número: ")
print(number1+number2)
Python
0
@@ -66,17 +66,16 @@ 61531004 -4 %0A#%0A#%0A#%0A#
4e6fc94fde8eace1b461eba59dc4a56611664877
Update ipc_lista1.7.py
lista1/ipc_lista1.7.py
lista1/ipc_lista1.7.py
#ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o
#usuário.

altura = input("Digite a altura do quadrado em metros: "
Python
0
@@ -229,9 +229,10 @@ etros: %22 +) %0A
950e9f82be8b3a02ce96db47061cf828da231be9
Update ipc_lista1.8.py
lista1/ipc_lista1.8.py
lista1/ipc_lista1.8.py
#ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
#Calcule e mostre o total do seu salário no referido mês.

QntHora = input("Entre com o valor de seu rendimento por hora: ")
hT = input
Python
0
@@ -310,9 +310,16 @@ = input +(%22Entre %0A
26c781807937038ec2c4fbfd4413ae2c60decd1b
add stdint.h for c++ default header include.
src/py/cpp_fragment_tmpl.py
src/py/cpp_fragment_tmpl.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-

hpp_tmpl="""#ifndef __FRAGMENT_HPP__
#define __FRAGMENT_HPP__

#include <string>
#include <vector>
#include <map>
#include <list>

{includes}

void fragment_container();
#endif
"""

cpp_tmpl="""#include "{head_file}"

#include <iostream>
#include <stdio.h>

void fragment_container()
{{
    // tmp code begin
    {tmp_cpp}
    // tmp code end
}}
"""
Python
0
@@ -170,16 +170,129 @@ %3Clist%3E%0A%0A +// linux int type define; should be remore/add by system dependent in the future version.%0A#include %3Cstdint.h%3E %0A%0A %7Binclude
5e21c7d0fa46e2b290368533cc6dc741b1d366e2
correct src path in settings
functional-tests/clickerft/settings.py
functional-tests/clickerft/settings.py
import os

BASEDIR = os.path.dirname(os.getcwd())
HOME = "file://" + BASEDIR + "/src/"
Python
0.000001
@@ -1,51 +1,96 @@ -import os%0A%0ABASEDIR = os.path.dirname(os.getcwd( +from os.path import dirname, realpath%0A%0ABASEDIR = dirname(dirname(dirname(realpath(__file__)) ))%0AH
b251092c7f752df365392efba805f2c04d4617a0
add pragmas
galpy/potential_src/linearPotential.py
galpy/potential_src/linearPotential.py
import os, os.path
import pickle
import numpy as nu
import galpy.util.bovy_plot as plot
from Potential import PotentialError, Potential


class linearPotential:
    """Class representing 1D potentials"""
    def __init__(self,amp=1.):
        self._amp= amp
        self.dim= 1
        self.isRZ= False
        self.hasC= False
        return None

    def __call__(self,x,t=0.):
        """
        NAME:
           __call__
        PURPOSE:
           evaluate the potential
        INPUT:
           x - position
           t= time (optional)
        OUTPUT:
           Phi(x,t)
        HISTORY:
           2010-07-12 - Written - Bovy (NYU)
        """
        try:
            return self._amp*self._evaluate(x,t=t)
        except AttributeError:
            raise PotentialError("'_evaluate' function not implemented for this potential")

    def force(self,x,t=0.):
        """
        NAME:
           force
        PURPOSE:
           evaluate the force
        INPUT:
           x - position
           t= time (optional)
        OUTPUT:
           F(x,t)
        HISTORY:
           2010-07-12 - Written - Bovy (NYU)
        """
        try:
            return self._amp*self._force(x,t=t)
        except AttributeError:
            raise PotentialError("'_force' function not implemented for this potential")

    def plot(self,t=0.,min=-15.,max=15,ns=21,savefilename=None):
        """
        NAME:
           plot
        PURPOSE:
           plot the potential
        INPUT:
           t - time to evaluate the potential at
           min - minimum x
           max - maximum x
           ns - grid in x
           savefilename - save to or restore from this savefile (pickle)
        OUTPUT:
           plot to output device
        HISTORY:
           2010-07-13 - Written - Bovy (NYU)
        """
        if not savefilename == None and os.path.exists(savefilename):
            print "Restoring savefile "+savefilename+" ..."
            savefile= open(savefilename,'rb')
            potx= pickle.load(savefile)
            xs= pickle.load(savefile)
            savefile.close()
        else:
            xs= nu.linspace(min,max,ns)
            potx= nu.zeros(ns)
            for ii in range(ns):
                potx[ii]= self._evaluate(xs[ii],t=t)
            if not savefilename == None:
                print "Writing savefile "+savefilename+" ..."
                savefile= open(savefilename,'wb')
                pickle.dump(potx,savefile)
                pickle.dump(xs,savefile)
                savefile.close()
        return plot.bovy_plot(xs,potx,
                              xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
                              xrange=[min,max])


class linearPotentialFromRZPotential(linearPotential):
    def __init__(self,RZPot,R=1.):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize
        INPUT:
           RZPot - RZPotential instance
           R - Galactocentric radius at which to use the zPotential
        OUTPUT:
           linearAxiPotential instance
        HISTORY:
           2010-07-13 - Written - Bovy (NYU)
        """
        linearPotential.__init__(self,amp=1.)
        self._RZPot= RZPot
        self._R= R
        return None

    def _evaluate(self,x,t=0.):
        """
        NAME:
           _evaluate
        PURPOSE:
           evaluate the potential
        INPUT:
           x
           t
        OUTPUT:
           Pot(x,t)
        HISTORY:
           2010-07-13 - Written - Bovy (NYU)
        """
        return self._RZPot(self._R,x,t=t)

    def _force(self,x,t=0.):
        """
        NAME:
           _force
        PURPOSE:
           evaluate the force
        INPUT:
           x
           t
        OUTPUT:
           F(x,t)
        HISTORY:
           2010-07-13 - Written - Bovy (NYU)
        """
        return self._RZPot.Rforce(self._R,x,t=t)


def RZTolinearPotential(RZPot,R=1.):
    """
    NAME:
       RZTolinearPotential
    PURPOSE:
       convert an RZPotential to a linearPotential at some radius R
    INPUT:
       RZPot - RZPotential instance or list
       R - Galactocentric radius at which to evaluate the zPotential
    OUTPUT:
       linearPotential instance or list
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    if isinstance(RZPot,list):
        out= []
        for pot in RZPot:
            out.append(linearPotentialFromRZPotential(pot,R=R))
        return out
    elif isinstance(RZPot,Potential):
        return linearPotentialFromRZPotential(RZPot,R=R)
    else:
        raise PotentialError("Input to 'RZTolinearPotential' is neither an RZPotential-instance or a list of such instances")


def evaluatelinearPotentials(x,Pot,t=0.):
    """
    NAME:
       evaluatelinearPotentials
    PURPOSE:
       evaluate the sum of a list of potentials
    INPUT:
       x - evaluate potentials at this position
       Pot - (list of) linearPotential instance(s)
       t - time to evaluate at
    OUTPUT:
       pot(x,t)
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    if isinstance(Pot,list):
        sum= 0.
        for pot in Pot:
            sum+= pot(x,t=t)
        return sum
    elif isinstance(Pot,linearPotential):
        return Pot(x,t=t)
    else:
        raise PotentialError("Input to 'evaluatelinearPotentials' is neither a linearPotential-instance or a list of such instances")


def evaluatelinearForces(x,Pot,t=0.):
    """
    NAME:
       evaluatelinearForces
    PURPOSE:
       evaluate the forces due to a list of potentials
    INPUT:
       x - evaluate forces at this position
       Pot - (list of) linearPotential instance(s)
       t - time to evaluate at
    OUTPUT:
       force(x,t)
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    if isinstance(Pot,list):
        sum= 0.
        for pot in Pot:
            sum+= pot.force(x,t=t)
        return sum
    elif isinstance(Pot,linearPotential):
        return Pot.force(x,t=t)
    else:
        raise PotentialError("Input to 'evaluateForces' is neither a linearPotential-instance or a list of such instances")


def plotlinearPotentials(Pot,t=0.,min=-15.,max=15,ns=21,savefilename=None):
    """
    NAME:
       plotlinearPotentials
    PURPOSE:
       plot a combination of potentials
    INPUT:
       t - time to evaluate potential at
       min - minimum x
       max - maximum x
       ns - grid in x
       savefilename - save to or restore from this savefile (pickle)
    OUTPUT:
       plot to output device
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    if not savefilename == None and os.path.exists(savefilename):
        print "Restoring savefile "+savefilename+" ..."
        savefile= open(savefilename,'rb')
        potx= pickle.load(savefile)
        xs= pickle.load(savefile)
        savefile.close()
    else:
        xs= nu.linspace(min,max,ns)
        potx= nu.zeros(ns)
        for ii in range(ns):
            potx[ii]= evaluatelinearPotentials(xs[ii],Pot,t=t)
        if not savefilename == None:
            print "Writing savefile "+savefilename+" ..."
            savefile= open(savefilename,'wb')
            pickle.dump(potx,savefile)
            pickle.dump(xs,savefile)
            savefile.close()
    return plot.bovy_plot(xs,potx,
                          xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
                          xrange=[min,max])
Python
0.999885
@@ -742,32 +742,50 @@ AttributeError: + #pragma: no cover %0A rai @@ -1259,16 +1259,34 @@ teError: + #pragma: no cover %0A
623a4d54d2bd15ada9efd82437ba6387dd1ba7b7
use shorter backoff function
gcloud_requests/requests_connection.py
gcloud_requests/requests_connection.py
import logging
import requests
import time

from gcloud.datastore.connection import Connection as GCloudDatastoreConnection
from gcloud.connection import Connection as GCloudConnection
from gcloud.storage.connection import Connection as GCloudStorageConnection
from threading import local

logger = logging.getLogger(__file__)
_state = local()


class ResponseProxy(requests.structures.CaseInsensitiveDict):
    def __init__(self, response):
        super(ResponseProxy, self).__init__()
        self.response = response
        self.update(response.headers)
        self.update(status=str(self.status))

    @property
    def status(self):
        return self.response.status_code


class RequestsProxy(object):
    """Wraps a ``requests`` library :class:`.Session` instance and exposes
    a `request` method that is compatible with the ``httplib2`` `request`
    method.
    """

    def __init__(self):
        # XXX: This is required for the proxy to have the correct shape.
        self.connections = {}

    def _request(self, uri, method="GET", body=None, headers=None,
                 redirections=5, connection_type=None, retries=0):
        # NOTE: `retries` is the number of retries there have been so
        # far. It is passed in to/controlled by `_handle_response_error`.
        # XXX: Ensure we use one connection-pooling session per thread.
        session = getattr(_state, "session", None)
        if session is None:
            session = _state.session = requests.Session()

        logger.debug("Using session={!r}, retries={!r}.".format(session, retries))
        response = session.request(
            method, uri, data=body, headers=headers,
            allow_redirects=redirections > 0,
            # XXX: The connect timeout is set to 3.05 based on a
            # recommendation in requests' docs and the read timeout is
            # arbitrary.
            timeout=(3.05, 7)
        )
        if response.status_code >= 400:
            response = self._handle_response_error(
                response, retries,
                uri=uri, method=method, body=body, headers=headers,
                redirections=redirections, connection_type=connection_type
            )

        return ResponseProxy(response), response.content

    # NOTE: This instance method will get replaced with a decorated
    # version inside the connection object. The reason we keep both
    # around is so we can refer to the un-decorated version when
    # retrying requests. TODO: There is a small chance that some
    # retries may fail because of this due to an expired access token.
    request = _request

    def _handle_response_error(self, response, retries, **kwargs):
        """Provides a way for each connection wrapper to handle error
        responses.

        :param Response response:
          An instance of :class:`.requests.Response`.
        :param int retries:
          The number of times :meth:`.request` has been called so far.
        :param \**kwargs:
          The parameters with which :meth:`.request` was called. The
          `retries` parameter is excluded from `kwargs` intentionally.
        :returns:
          A :class:`.requests.Response`.
        """
        return response


class DatastoreRequestsProxy(RequestsProxy):
    """A Datastore-specific RequestsProxy that handles retries according to
    https://cloud.google.com/datastore/docs/concepts/errors.
    """

    def _handle_response_error(self, response, retries, **kwargs):
        """Handles Datastore response errors according to their documentation.

        :param Response response:
          An instance of :class:`.requests.Response`.
        :param int retries:
          The number of times :meth:`.request` has been called so far.
        :param \**kwargs:
          The parameters with which :meth:`.request` was called. The
          `retries` parameter is excluded from `kwargs` intentionally.
        :returns:
          A :class:`.requests.Response`.

        .. [#] https://cloud.google.com/datastore/docs/concepts/errors
        """
        content_type = response.headers.get("content-type", "")
        if "application/json" in content_type:
            json = response.json()
            reasons = [error["reason"] for error in json["errors"]]
            if "INVALID_ARGUMENT" in reasons or \
               "PERMISSION_DENIED" in reasons or \
               "RESOURCE_EXHAUSTED" in reasons or \
               "FAILED_PRECONDITION" in reasons:
                return response

        if response.status_code == 500 and retries < 1 or \
           response.status_code == 503 and retries < 5 or \
           response.status_code == 403 and retries < 5 or \
           response.status_code == 409 and retries < 3:
            backoff = min(2 ** retries, 5)
            logger.debug("Sleeping for %r before retrying failed request...", backoff)
            time.sleep(backoff)

            logger.debug("Retrying failed request...")
            # XXX: We need to make sure we unwrap the response before we
            # return back to the `request` method.
            response_proxy, _ = self._request(retries=retries + 1, **kwargs)
            return response_proxy.response

        return response


class RequestsConnectionMixin(GCloudConnection):
    """This mixin injects itself into the MRO of any subclass of
    :class:`.GCloudConnection` and overwrites the :meth:`.http` property
    so that a :class:`.RequestsProxy` is used instead of an ``httplib2``
    request object.
    """

    REQUESTS_PROXY_CLASS = RequestsProxy
    REQUESTS_PROXY_KEY = "__requests_proxy__"

    @property
    def http(self):
        if not hasattr(self, self.REQUESTS_PROXY_KEY):
            setattr(self, self.REQUESTS_PROXY_KEY, self.REQUESTS_PROXY_CLASS())

        self._http = getattr(self, self.REQUESTS_PROXY_KEY)
        if self._credentials:
            self._http = self._credentials.authorize(self._http)

        return self._http


class DatastoreConnection(
        GCloudDatastoreConnection, RequestsConnectionMixin):
    "A datastore-compatible connection."

    REQUESTS_PROXY_CLASS = DatastoreRequestsProxy


class StorageConnection(
        GCloudStorageConnection, RequestsConnectionMixin):
    "A Storage-compatible connection."
Python
0.000109
@@ -4801,16 +4801,25 @@ f = min( +0.0625 * 2 ** ret @@ -4824,17 +4824,19 @@ etries, -5 +1.0 )%0A
54563933a265a7c70adce3996d0a31eb9c915203
Use kwarg normally in piratepad.controllers.Form
addons/piratepad/controllers.py
addons/piratepad/controllers.py
from openobject.tools import expose
from openerp.controllers import form
from openerp.utils import rpc, common, TinyDict

import cherrypy


class Form(form.Form):
    _cp_path = "/piratepad/form"

    @expose('json', methods=('POST',))
    def save(self, **kwargs):
        params, data = TinyDict.split(cherrypy.session['params'])
        pad_name=kwargs.get('pad_name')
        ctx = dict(rpc.session.context, default_res_model=params.model,
                   default_res_id=params.id, active_id=False, active_ids=[])
        pad_link = "http://piratepad.net/"+'-'.join(pad_name.split())
        attachment_id = rpc.RPCProxy('ir.attachment').create({
            'name': pad_name,
            'url': pad_link,
        }, ctx)
        return {'id': attachment_id, 'name': pad_name, 'url': pad_link}
Python
0
@@ -100,16 +100,8 @@ rpc, - common, Tin @@ -242,16 +242,16 @@ lf, -**kwargs +pad_name ):%0A @@ -319,48 +319,8 @@ '%5D)%0A - pad_name=kwargs.get('pad_name')%0A
8dfed1bb3070d5dde5a727c9d5739c6dc419069b
fix squeeze bug and some other refinement (#1874)
python/dllib/src/bigdl/dllib/utils/tf_utils.py
python/dllib/src/bigdl/dllib/utils/tf_utils.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import tempfile

import tensorflow as tf
import shutil

from google.protobuf import text_format

from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from bigdl.nn.layer import Model
from bigdl.util.common import JTensor
from bigdl.util.common import callBigDlFunc
import os


def get_path(output_name, sess=None):
    if sess is None:
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
    temp = tempfile.mkdtemp()

    saver = tf.train.Saver()
    saver.save(sess, temp + '/model.chkp')
    tf.train.write_graph(sess.graph, temp, 'model.pbtxt')

    merge_checkpoint(temp + '/model.pbtxt',
                     temp + '/model.chkp',
                     [output_name],
                     temp + '/model.pb', sess)
    return temp + '/model.pb'


def convert(input_ops, output_ops, byte_order, bigdl_type):
    """
    Convert tensorflow model to bigdl model
    :param input_ops: operation list used for input, should be placeholders
    :param output_ops: operations list used for output
    :param sess: current tensorflow session
    :return: bigdl model
    """
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    input_names = map(lambda x: x.name.split(":")[0], input_ops)
    output_names = map(lambda x: x.name.split(":")[0], output_ops)
    temp = tempfile.mkdtemp()

    saver = tf.train.Saver()
    saver.save(sess, temp + '/model.chkp')
    tf.train.write_graph(sess.graph, temp, 'model.pbtxt')

    merge_checkpoint(temp + '/model.pbtxt',
                     temp + '/model.chkp',
                     output_names,
                     temp + '/model.pb', sess)

    model = Model.load_tensorflow(temp + '/model.pb', input_names, output_names,
                                  byte_order, bigdl_type)

    try:
        shutil.rmtree(temp)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    return model


def export_checkpoint(checkpoint_path):
    """
    Export variable tensors from the checkpoint files.

    :param checkpoint_path: tensorflow checkpoint path
    :return: dictionary of tensor. The key is the variable name and the value is the numpy
    """
    reader = tf.train.NewCheckpointReader(checkpoint_path)

    # Get tensor name list
    tensor_names = filter(lambda n: n!='global_step',
                          reader.get_variable_to_shape_map().keys())
    # Prepare key-value dictionary
    tensors = {}
    for tn in tensor_names:
        tensors[tn] = reader.get_tensor(tn)

    return tensors


def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by BigDL

    :param tensors: tensor dictionary
    :param target_path: where is the Java object file store
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    jtensors = {}
    for tn in tensors.keys():
        jtensors[tn] = JTensor.from_ndarray(tensors[tn])

    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)


def dump_model(path, sess=None, graph=None, bigdl_type="float"):
    """
    Dump a tensorflow model to files. The graph will be dumped to path/model.pb,
    and the checkpoint will be dumped to path/model.bin

    :param path: dump folder path
    :param sess: if user pass in session, we assume that the variable of the graph
    in the session has been inited
    :param graph: tensorflow graph. Default use the default graph of the session
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    if not os.path.isdir(path):
        print("Folder " + path + " does not exist")
        raise

    if sess is None:
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)

    temp = tempfile.mkdtemp()

    # dump checkpoint to temp files
    checkpoint = temp + '/model.chkp'
    saver = tf.train.Saver()
    saver.save(sess, checkpoint)

    # generate bin files
    tensors = export_checkpoint(checkpoint)
    save_variable_bigdl(tensors, path + "/model.bin", bigdl_type)

    # dump grap to pb file
    graph = sess.graph if graph is None else graph
    with gfile.GFile(path + "/model.pb", "wb") as f:
        f.write(graph.as_graph_def().SerializeToString())

    try:
        shutil.rmtree(temp)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def merge_checkpoint(input_graph,
                     checkpoint,
                     output_node_names,
                     output_graph,
                     sess):
    """
    Get the variable values from the checkpoint file, and merge them to the GraphDef file
    Args:
        input_graph: the GraphDef file, doesn't contain variable values
        checkpoint: the checkpoint file
        output_node_names: A list of string, the output names
        output_graph: String of the location and the name of the output graph
    """
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"

    input_graph_def = graph_pb2.GraphDef()
    with gfile.FastGFile(input_graph, "r") as f:
        text_format.Merge(f.read().decode("utf-8"), input_graph_def)

    for node in input_graph_def.node:
        node.device = ""

    importer.import_graph_def(input_graph_def, name="")

    sess.run([restore_op_name], {filename_tensor_name: checkpoint})
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        input_graph_def,
        output_node_names,
        variable_names_blacklist=""
    )

    with gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
Python
0
@@ -3627,24 +3627,47 @@ ing%0A %22%22%22%0A + import numpy as np%0A jtensors @@ -3702,16 +3702,156 @@ keys():%0A + if not isinstance(tensors%5Btn%5D, np.ndarray):%0A value = np.array(tensors%5Btn%5D)%0A else:%0A value = tensors%5Btn%5D%0A @@ -3882,35 +3882,29 @@ rom_ndarray( -tensors%5Btn%5D +value )%0A %0A
31aa44ef336c497be9f545c9bd4af64aac250748
Fix remote coverage execution
python/helpers/coverage_runner/run_coverage.py
python/helpers/coverage_runner/run_coverage.py
"""Coverage.py's main entrypoint.""" import os import sys bundled_coverage_path = os.getenv('BUNDLED_COVERAGE_PATH') if bundled_coverage_path: sys_path_backup = sys.path sys.path = [p for p in sys.path if p != bundled_coverage_path] from coverage.cmdline import main sys.path = sys_path_backup else: from coverage.cmdline import main coverage_file = os.getenv('PYCHARM_COVERAGE_FILE') coverage_file = coverage_file[0:-len(".coverage")] run_cov = os.getenv('PYCHARM_RUN_COVERAGE') if os.getenv('CREATE_TEMP_COVERAGE_FILE'): line = 'LOG: PyCharm: File mapping:%s\t%s\n' import tempfile (h, new_cov_file) = tempfile.mkstemp(prefix='pycharm-coverage') print(line%(coverage_file + ".coverage", new_cov_file + ".coverage")) print(line%(coverage_file + '.syspath.txt', new_cov_file + '.syspath.txt')) print(line%(coverage_file + '.xml', new_cov_file + '.xml')) coverage_file = new_cov_file + ".cov" if coverage_file: os.environ['COVERAGE_FILE'] = coverage_file + ".coverage" if run_cov: a_file = open(coverage_file + '.syspath.txt', mode='w') a_file.write(os.getcwd()+"\n") for path in sys.path: a_file.write(path + "\n") a_file.close() argv = [] for arg in sys.argv: if arg.startswith('-m'): argv.append('-m') argv.append(arg[2:]) else: argv.append(arg) sys.argv = argv cwd = os.getcwd() try: main() finally: if run_cov: os.chdir(cwd) main(["xml", "-o", coverage_file + ".xml", "--ignore-errors"])
Python
0.000001
@@ -932,17 +932,8 @@ file - + %22.cov%22 %0A%0Aif
1f6a154967ecd74c538f9ddda3f4a83018a6eef7
Attempt to fix iris_val_based_early_stopping test. Change: 127441610
tensorflow/examples/skflow/iris_val_based_early_stopping.py
tensorflow/examples/skflow/iris_val_based_early_stopping.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split

import tensorflow as tf

from tensorflow.contrib import learn


def main(unused_argv):
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  x_train, x_val, y_train, y_val = train_test_split(
      x_train, y_train, test_size=0.2, random_state=42)
  val_monitor = learn.monitors.ValidationMonitor(
      x_val, y_val, early_stopping_rounds=200)

  # classifier with early stopping on training data
  classifier1 = learn.DNNClassifier(
      feature_columns=learn.infer_real_valued_columns_from_input(x_train),
      hidden_units=[10, 20, 10], n_classes=3, model_dir='/tmp/iris_model/')
  classifier1.fit(x=x_train, y=y_train, steps=2000)
  score1 = metrics.accuracy_score(y_test, classifier1.predict(x_test))

  # classifier with early stopping on validation data, save frequently for
  # monitor to pick up new checkpoints.
  classifier2 = learn.DNNClassifier(
      feature_columns=learn.infer_real_valued_columns_from_input(x_train),
      hidden_units=[10, 20, 10], n_classes=3, model_dir='/tmp/iris_model_val/',
      config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
  classifier2.fit(x=x_train, y=y_train, steps=2000,
                  monitors=[val_monitor])
  score2 = metrics.accuracy_score(y_test, classifier2.predict(x_test))

  # In many applications, the score is improved by using early stopping
  print('score1: ', score1)
  print('score2: ', score2)
  print('score2 > score1: ', score2 > score1)


if __name__ == '__main__':
  tf.app.run()
Python
0.999225
@@ -796,16 +796,31 @@ nction%0A%0A +import shutil%0A%0A from skl @@ -986,16 +986,150 @@ learn%0A%0A%0A +def clean_folder(folder):%0A %22%22%22Cleans the given folder if it exists.%22%22%22%0A try:%0A shutil.rmtree(folder)%0A except OSError:%0A pass%0A%0A%0A def main @@ -1498,16 +1498,75 @@ s=200)%0A%0A + model_dir = '/tmp/iris_model'%0A clean_folder(model_dir)%0A%0A # clas @@ -1781,26 +1781,17 @@ dir= -'/tmp/iris_ model -/' +_dir )%0A @@ -1908,24 +1908,87 @@ t(x_test))%0A%0A + model_dir = '/tmp/iris_model_val'%0A clean_folder(model_dir)%0A%0A # classifi @@ -2262,30 +2262,17 @@ dir= -'/tmp/iris_ model_ -val/' +dir ,%0A
0f1a046b2c9de38fb73dbdef9b9c64845fd28147
Allow to filter marketplace offering by billable flag.
src/waldur_mastermind/marketplace/filters.py
src/waldur_mastermind/marketplace/filters.py
import json

from django.db.models import Q
import django_filters
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import exceptions as rf_exceptions

from waldur_core.core import filters as core_filters

from . import models


class ServiceProviderFilter(django_filters.FilterSet):
    customer = core_filters.URLFilter(view_name='customer-detail', name='customer__uuid')
    customer_uuid = django_filters.UUIDFilter(name='customer__uuid')

    class Meta(object):
        model = models.ServiceProvider
        fields = []


class OfferingFilter(django_filters.FilterSet):
    name = django_filters.CharFilter(lookup_expr='icontains')
    customer = core_filters.URLFilter(view_name='customer-detail', name='customer__uuid')
    customer_uuid = django_filters.UUIDFilter(name='customer__uuid')
    allowed_customer_uuid = django_filters.UUIDFilter(name='customer__uuid',
                                                      method='filter_allowed_customer')
    attributes = django_filters.CharFilter(name='attributes', method='filter_attributes')
    state = core_filters.MappedMultipleChoiceFilter(
        choices=[(representation, representation) for db_value, representation in models.Offering.States.CHOICES],
        choice_mappings={representation: db_value for db_value, representation in models.Offering.States.CHOICES},
    )
    category_uuid = django_filters.UUIDFilter(name='category__uuid')
    o = django_filters.OrderingFilter(fields=('name', 'created'))

    def filter_allowed_customer(self, queryset, name, value):
        return queryset.filter(Q(shared=True) |
                               Q(customer__uuid=value) |
                               Q(allowed_customers__uuid=value))

    def filter_attributes(self, queryset, name, value):
        try:
            value = json.loads(value)
        except ValueError:
            raise rf_exceptions.ValidationError(_('Filter attribute is not valid json.'))

        if not isinstance(value, dict):
            raise rf_exceptions.ValidationError(_('Filter attribute should be an dict.'))

        for k, v in value.items():
            if isinstance(v, list):
                # If a filter value is a list, use multiple choice.
                queryset = queryset.filter(**{'attributes__{key}__has_any_keys'.format(key=k): v})
            else:
                queryset = queryset.filter(attributes__contains={k: v})
        return queryset

    class Meta(object):
        model = models.Offering
        fields = ['shared', 'type']


class OfferingCustomersFilterBackend(DjangoFilterBackend):
    def filter_queryset(self, request, queryset, view):
        return queryset.filter_for_user(request.user)


class ScreenshotFilter(django_filters.FilterSet):
    offering = core_filters.URLFilter(view_name='marketplace-offering-detail', name='offering__uuid')
    offering_uuid = django_filters.UUIDFilter(name='offering__uuid')

    o = django_filters.OrderingFilter(fields=('name', 'created'))

    class Meta(object):
        model = models.Screenshot
        fields = []


class OrderFilter(django_filters.FilterSet):
    project = core_filters.URLFilter(view_name='project-detail', name='project__uuid')
    project_uuid = django_filters.UUIDFilter(name='project__uuid')
    state = core_filters.MappedMultipleChoiceFilter(
        choices=[(representation, representation) for db_value, representation in models.Order.States.CHOICES],
        choice_mappings={representation: db_value for db_value, representation in models.Order.States.CHOICES},
    )
    o = django_filters.OrderingFilter(fields=('created', 'approved_at', 'total_cost', 'state'))

    class Meta(object):
        model = models.Order
        fields = []


class OrderItemFilter(django_filters.FilterSet):
    offering = core_filters.URLFilter(view_name='marketplace-offering-detail', name='offering__uuid')
    offering_uuid = django_filters.UUIDFilter(name='offering__uuid')
    project_uuid = django_filters.UUIDFilter(name='order__project__uuid')
    category_uuid = django_filters.UUIDFilter(name='offering__category__uuid')
    provider_uuid = django_filters.UUIDFilter(name='offering__customer__uuid')
    customer_uuid = django_filters.UUIDFilter(name='order__project__customer__uuid')
    state = core_filters.MappedMultipleChoiceFilter(
        choices=[(representation, representation) for db_value, representation in models.OrderItem.States.CHOICES],
        choice_mappings={representation: db_value for db_value, representation in models.OrderItem.States.CHOICES},
    )
    order = core_filters.URLFilter(view_name='marketplace-order-detail', name='order__uuid')
    order_uuid = django_filters.UUIDFilter(name='order__uuid')

    class Meta(object):
        model = models.OrderItem
        fields = []


class ResourceFilter(django_filters.FilterSet):
    offering = core_filters.URLFilter(view_name='marketplace-offering-detail', name='offering__uuid')
    offering_uuid = django_filters.UUIDFilter(name='offering__uuid')
    project_uuid = django_filters.UUIDFilter(name='project__uuid')
    customer_uuid = django_filters.UUIDFilter(name='project__customer__uuid')
    category_uuid = django_filters.UUIDFilter(name='offering__category__uuid')
    provider_uuid = django_filters.UUIDFilter(name='offering__customer__uuid')
    state = core_filters.MappedMultipleChoiceFilter(
        choices=[(representation, representation) for db_value, representation in models.Resource.States.CHOICES],
        choice_mappings={representation: db_value for db_value, representation in models.Resource.States.CHOICES},
    )

    class Meta(object):
        model = models.Resource
        fields = []


class ResourceScopeFilterBackend(core_filters.GenericKeyFilterBackend):
    def get_related_models(self):
        return []

    def get_field_name(self):
        return 'scope'


class PlanFilter(django_filters.FilterSet):
    offering = core_filters.URLFilter(view_name='marketplace-offering-detail', name='offering__uuid')
    offering_uuid = django_filters.UUIDFilter(name='offering__uuid')

    class Meta(object):
        model = models.Plan
        fields = []
Python
0
@@ -177,16 +177,65 @@ Backend%0A +from django_filters.widgets import BooleanWidget%0A from res @@ -1483,24 +1483,90 @@ ory__uuid')%0A + billable = django_filters.BooleanFilter(widget=BooleanWidget)%0A o = djan
150d3b5a8eebf69e22432406fe4a8217997995dc
Fix a typo
libdiscid/compat/discid.py
libdiscid/compat/discid.py
# -*- coding: utf-8 -*-

# Copyright 2013 Sebastian Ramacher <sebastian+dev@ramacher.at>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

""" python-discid compat layer

This module provides a compatible layer so that python-libdiscid can be used as
a replacement for python-discid. It provides an interface compatible with
python-discid version 1.0.2.
"""

from __future__ import division

import libdiscid
import libdiscid.discid
import operator
import functools

_INVERSE_FEATURES= {
  libdiscid.FEATURES_MAPPING[libdiscid.FEATURE_READ]: libdiscid.FEATURE_READ,
  libdiscid.FEATURES_MAPPING[libdiscid.FEATURE_MCN]: libdiscid.FEATURE_MCN,
  libdiscid.FEATURES_MAPPING[libdiscid.FEATURE_ISRC]: libdiscid.FEATURE_ISRC
}

class _NoneHelper(object):
  def __getattr__(self, name):
    if name in ('id', 'freedb_id', 'submission_url', 'first_track',
                'last_track', 'sectors', 'mcn'):
      return None
    return super(NoneHelper, self).__getattr__(name)

def _sectors_to_seconds(sectors):
  SECTORS_PER_SECOND = 75
  remainder = sectors % SECTORS_PER_SECOND
  return sectors // SECTORS_PER_SECOND + \
      (1 if remainder > SECTORS_PER_SECOND // 2 else 0)

def _decode(string):
  # Let's do the same thing discid is doing. It always accepts both strings and
  # unicode objects and encodes/decodes them as it sees fit. libdiscid always
  # wants unicode objects, so let's handle this.
  try:
    return string.decode()
  except AttributeError:
    return string

# exceptions defined in discid
DiscError = libdiscid.discid.DiscError

class TOCError(Exception):
  pass

# classes defined in discid
class Track(object):
  def __init__(self, disc, number):
    self.disc = disc
    self.number = number

  def __str__(self):
    return str(self.number)

  @property
  def offset(self):
    return self.disc.track_offsets[self.number - self.disc.first_track]

  @property
  def sectors(self):
    return self.disc.track_lengths[self.number - self.disc.first_track]

  length = sectors

  @property
  def seconds(self):
    return _sectors_to_seconds(self.sectors)

  @property
  def isrc(self):
    try:
      value = self.disc.track_isrcs[self.number - self.disc.first_track]
    except NotImplementedError:
      return None
    return value if value != '' else None

class Disc(object):
  def __init__(self):
    self.disc = _NoneHelper()
    self.tracks = []

  def read(self, device, features=[]):
    self.disc = libdiscid.read(device, functools.reduce(
        operator.or_, (_INVERSE_FEATURES[feature] for feature in FEATURES), 0))
    self.tracks = [ Track(self.disc, numb) for numb in
                    range(self.disc.first_track, self.disc.last_track + 1)]
    return True

  def put(self, first, last, disc_sectors, track_offsets):
    try:
      self.disc = libdiscid.put(first, last, disc_sectors, track_offsets)
    except DiscError as disc_error:
      raise TOCError(str(disc_error))
    self.tracks = [ Track(self.disc, num) for num in
                    range(self.disc.first_track, self.disc.last_track + 1)]
    return True

  @property
  def id(self):
    return self.disc.id

  @property
  def freedb_id(self):
    return self.disc.freedb_id

  @property
  def submission_url(self):
    return self.disc.submission_url

  @property
  def first_track_num(self):
    return self.disc.first_track

  @property
  def last_track_num(self):
    return self.disc.last_track

  @property
  def sectors(self):
    return self.disc.sectors

  length = sectors

  @property
  def seconds(self):
    return _sectors_to_seconds(self.sectors) if self.sectors is not None \
        else None

  @property
  def mcn(self):
    try:
      value = self.disc.mcn
    except NotImplementedError:
      return None
    return value if value != '' else None

# functions defined in discid
get_default_device = libdiscid.default_device

def read(device=None, features=[]):
  disc = Disc()
  disc.read(_decode(device), map(_decode, features))
  return disc

def put(first, last, disc_sectors, track_offsets):
  disc = Disc()
  disc.put(first, last, disc_sectors, track_offsets)
  return disc

# constants defined in discid
__version__ = '1.0.2 (compat layer from python-discid %s)' % \
    (libdiscid.__version__, )
""" This is the version of python-discid this layer is compatible with. """
LIBDISCID_VERSION_STRING = libdiscid.__discid_version__
FEATURES = libdiscid.FEATURES
FEATURES_IMPLEMENTED = (libdiscid.FEATURE_READ, libdiscid.FEATURE_MCN,
                        libdiscid.FEATURE_ISRC)
Python
0.999999
@@ -1942,16 +1942,17 @@ n super( +_ NoneHelp
c109728986a3a583fe037780c88bdaa458e663c4
Bump 2.1.1
appium/version.py
appium/version.py
version = '2.1.0'
Python
0.000011
@@ -12,7 +12,7 @@ 2.1. -0 +1 '%0A
28f3564470ec4f8df0bf4d2a90102489bef76bfe
Add a notification for raids, so we can do stuff.
apps/api/views.py
apps/api/views.py
# -*- coding: utf-8 -*-
from django.conf import settings
from django.shortcuts import get_object_or_404

from rest_framework import views, viewsets
from rest_framework.response import Response
from socketIO_client import SocketIO

from apps.broadcasts.models import Broadcast, Host, Raid, Series
from apps.games.models import Game, Platform
from apps.quotes.models import Quote
from apps.subscribers.models import Ticket

from .serializers import (BroadcastSerializer, GameSerializer, HostSerializer,
                          PlatformSerializer, QuoteSerializer, RaidSerializer,
                          SeriesSerializer, TicketSerializer)


def notify(event, data):
    data = data.copy()
    data['event'] = event

    with SocketIO('socket.avalonstar.tv') as socketIO:
        socketIO.emit('event sent', data)
        socketIO.wait(seconds=1)


class BroadcastViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Broadcast.objects.order_by('-number')
    serializer_class = BroadcastSerializer


class GameViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Game.objects.all()
    serializer_class = GameSerializer


class PlatformViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Platform.objects.all()
    serializer_class = PlatformSerializer


class QuoteViewSet(viewsets.ModelViewSet):
    queryset = Quote.objects.all()
    serializer_class = QuoteSerializer

    def retrieve(self, request, pk=None):
        if pk == '0':
            quote = Quote.objects.order_by('?').first()
            serializer = QuoteSerializer(quote)
            return Response(serializer.data)
        else:
            return super().retrieve(request, pk)


class HostViewSet(viewsets.ModelViewSet):
    queryset = Host.objects.order_by('-timestamp')
    serializer_class = HostSerializer

    def create(self, request, *args, **kwargs):
        notify('host', {'username': request.data['username']})
        return super().create(request, *args, **kwargs)


class RaidViewSet(viewsets.ModelViewSet):
    queryset = Raid.objects.order_by('-timestamp')
    serializer_class = RaidSerializer


class TicketViewSet(viewsets.ModelViewSet):
    queryset = Ticket.objects.order_by('-updated')
    serializer_class = TicketSerializer

    def create(self, request, *args, **kwargs):
        # TODO: Somehow sync the use of "name" and "username" across methods.
        notify('subscription', {'username': request.data['name']})
        return super().create(request, *args, **kwargs)

    def retrieve(self, request, pk=None):
        queryset = Ticket.objects.all()
        ticket = get_object_or_404(queryset, name=pk)
        serializer = TicketSerializer(ticket)
        return Response(serializer.data)

    def update(self, request, pk=None):
        data = request.data.copy()
        queryset = Ticket.objects.all()
        ticket = get_object_or_404(queryset, name=pk)
        data['name'] = ticket.name

        serializer = TicketSerializer(ticket, data=data)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)

        # If 'streak' is included in the payload, then we consider it a
        # "substreak" and should notify() as such.
        if 'streak' in request.data:
            notify('substreak', {
                'length': data['streak'], 'username': ticket.name})
        else:
            notify('resubscription', {'username': ticket.name})
        return Response(serializer.data)


class PusherDonationView(views.APIView):
    def post(self, request):
        notify('donation', request.data)
        return Response(status=202)


class PusherHostView(views.APIView):
    def post(self, request):
        notify('host', request.data)
        return Response(status=202)


class PusherResubscriptionView(views.APIView):
    def post(self, request):
        notify('resubscription', request.data)
        return Response(status=202)


class PusherSubscriptionView(views.APIView):
    def post(self, request):
        notify('subscription', request.data)
        return Response(status=202)


class PusherSubstreakView(views.APIView):
    def post(self, request):
        notify('substreak', request.data)
        return Response(status=202)
Python
0
@@ -2038,24 +2038,192 @@ Serializer%0A%0A + def create(self, request, *args, **kwargs):%0A notify('raid', %7B'username': request.data%5B'username'%5D%7D)%0A return super().create(request, *args, **kwargs)%0A%0A %0Aclass Ticke
05140304c1ef08e7e291eec92de4091320bdfc0e
Add acceleration to example
encoder/examples/encoder_lcd.py
encoder/examples/encoder_lcd.py
# -*- coding: utf-8 -*-
"""Read encoder and print position value to LCD."""

from machine import sleep_ms

from pyb_encoder import Encoder
from hd44780 import HD44780

class STM_LCDShield(HD44780):
    _default_pins = ('PD2','PD1','PD6','PD5','PD4','PD3')

def main():
    lcd.set_string("Value: ")
    lastval = 0
    while True:
        val = enc.value
        if lastval != val:
            lastpos = val
            lcd.set_cursor(6, 0)
            for c in "%3i" % val:
                lcd.send_byte(c)

        sleep_ms(50)

if __name__ == '__main__':
    lcd = STM_LCDShield()
    enc = Encoder('A0', 'A1', max_value=999)
    main()
Python
0
@@ -504,16 +504,74 @@ yte(c)%0A%0A + enc.cur_accel = max(0, enc.cur_accel - enc.accel)%0A @@ -681,16 +681,25 @@ alue=999 +, accel=5 )%0A ma
2268ebdc47b1d9221c06622a7b1992cae14013c2
Test endpoint for the web server
web/server.py
web/server.py
import http.client
import os

from flask import Flask
from pymongo import MongoClient

MONGO_URL = os.environ.get('MONGO_URL', 'mongodb://mongo:27017/')
MONGO_DATABASE = os.environ.get('MONGO_DATABASE', 'whistleblower')
DATABASE = MongoClient(MONGO_URL)[MONGO_DATABASE]
app = Flask(__name__)


@app.route('/facebook_webhook', methods=['POST'])
def facebook_webhook():
    DATABASE.facebook_webhook.insert(request.form)
    return ('', http.client.NO_CONTENT)
Python
0
@@ -287,16 +287,80 @@ me__)%0A%0A%0A +@app.route('/')%0Adef hello_world():%0A return 'Hello, World!'%0A%0A%0A @app.rou
db40a42c2825b157017e6730a2b5c95371bbe598
Allow user to adjust nyquist freq and freq spacing in cp_utils.py
arfit/cp_utils.py
arfit/cp_utils.py
import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np

def csample_from_files(datafile, chainfile, p, q):
    data = np.loadtxt(datafile)
    times, tind = np.unique(data[:,0], return_index=True)
    data = data[tind, :]

    chain = np.loadtxt(chainfile)
    assert chain.shape[1] == p + q + 5, 'dimension mismatch'

    return cm.CarmaSample(data[:,0], data[:,1], data[:,2], None, q=q, trace=chain[:,:-2], loglike=chain[:,-2], logpost=chain[:,-1])

def normalised_lombscargle(ts, ys, dys):
    model = LombScargleFast().fit(ts, ys, dys)

    T = np.max(ts)-np.min(ts)
    dts = np.diff(np.sort(ts))

    fny = 1.0/(2.0*np.min(dts))
    df = 1.0/T

    N = fny/df

    fs = np.linspace(df, fny, N)

    pows = model.score_frequency_grid(df, df, N)

    mu = 1.0/T*np.trapz(ys, ts)
    s2 = 1.0/T*np.trapz(np.square(ys-mu), ts)

    return fs, s2*pows/np.trapz(pows, fs)

def plot_psd_sample_data(sample):
    psd_low, psd_high, psd_med, fs = sample.plot_power_spectrum(doShow=False)

    plt.clf()

    plt.loglog(fs, psd_med, '-b', alpha=0.33)
    plt.fill_between(fs, psd_low, psd_high, color='b', alpha=0.17)

    fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig)

    bw = fs[-1] - fs[0]
    T = sample.time[-1] - sample.time[0]
    s2 = 1/T*np.trapz(np.square(sample.ysig), sample.time)
    noise_level = s2/bw
    levels = noise_level*np.sqrt(sample.get_samples('measerr_scale'))

    plt.axhline(np.median(levels), color='g', alpha=0.33)
    plt.fill_between(fs, np.percentile(levels, 84)+0*fs, np.percentile(levels, 16)+0*fs, color='g', alpha=0.17)

    plt.loglog(fs, psd, '-r', alpha=0.33)

def plot_psd_sample_draw(sample, loc='upper left'):
    fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig)

    ys_draw = sample.predict(sample.time, bestfit='random')[0]

    fs, dpsd = normalised_lombscargle(sample.time, ys_draw, sample.ysig)

    plt.loglog(fs, psd, '-k', label='Data', alpha=0.5)
    plt.loglog(fs, dpsd, '-b', label='Prediction', alpha=0.5)

    plt.legend(loc=loc)
Python
0
@@ -549,16 +549,50 @@ ys, dys +, oversampling=5, nyquist_factor=3 ):%0A m @@ -642,211 +642,151 @@ -T = np.max(ts)-np.min(ts)%0A dts = np.diff(np.sort(ts))%0A%0A fny = 1.0/(2.0*np.min(dts) +pers, pows = model.periodogram_auto(oversampling=oversampling, nyquist_factor=nyquist_factor )%0A -d f +s = 1.0/ -T%0A%0A N = fny/df +pers %0A%0A -fs +T = np. -linspace(df, fny, N)%0A%0A pows = model.score_frequency_grid(df, df, N +max(ts) - np.min(ts )%0A%0A @@ -790,26 +790,24 @@ %0A%0A mu = 1 -.0 /T*np.trapz( @@ -824,18 +824,16 @@ s2 = 1 -.0 /T*np.tr @@ -934,16 +934,50 @@ a(sample +, oversampling=5, nyquist_factor=3 ):%0A p @@ -1246,24 +1246,82 @@ sample.ysig +, oversampling=oversampling, nyquist_factor=nyquist_factor )%0A%0A bw = @@ -1793,16 +1793,50 @@ er left' +, oversampling=5, nyquist_factor=3 ):%0A f @@ -1897,24 +1897,82 @@ sample.ysig +, oversampling=oversampling, nyquist_factor=nyquist_factor )%0A%0A ys_dr @@ -2097,16 +2097,74 @@ ple.ysig +, oversampling=oversampling, nyquist_factor=nyquist_factor )%0A%0A p
762397854e90d0288081f0741fb742024217be8f
fix for a bug reported by Kirill (unknown charset '8859-1')
lib/request/basic.py
lib/request/basic.py
#!/usr/bin/env python

"""
$Id$

Copyright (c) 2006-2010 sqlmap developers (http://sqlmap.sourceforge.net/)
See the file 'doc/COPYING' for copying permission
"""

import codecs
import gzip
import os
import re
import StringIO
import zlib

from lib.core.common import extractErrorMessage
from lib.core.common import extractRegexResult
from lib.core.common import getCompiledRegex
from lib.core.common import getUnicode
from lib.core.common import isWindowsDriveLetterPath
from lib.core.common import posixToNtSlashes
from lib.core.common import sanitizeAsciiString
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.settings import LIST_EMAIL
from lib.core.settings import META_CHARSET_REGEX
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import UNICODE_ENCODING
from lib.parse.headers import headersParser
from lib.parse.html import htmlParser

def forgeHeaders(cookie, ua, referer):
    """
    Prepare HTTP Cookie, HTTP User-Agent and HTTP Referer headers to use when performing
    the HTTP requests
    """

    headers = {}

    for header, value in conf.httpHeaders:
        if cookie and header == "Cookie":
            headers[header] = cookie
        elif ua and header == "User-Agent":
            headers[header] = ua
        elif referer and header == "Referer":
            headers[header] = referer
        else:
            headers[header] = value

    if kb.redirectSetCookie:
        if "Cookie" in headers:
            headers["Cookie"] = "%s; %s" % (headers["Cookie"], kb.redirectSetCookie)
        else:
            headers["Cookie"] = kb.redirectSetCookie

    return headers

def parseResponse(page, headers):
    """
    @param page: the page to parse to feed the knowledge base htmlFp
    (back-end DBMS fingerprint based upon DBMS error messages return
    through the web application) list and absFilePaths (absolute file
    paths) set.
    """

    if headers:
        headersParser(headers)

    if page:
        htmlParser(page)

        # Detect injectable page absolute system path
        # NOTE: this regular expression works if the remote web
        # application is written in PHP and debug/error messages are
        # enabled
        for regex in ( r" in <b>(?P<result>.*?)</b> on line",  r"(?:>|\s)(?P<result>[A-Za-z]:[\\/][\w.\\/]*)", r"(?:>|\s)(?P<result>/\w[/\w.]+)" ):
            regObj = getCompiledRegex(regex)
            for match in regObj.finditer(page):
                absFilePath = match.group("result").strip()
                page = page.replace(absFilePath, "")

                if isWindowsDriveLetterPath(absFilePath):
                    absFilePath = posixToNtSlashes(absFilePath)

                if absFilePath not in kb.absFilePaths:
                    kb.absFilePaths.add(absFilePath)

def checkCharEncoding(encoding):
    if encoding:
        encoding = encoding.lower()
    else:
        return encoding

    # http://www.destructor.de/charsets/index.htm
    translate = { 'windows-874': 'iso-8859-11', 'en_us': 'utf8' }

    for delimiter in (';', ','):
        if delimiter in encoding:
            encoding = encoding[:encoding.find(delimiter)]

    # http://philip.html5.org/data/charsets-2.html
    if encoding in translate:
        encoding = translate[encoding]
    elif encoding.startswith('iso-8858'): #very popular typo
        encoding = encoding.replace('8858', '8859')
    elif encoding.startswith('cp-'):
        encoding = 'cp%s' % encoding[3:]
    elif encoding.startswith('windows') and not encoding.startswith('windows-'):
        encoding = 'windows-%s' % encoding[7:]
    elif encoding == 'null':
        return None

    # http://www.iana.org/assignments/character-sets
    try:
        codecs.lookup(encoding)
    except LookupError:
        warnMsg = "unknown charset '%s'. " % encoding
        warnMsg += "Please report by e-mail to %s." % LIST_EMAIL
        logger.warn(warnMsg)
        encoding = UNICODE_ENCODING

    return encoding

def decodePage(page, contentEncoding, contentType):
    """
    Decode compressed/charset HTTP response
    """

    if not page or (conf.nullConnection and len(page) < 2):
        return getUnicode(page)

    if isinstance(contentEncoding, basestring) and contentEncoding.lower() in ('gzip', 'x-gzip', 'deflate'):
        if contentEncoding == 'deflate':
            # http://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations
            data = StringIO.StringIO(zlib.decompress(page, -15))
        else:
            data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(page))
        page = data.read()

    charset = None

    # http://stackoverflow.com/questions/1020892/python-urllib2-read-to-unicode
    if contentType and (contentType.find('charset=') != -1):
        charset = contentType.split('charset=')[-1]
    elif extractRegexResult(META_CHARSET_REGEX, page, re.DOTALL | re.IGNORECASE):
        charset = extractRegexResult(META_CHARSET_REGEX, page, re.DOTALL | re.IGNORECASE)

    charset = checkCharEncoding(charset)
    kb.pageEncoding = charset or DEFAULT_PAGE_ENCODING

    if contentType and any(map(lambda x: x in contentType.lower(), ('text/txt', 'text/raw', 'text/html', 'text/xml'))):
        # can't do for all responses because we need to support binary files too
        page = getUnicode(page, kb.pageEncoding)

    return page

def processResponse(page, responseHeaders):
    parseResponse(page, responseHeaders)

    if conf.parseErrors:
        msg = extractErrorMessage(page)

        if msg:
            logger.info("parsed error message: '%s'" % msg)

    return page
Python
0
@@ -3196,24 +3196,206 @@ er)%5D%0A%0A # +popular typos/errors%0A if '8858' in encoding:%0A encoding = encoding.replace('8858', '8859')%0A if encoding.startswith('8859'):%0A encoding = 'iso-%25s' %25 encoding%0A%0A # http://phili @@ -3499,121 +3499,8 @@ ing%5D -%0A elif encoding.startswith('iso-8858'): #very popular typo%0A encoding = encoding.replace('8858', '8859') %0A
5169175c8e6d88e67849006d976e4d3967e113c5
fix pep8 error
rasa_nlu/extractors/duckling_http_extractor.py
rasa_nlu/extractors/duckling_http_extractor.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
import os
import time

import requests
import simplejson
from typing import Any
from typing import List
from typing import Optional
from typing import Text

from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.extractors import EntityExtractor
from rasa_nlu.extractors.duckling_extractor import (
    filter_irrelevant_matches, convert_duckling_format_to_rasa)
from rasa_nlu.model import Metadata
from rasa_nlu.training_data import Message

logger = logging.getLogger(__name__)


class DucklingHTTPExtractor(EntityExtractor):
    """Searches for structured entites, e.g. dates, using a duckling server."""

    name = "ner_duckling_http"

    provides = ["entities"]

    defaults = {
        # by default all dimensions recognized by duckling are returned
        # dimensions can be configured to contain an array of strings
        # with the names of the dimensions to filter for
        "dimensions": None,

        # http url of the running duckling server
        "url": None,

        # locale - if not set, we will use the language of the model
        "locale": None,

        # timezone like Europe/Berlin
        # if not set the default timezone of Duckling is going to be used
        "timezone": None
    }

    def __init__(self, component_config=None, language=None):
        # type: (Text, Optional[List[Text]]) -> None

        super(DucklingHTTPExtractor, self).__init__(component_config)
        self.language = language

    @classmethod
    def create(cls, config):
        # type: (RasaNLUModelConfig) -> DucklingHTTPExtractor

        return DucklingHTTPExtractor(config.for_component(cls.name,
                                                          cls.defaults),
                                     config.language)

    def _locale(self):
        if not self.component_config.get("locale"):
            # this is king of a quick fix to generate a proper locale
            # works most of the time
            locale_fix = "{}_{}".format(self.language, self.language.upper())
            self.component_config["locale"] = locale_fix
        return self.component_config.get("locale")

    def _url(self):
        """Return url of the duckling service. Environment var will override."""

        if os.environ.get("RASA_DUCKLING_HTTP_URL"):
            return os.environ["RASA_DUCKLING_HTTP_URL"]

        return self.component_config.get("url")

    def _payload(self, text, reference_time):
        return {
            "text": text,
            "locale": self._locale(),
            "tz": self.component_config.get("timezone"),
            "reftime": reference_time
        }

    def _duckling_parse(self, text, reference_time):
        """Sends the request to the duckling server and parses the result."""

        try:
            payload = self._payload(text, reference_time)
            headers = {"Content-Type": "application/x-www-form-urlencoded; "
                                       "charset=UTF-8"}
            response = requests.post(self._url() + "/parse",
                                     data=payload,
                                     headers=headers)
            if response.status_code == 200:
                return simplejson.loads(response.text)
            else:
                logger.error("Failed to get a proper response from remote "
                             "duckling. Status Code: {}. Response: {}"
                             "".format(response.status_code, response.text))
                return []
        except requests.exceptions.ConnectionError as e:
            logger.error("Failed to connect to duckling http server. Make sure "
                         "the duckling server is running and the proper host "
                         "and port are set in the configuration. More "
                         "information on how to run the server can be found on "
                         "github: "
                         "https://github.com/facebook/duckling#quickstart "
                         "Error: {}".format(e))
            return []

    @staticmethod
    def _reference_time_from_message(message):
        if message.time is not None:
            try:
                return int(message.time)*1000
            except ValueError as e:
                logging.warning("Could not parse timestamp {}. Instead "
                                "current UTC time will be passed to "
                                "duckling. Error: {}".format(message.time, e))
        # fallbacks to current time, multiplied by 1000 because duckling
        # requires the reftime in miliseconds
        return int(time.time())*1000

    def process(self, message, **kwargs):
        # type: (Message, **Any) -> None

        if self._url() is not None:
            reference_time = self._reference_time_from_message(message)
            matches = self._duckling_parse(message.text, reference_time)
            dimensions = self.component_config["dimensions"]
            relevant_matches = filter_irrelevant_matches(matches, dimensions)
            extracted = convert_duckling_format_to_rasa(relevant_matches)
        else:
            extracted = []
            logger.warning("Duckling HTTP component in pipeline, but no "
                           "`url` configuration in the config "
                           "file nor is `RASA_DUCKLING_HTTP_URL` "
                           "set as an environment variable.")

        extracted = self.add_extractor_name(extracted)
        message.set("entities",
                    message.get("entities", []) + extracted,
                    add_to_output=True)

    @classmethod
    def load(cls,
             model_dir=None,  # type: Text
             model_metadata=None,  # type: Metadata
             cached_component=None,  # type: Optional[DucklingHTTPExtractor]
             **kwargs  # type: **Any
             ):
        # type: (...) -> DucklingHTTPExtractor

        component_config = model_metadata.for_component(cls.name)
        return cls(component_config, model_metadata.get("language"))
Python
0.000001
@@ -4357,17 +4357,19 @@ ge.time) -* + * 1000%0A @@ -4773,17 +4773,19 @@ .time()) -* + * 1000%0A%0A
e76c6be06e0bfcb0f2e0ac2876dee46c6236ea6c
mark developer help function
ports/pyqt-webengine/next-pyqt.py
ports/pyqt-webengine/next-pyqt.py
import logging
import utility

import buffers
import window

# from dbus.mainloop.glib import DBusGMainLoop
import dbus
import dbus.service
from dbus.mainloop.pyqt5 import DBusQtMainLoop
from PyQt5.QtWidgets import QApplication

logging.basicConfig(level=logging.INFO)

"""
This is a Next port with Qt's Web Engine, through PyQt.

It is possible to test this from the Python or the Lisp REPL.

To send commands to the web engine from Lisp:
- start the PyQt port (make run)
- start lisp, quickload next
- start and initialize the lisp side:

    (in-package :next)
    (start)

this creates an `*interface*` object.
Now you can use any built-in methods such as (window-make *interface*).
"""

PLATFORM_PORT_OBJECT_PATH = "/engineer/atlas/next/platform"
PLATFORM_PORT_NAME = "engineer.atlas.next.platform"


class DBusWindow(dbus.service.Object):
    # lisp core dbus proxy.
    core_dbus_proxy = None

    def __init__(self, conn, object_path=PLATFORM_PORT_OBJECT_PATH,
                 core_dbus_proxy=None):
        dbus.service.Object.__init__(self, conn, object_path)
        self.core_dbus_proxy = core_dbus_proxy

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='s')
    def window_make(self, window_id):
        return window.make(window_id)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def window_set_title(self, window_id, title):
        _window = window.get_window(window_id)
        return _window.set_title(title)

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='s')
    def window_delete(self, window_id):
        _window = window.get_window(window_id)
        return _window.delete(window_id)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def window_killall(self):
        return window.killall()

    @dbus.service.method(PLATFORM_PORT_NAME)
    def window_active(self):
        pass

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='s')
    def window_exists(self, window_id):
        return window.exists(window_id)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def window_list(self):
        return window.list_windows()

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='ss')
    def window_set_active_buffer(self, window_id, buffer_id):
        _window = window.get_window(window_id)
        _buffer = buffers.get_buffer(buffer_id)
        return _window.set_active_buffer(_buffer)

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='si')
    def window_set_minibuffer_height(self, window_id, height):
        _window = window.get_window(window_id)
        _window.set_minibuffer_height(height)

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='s')
    def buffer_make(self, buffer_id):
        return buffers.make(buffer_id)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def buffer_delete(self, buffer_id):
        pass

    @dbus.service.method(PLATFORM_PORT_NAME)
    def buffer_load(self, buffer_id, url):
        _buffer = buffers.get_buffer(buffer_id)
        _buffer.load(url)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def buffer_evaluate_javascript(self, buffer_id, script):
        _buffer = buffers.get_buffer(buffer_id)
        return _buffer.evaluate_javascript(script)

    @dbus.service.method(PLATFORM_PORT_NAME, in_signature='ss')
    def minibuffer_evaluate_javascript(self, window_id, script):
        _window = window.get_window(window_id)
        _window.minibuffer_evaluate_javascript(script)

    @dbus.service.method(PLATFORM_PORT_NAME)
    def generate_input_event(self):
        utility.generate_input_event()


def main():
    app = QApplication([])
    DBusQtMainLoop(set_as_default=True)
    # DBusGMainLoop(set_as_default=True)
    session_bus = dbus.SessionBus()
    # name/dbuswindow MUST be defined even if not used.
    name = dbus.service.BusName('engineer.atlas.next.platform', session_bus)  # noqa: F841
    CORE_INTERFACE = "engineer.atlas.next.core"
    CORE_OBJECT_PATH = "/engineer/atlas/next/core"
    core_dbus_proxy = session_bus.get_object(CORE_INTERFACE, CORE_OBJECT_PATH)
    dbuswindow = DBusWindow(session_bus, core_dbus_proxy=core_dbus_proxy)  # noqa: F841
    window.make("0", core_dbus_proxy)

    app.exec_()


if __name__ == '__main__':
    main()
Python
0.000001
@@ -1602,32 +1602,63 @@ ete(window_id)%0A%0A + # DEVELOPER HELP FUNCTION%0A @dbus.servic @@ -1731,22 +1731,23 @@ return -window +utility .killall
4fd80a9a593a4f9100899e96a383782c68a41af1
Fix to subtract USDT withdrawals from balance
poloniex_apis/api_models/deposit_withdrawal_history.py
poloniex_apis/api_models/deposit_withdrawal_history.py
from collections import defaultdict

from poloniex_apis.api_models.ticker_price import TickerData


class DWHistory:
    def __init__(self, history):
        self.withdrawals = defaultdict(float)
        self.deposits = defaultdict(float)
        self.history = history

    def get_dw_history(self):
        for deposit in self.history['deposits']:
            if deposit['currency'] in self.deposits:
                self.deposits[deposit['currency']] += float(deposit['amount'])
            else:
                self.deposits[deposit['currency']] = float(deposit['amount'])
        for withdrawal in self.history['withdrawals']:
            if withdrawal['currency'] in self.withdrawals:
                self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
            else:
                self.withdrawals[withdrawal['currency']] = float(withdrawal['amount'])
        return self.deposits, self.withdrawals

    def get_btc_balance(self, ticker):
        balance = 0
        for deposit_symbol, amount in self.deposits.items():
            if deposit_symbol == u"USDT":
                balance += amount * ticker.get_price("USDT_BTC")
            if deposit_symbol != u'BTC':
                balance += amount * ticker.get_price("BTC_" + deposit_symbol)
            else:
                balance += amount
        for withdrawal_symbol, amount in self.withdrawals.items():
            if withdrawal_symbol == u"USDT":
                balance += amount * ticker.get_price("USDT_BTC")
            if withdrawal_symbol != u'BTC':
                balance -= amount * ticker.get_price("BTC_" + withdrawal_symbol)
            else:
                balance -= amount
        return balance
Python
0.000007
@@ -1452,33 +1452,33 @@ balance -+ +- = amount * ticke
1e16048c7ceb50377fdfdda3a39ef9910d2021bb
Bump version to 0.2
wagtail_simple_gallery/__init__.py
wagtail_simple_gallery/__init__.py
__version__ = '0.1'
Python
0.000001
@@ -14,6 +14,6 @@ '0. -1 +2 '
f5f33b7d7ae8a6eefe60d826c06fbbf67de95861
Make html protected
lightning/visualization.py
lightning/visualization.py
import requests
import json
import webbrowser


class Visualization(object):

    def __init__(self, session=None, json=None, auth=None):
        self.session = session
        self.id = json.get('id')
        self.auth = auth

        if self.session.lgn.ipython_enabled:
            from IPython.kernel.comm import Comm
            self.comm = Comm('lightning', {'id': self.id})
            self.comm_handlers = {}
            self.comm.on_msg(self._handle_comm_message)

    def _format_url(self, url):
        if not url.endswith('/'):
            url += '/'
        try:
            from urllib.parse import quote
        except ImportError:
            from urllib import quote
        return url + '?host=' + quote(self.session.host)

    def _update_image(self, image):
        url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/images'
        url = self._format_url(url)
        files = {'file': image}
        return requests.put(url, files=files, data={'type': 'image'}, auth=self.auth)

    def _append_image(self, image):
        url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/images'
        url = self._format_url(url)
        files = {'file': image}
        return requests.post(url, files=files, data={'type': 'image'}, auth=self.auth)

    def _append_data(self, data=None, field=None):
        payload = {'data': data}
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/'
        if field:
            url += field
        url = self._format_url(url)
        return requests.post(url, data=json.dumps(payload), headers=headers, auth=self.auth)

    def _update_data(self, data=None, field=None):
        payload = {'data': data}
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/'
        if field:
            url += field
        url = self._format_url(url)
        return requests.put(url, data=json.dumps(payload), headers=headers, auth=self.auth)

    def get_permalink(self):
        return self.session.host + '/visualizations/' + str(self.id)

    def get_embed_link(self):
        return self._format_url(self.get_permalink() + '/embed')

    def get_html(self):
        r = requests.get(self.get_embed_link(), auth=self.auth)
        return r.text

    def open(self):
        webbrowser.open(self.session.host + '/visualizations/' + str(self.id) + '/')

    def delete(self):
        url = self.get_permalink()
        return requests.delete(url)

    def on(self, event_name, handler):
        if self.session.lgn.ipython_enabled:
            self.comm_handlers[event_name] = handler
        else:
            raise Exception('The current implementation of this method is only compatible with IPython.')

    def _handle_comm_message(self, message):
        # Parsing logic taken from similar code in matplotlib
        message = json.loads(message['content']['data'])

        if message['type'] in self.comm_handlers:
            self.comm_handlers[message['type']](message['data'])

    @classmethod
    def create(cls, session=None, data=None, images=None, type=None, options=None):

        if options is None:
            options = {}

        url = session.host + '/sessions/' + str(session.id) + '/visualizations'

        if not images:
            payload = {'data': data, 'type': type, 'opts': options}
            headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
            r = requests.post(url, data=json.dumps(payload), headers=headers, auth=session.auth)
            if not r.status_code == requests.codes.ok:
                raise Exception('Problem uploading data')

            viz = cls(session=session, json=r.json(), auth=session.auth)
        else:
            first_image, remaining_images = images[0], images[1:]
            files = {'file': first_image}
            r = requests.post(url, files=files, data={'type': type, 'opts': options}, auth=session.auth)
            if not r.status_code == requests.codes.ok:
                raise Exception('Problem uploading images')

            viz = cls(session=session, json=r.json(), auth=session.auth)
            for image in remaining_images:
                viz._append_image(image)

        return viz


class VisualizationLocal(object):

    def __init__(self, html):
        self.html = html

    @classmethod
    def create(cls, host=None, id=None, data=None, images=None, type=None, options=None):

        import base64
        from jinja2 import Template, escape
        import lightning
        import os

        loc = os.path.join(os.path.dirname(lightning.__file__), 'lib/template.html')
        t = Template(open(loc).read())

        options = escape(json.dumps(options))
        fields = {'viz': type, 'host': host, 'id': id, 'options': options}

        if images:
            bytes = ['data:image/png;base64,' + base64.b64encode(img) + ',' for img in images]
            fields['images'] = escape(json.dumps(bytes))
        else:
            data = escape(json.dumps(data))
            fields['data'] = data

        html = t.render(**fields)

        viz = cls(html)
        return viz

    def get_html(self):
        return self.html
Python
0.000013
@@ -4643,24 +4643,25 @@ self. +_ html = html%0A @@ -5512,12 +5512,13 @@ rn self. +_ html
7e9d3b3d2c4e46c2b16595b7acc6aa670ece9e6e
use correct API to save to bucket.
astrobin/tasks.py
astrobin/tasks.py
from django.conf import settings

from celery.decorators import task
from celery.task.sets import subtask

from PIL import Image as PILImage
from subprocess import call
import StringIO
import os
import os.path

from image_utils import *
from s3 import *
from notifications import *


@task()
def solve_image(image, callback=None):
    # Solve
    path = settings.UPLOADS_DIRECTORY
    uid = image.filename
    original_ext = image.original_ext

    solved = False
    command = ['nice', '-n', '5',
               '/usr/local/astrometry/bin/solve-field',
               path + uid + original_ext]
    call(command)

    solved_filename = settings.UPLOADS_DIRECTORY + image.filename + '-ngc.png'
    if os.path.exists(settings.UPLOADS_DIRECTORY + image.filename + '.solved'):
        solved = True

        solved_file = open(solved_filename)
        solved_data = StringIO.StringIO(solved_file.read())
        solved_image = PILImage.open(solved_data)

        (w, h) = solved_image.size
        (w, h) = scale_dimensions(w, h, settings.RESIZED_IMAGE_SIZE)
        solved_resizedImage = solved_image.resize((w, h), PILImage.ANTIALIAS)

        # Then save to bucket
        solved_resizedFile = StringIO.StringIO()
        solved_resizedImage.save(solved_resizedFile, 'PNG')
        save_to_bucket(solved_resizedFile.getvalue(),
                       'image/png',
                       settings.S3_SOLVED_BUCKET,
                       uid,
                       '.png')

    if solved:
        push_notification([image.user], 'image_solved',
                          {'object_url':image.get_absolute_url() + '?mod=solved'})
    else:
        push_notification([image.user], 'image_not_solved',
                          {'object_url':image.get_absolute_url()})

    if callback:
        callback(image, solved, '%s%s*' % (path, uid))


@task()
def store_image(image, solve, callback=None):
    try:
        store_image_in_s3(settings.UPLOADS_DIRECTORY, image.filename, image.original_ext)
    except S3CreateError, exc:
        store_image.retry(exc=exc)

    push_notification([image.user], 'image_ready', {'object_url':image.get_absolute_url()})

    if callback:
        callback(image, True, solve)


@task
def delete_image(filename, ext):
    delete_image_from_s3(filename, ext)
Python
0
@@ -1257,16 +1257,37 @@ _bucket( +uid + '_solved.png', solved_r @@ -1311,153 +1311,8 @@ ue() -,%0A 'image/png',%0A settings.S3_SOLVED_BUCKET,%0A uid,%0A '.png' )%0A
4b1f8fb2077055bc97e8a69ff8e36ef53567ccb4
Remove extraneous debug stuff.
audio_matching.py
audio_matching.py
import os, sys
import pyaudio
import wave
import numpy
import scikits.audiolab as audiolab
import matplotlib.pyplot as pyplot
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, iterate_structure, binary_erosion

from time import sleep # DEBUG

# Audio stream settings.
AUDIO_PATH = './audio_files/'
FORMAT = pyaudio.paInt16
RATE = 44100
CHUNK = 1024

# Filter settings.
AMP_MIN = 1e-10

audio = pyaudio.PyAudio()

def get_audio_files():
    result = {}
    files = [ f for f in os.listdir(AUDIO_PATH) if os.path.isfile(os.path.join(AUDIO_PATH, f)) ]
    index = 1
    for filename in files:
        if filename == '.DS_Store':
            continue
        result[index] = filename
        index = index + 1
    return result

audio_files = get_audio_files()

def show_menu():
    os.system('clear')
    print "\n-- Audio matching with Python --"
    print "\n (1) Record an audio clip."
    if audio_files is not None:
        print " (2) Play an audio clip."
        print " (3) Match an audio clip."
    print "\n-- Audio library --\n"
    for index in audio_files:
        print "{:3}: {}".format(index, audio_files[index])
    try:
        return int(raw_input("\nPlease choose an option from the menu above, or CTRL-C and ENTER to quit: "))
    except:
        raise

def get_2D_peaks(array2D):
    # This function is based on the function 'get_2D_peaks()' available at the URL below.
    # https://github.com/worldveil/dejavu/blob/master/dejavu/fingerprint.py
    # Copyright (c) 2013 Will Drevo, use permitted under the terms of the open-source MIT License.

    # Create a filter to extract peaks from the image data.
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 25)

    # Find local maxima using our fliter shape. These are boolean arrays.
    local_maxima = maximum_filter(array2D, footprint=neighborhood) == array2D
    background = (array2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of array2D with True at peaks.
    detected_peaks = local_maxima - eroded_background

    # Extract peak amplitudes and locations.
    amps = array2D[detected_peaks]
    j, i = numpy.where(detected_peaks)

    # Filter peaks for those exceeding the minimum amplitude.
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]

    # Get frequency and time at peaks.
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return (frequency_idx, time_idx)

def record():
    try:
        print "\nYou chose record."
        filename = raw_input("Please enter a filename: ")
        print "Recording to '%s.wav'... Hit CTRL-C to stop recording." % filename
        frames = []
        while True:
            try:
                stream = audio.open(format=FORMAT, channels=1, rate=RATE, input=True, frames_per_buffer=CHUNK)
                data = stream.read(CHUNK)
                frames.append(data)
            except KeyboardInterrupt:
                # Stop recording.
                stream.stop_stream()
                stream.close()

                # Write to a WAV file.
                wave_file = wave.open(AUDIO_PATH + filename + '.wav', 'wb')
                wave_file.setnchannels(1)
                wave_file.setsampwidth(audio.get_sample_size(FORMAT))
                wave_file.setframerate(RATE)
                wave_file.writeframes(b''.join(frames))
                wave_file.close()
                break
    except:
        pass

def play():
    print "\nYou chose play."
    try:
        index_choice = int(raw_input("Please choose an audio file from the library to play: "))
        wave_file = wave.open(AUDIO_PATH + audio_files[index_choice], 'rb')
        stream = audio.open(format=FORMAT, channels=1, rate=RATE, output=True)
        data = wave_file.readframes(CHUNK)
        while data != '':
            stream.write(data)
            data = wave_file.readframes(CHUNK)
        stream.stop_stream()
        stream.close()
    except:
        pass

def match():
    print "\nYou chose match."
    try:
        index_choice = int(raw_input("Please choose an audio file from the library to match: "))
        signal, fs, enc = audiolab.wavread(AUDIO_PATH + audio_files[index_choice])
        specgram, frequency, time, img = pyplot.specgram(signal, Fs=fs)
        peak_indices = get_2D_peaks(specgram)

        peak_times = [time[time_idx] for time_idx in peak_indices[1]]
        peak_frequencies = [frequency[freq_idx] for freq_idx in peak_indices[0]]

        # Plot the spectrogram image with an overlayed scatter plot of local peaks.
        pyplot.xlim(right=max(time))
        pyplot.ylim(top=20000)
        pyplot.ylabel('Frequency (Hz)')
        pyplot.xlabel('Time (s)')
        pyplot.title(audio_files[index_choice])
        pyplot.scatter(peak_times, peak_frequencies)
        pyplot.show()
    except:
        pass

# Define the menu options.
menu_option = {
    1: record,
    2: play,
    3: match,
}

while True:
    try:
        # Put matplotlib in interactive mode so that the plots are shown in a background thread.
        pyplot.ion()

        # Main loop prompting user with menu options
        choice = show_menu()

        # Execute the menu option
        menu_option[choice]()

        audio_files = get_audio_files()
    except KeyboardInterrupt:
        print ""
        sys.exit(0)
Python
0
@@ -271,40 +271,8 @@ on%0A%0A -from time import sleep # DEBUG%0A%0A # Au
555b48cf41cea940a3a13c9570d03876db3b62cd
add comment explaining purpose of clearing sys.modules in DataSetRepo
src/main/python/smv/datasetrepo.py
src/main/python/smv/datasetrepo.py
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import traceback
import pkgutil
import inspect

from error import SmvRuntimeError
from utils import for_name, smv_copy_array

"""Python implementations of IDataSetRepoPy4J and IDataSetRepoFactoryPy4J interfaces
"""

class DataSetRepoFactory(object):
    def __init__(self, smvApp):
        self.smvApp = smvApp

    def createRepo(self):
        try:
            return DataSetRepo(self.smvApp)
        except BaseException as e:
            traceback.print_exc()
            raise e

    class Java:
        implements = ['org.tresamigos.smv.IDataSetRepoFactoryPy4J']


class DataSetRepo(object):
    def __init__(self, smvApp):
        self.smvApp = smvApp
        self._clear_sys_modules()

    def _clear_sys_modules(self):
        """Clear all client modules from sys.modules
        """
        for fqn in sys.modules.keys():
            for stage_name in self.smvApp.stages:
                if fqn.startswith(stage_name):
                    sys.modules.pop(fqn)
                    break

    # Implementation of IDataSetRepoPy4J loadDataSet, which loads the dataset
    # from the most recent source
    def loadDataSet(self, fqn):
        try:
            ds = for_name(fqn)(self.smvApp)

            # Python issue https://bugs.python.org/issue1218234
            # need to invalidate inspect.linecache to make dataset hash work
            srcfile = inspect.getsourcefile(ds.__class__)
            if srcfile:
                inspect.linecache.checkcache(srcfile)

            return ds
        except BaseException as e:
            traceback.print_exc()
            raise e

    def dataSetsForStage(self, stageName):
        try:
            return self._moduleUrnsForStage(stageName, lambda obj: obj.IsSmvPyDataSet)
        except BaseException as e:
            traceback.print_exc()
            raise e

    def outputModsForStage(self, stageName):
        return self.moduleUrnsForStage(stageName, lambda obj: obj.IsSmvPyModule and obj.IsSmvPyOutput)

    def _moduleUrnsForStage(self, stageName, fn):
        # `walk_packages` can generate AttributeError if the system has
        # Gtk modules, which are not designed to use with reflection or
        # introspection. Best action to take in this situation is probably
        # to simply suppress the error.
        def err(name): pass
            # print("Error importing module %s" % name)
            # t, v, tb = sys.exc_info()
            # print("type is {0}, value is {1}".format(t, v))

        buf = []
        # import the stage and only walk the packages in the path of that stage, recursively
        try:
            stagemod = __import__(stageName)
        except:
            # may be a scala-only stage
            pass
        else:
            for loader, name, is_pkg in pkgutil.walk_packages(stagemod.__path__, stagemod.__name__ + '.' , onerror=err):
                # The additional "." is necessary to prevent false positive, e.g. stage_2.M1 matches stage
                if name.startswith(stageName + ".") and not is_pkg:
                    pymod = __import__(name)
                    for c in name.split('.')[1:]:
                        pymod = getattr(pymod, c)

                    for n in dir(pymod):
                        obj = getattr(pymod, n)
                        try:
                            # Class should have an fqn which begins with the stageName.
                            # Each package will contain among other things all of
                            # the modules that were imported into it, and we need
                            # to exclude these (so that we only count each module once)
                            if fn(obj) and obj.fqn().startswith(name):
                                buf.append(obj.urn())
                        except AttributeError:
                            continue

        return smv_copy_array(self.smvApp.sc, *buf)

    def notFound(self, modUrn, msg):
        raise ValueError("dataset [{0}] is not found in {1}: {2}".format(modUrn, self.__class__.__name__, msg))

    class Java:
        implements = ['org.tresamigos.smv.IDataSetRepoPy4J']
Python
0
@@ -1217,24 +1217,141 @@ pp = smvApp%0A + # Remove client modules from sys.modules to force reload of all client%0A # code in the new transaction%0A self @@ -1877,28 +1877,16 @@ smvApp)%0A - %0A
70f137998b2cc3b9c873a57e17a435c6ca181192
improve code for getting the pricelist
product_supplier_intercompany/models/purchase_order.py
product_supplier_intercompany/models/purchase_order.py
# Copyright 2021 Akretion (https://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).

from odoo import _, models
from odoo.exceptions import UserError


class PurchaseOrder(models.Model):
    _inherit = "purchase.order"

    def _prepare_sale_order_data(
        self, name, partner, dest_company, direct_delivery_address
    ):
        res = super()._prepare_sale_order_data(
            name, partner, dest_company, direct_delivery_address
        )
        pricelist = self.env["product.pricelist"].search(
            [
                ("company_id", "=", dest_company.id),
                ("is_intercompany_supplier", "=", True),
            ]
        )
        if not len(pricelist) == 1:
            raise UserError(
                _("Company %s do not have an intercompany pricelist configured"),
                dest_company.name,
            )
        else:
            res["pricelist_id"] = pricelist.id
        return res
Python
0
@@ -187,16 +187,24 @@ mport _, + fields, models%0A @@ -324,154 +324,136 @@ ef _ -prepare_sale_order_data(%0A self, name, partner, dest_company, direct_delivery_address%0A ):%0A res = super()._prepare_sale_order_data( +get_intercompany_pricelist(self, partner, dest_company):%0A if partner.property_product_pricelist.is_intercompany_supplier: %0A @@ -465,21 +465,22 @@ -name, +return partner , de @@ -479,58 +479,54 @@ tner -, dest_company, direct_delivery_address%0A )%0A +.property_product_pricelist%0A else:%0A @@ -579,16 +579,20 @@ search(%0A + @@ -613,16 +613,20 @@ + (%22compan @@ -655,16 +655,20 @@ ny.id),%0A + @@ -732,18 +732,25 @@ -%5D%0A + %5D%0A )%0A @@ -737,34 +737,39 @@ %5D%0A + )%0A + if not l @@ -763,20 +763,16 @@ if -not len(pric @@ -785,11 +785,15 @@ == -1 +0 :%0A + @@ -837,19 +837,82 @@ -_(%22 + _(%0A (%0A %22The Company %25s d @@ -911,10 +911,10 @@ any -%25s +%7B%7D do @@ -952,38 +952,188 @@ ist -configured%22),%0A +%22%0A %22configured.%5CnPlease contact them and ask them to %22%0A %22active the option on the pricelist%22%0A ).format( dest @@ -1145,18 +1145,25 @@ any.name -,%0A +)%0A @@ -1158,32 +1158,33 @@ + )%0A else:%0A @@ -1181,57 +1181,561 @@ -else:%0A res%5B%22pricelist_id%22%5D = pricelist + )%0A else:%0A # Note in case that there is several pricelist that match we take%0A # the first one and the user will change it manually if needed%0A return fields.first(pricelist)%0A%0A def _prepare_sale_order_data(%0A self, name, partner, dest_company, direct_delivery_address%0A ):%0A res = super()._prepare_sale_order_data(%0A name, partner, dest_company, direct_delivery_address%0A )%0A res%5B%22pricelist_id%22%5D = self._get_intercompany_pricelist(partner, dest_company) .id%0A
590ba3c9d645f6eac41687bee9f12f7c914858d6
revert to http for loading clusters
progressivis/datasets/__init__.py
progressivis/datasets/__init__.py
import os
from progressivis import ProgressiveError
from .random import generate_random_csv
from .wget import wget_file

DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data'))

def get_dataset(name, **kwds):
    if not os.path.isdir(DATA_DIR):
        os.mkdir(DATA_DIR)
    if name == 'bigfile':
        return generate_random_csv('%s/bigfile.csv'%DATA_DIR, 1000000, 30)
    if name == 'smallfile':
        return generate_random_csv('%s/smallfile.csv'%DATA_DIR, 30000, 10)
    if name == 'warlogs':
        return wget_file(filename='%s/warlogs.vec.bz2'%DATA_DIR,
                         url='http://www.cs.ubc.ca/labs/imager/video/2014/QSNE/warlogs.vec.bz2',
                         **kwds)
    if name.startswith('cluster:'):
        fname = name[len('cluster:'):] + ".txt"
        return wget_file(filename='%s/%s'%(DATA_DIR, fname),
                         url='https://cs.joensuu.fi/sipu/datasets/%s'%fname)
    raise ProgressiveError('Unknow dataset %s'%name)

__all__ = ['get_dataset', 'generate_random_csv']
Python
0
@@ -896,17 +896,16 @@ rl='http -s ://cs.jo
0658934a7a7a1581c6f1d871c192f49b42144b09
fix issue with ControlPlayer on mac
pyforms/gui/Controls/ControlPlayer/VideoQt5GLWidget.py
pyforms/gui/Controls/ControlPlayer/VideoQt5GLWidget.py
from pyforms.gui.Controls.ControlPlayer.AbstractGLWidget import AbstractGLWidget
from PyQt5 import QtGui
from PyQt5.QtWidgets import QOpenGLWidget
from PyQt5 import QtCore


class VideoQt5GLWidget(AbstractGLWidget, QOpenGLWidget):

    def initializeGL(self):
        self.gl = self.context().versionFunctions()
        self.gl.initializeOpenGLFunctions()
        '''
        Sets up the OpenGL rendering context, defines display lists, etc.
        Gets called once before the first time resizeGL() or paintGL() is called.
        '''
        self.gl.glClearDepth(1.0)
        self.gl.glClearColor(0, 0, 0, 1.0)
        self.gl.glEnable(GL.GL_DEPTH_TEST)

    def perspective(self, fovy, aspect, zNear, zFar):
        ymax = zNear * math.tan( fovy * math.pi / 360.0 );
        ymin = -ymax;
        xmin = ymin * aspect;
        xmax = ymax * aspect;
        self.gl.glFrustum( xmin, xmax, ymin, ymax, zNear, zFar )

    def resizeGL(self, width, height):
        self.setupViewport(width, height)

    def setupViewport(self, width, height):
        side = min(width, height)
        self.gl.glViewport((width - side) // 2, (height - side) // 2, side, side)
        self.gl.glMatrixMode(self.gl.GL_PROJECTION)
        self.gl.glLoadIdentity()
        #self.gl.glOrtho(-0.5, +0.5, +0.5, -0.5, 4.0, 15.0)
        self.perspective(40.0, float(width) / float(height), 0.01, 10.0)
        self.gl.glMatrixMode(self.gl.GL_MODELVIEW)
Python
0
@@ -583,18 +583,23 @@ lEnable( -GL +self.gl .GL_DEPT
055fb6e4a3cec3d9ed8406c2d2795d6089e6241d
fix optimMethod (#1806)
python/orca/src/bigdl/orca/tfpark/gan/gan_estimator.py
python/orca/src/bigdl/orca/tfpark/gan/gan_estimator.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import inspect
import tempfile
import os
import tensorflow as tf
import numpy as np
from tensorflow.python.util import function_utils
from zoo.tfpark import TFOptimizer
from zoo.tfpark.gan.common import GanOptimMethod
# todo make it inherit Estimator
from zoo.util import nest


class GANEstimator(object):

    def __init__(self,
                 generator_fn,
                 discriminator_fn,
                 generator_loss_fn,
                 discriminator_loss_fn,
                 generator_optimizer,
                 discriminator_optimizer,
                 generator_steps=1,
                 discriminator_steps=1,
                 model_dir=None,
                 ):
        self._generator_fn = generator_fn
        self._discriminator_fn = discriminator_fn
        self._generator_loss_fn = generator_loss_fn
        self._discriminator_loss_fn = discriminator_loss_fn
        self._generator_steps = generator_steps
        self._discriminator_steps = discriminator_steps
        self._generator_optim_method = generator_optimizer
        self._discriminator_optim_method = discriminator_optimizer
        if model_dir is None:
            folder = tempfile.mkdtemp()
            self.checkpoint_path = os.path.join(folder, "gan_model")
        else:
            self.checkpoint_path = model_dir

    @staticmethod
    def _call_fn_maybe_with_counter(fn, counter, *args):
        fn_args = inspect.getargspec(fn).args
        if "counter" in fn_args:
            return fn(*args, counter=counter)
        else:
            return fn(*args)

    def train(self, dataset, end_trigger):
        with tf.Graph().as_default() as g:
            generator_inputs = dataset.feature_tensors
            real_data = dataset.label_tensors
            counter = tf.Variable(0, dtype=tf.int32)
            period = self._discriminator_steps + self._generator_steps
            is_discriminator_phase = tf.less(tf.mod(counter, period), self._discriminator_steps)

            with tf.variable_scope("generator"):
                gen_data = self._call_fn_maybe_with_counter(self._generator_fn, counter,
                                                            generator_inputs)

            with tf.variable_scope("discriminator"):
                fake_d_outputs = self._call_fn_maybe_with_counter(self._discriminator_fn, counter,
                                                                  gen_data, generator_inputs)

            with tf.variable_scope("discriminator", reuse=True):
                real_d_outputs = self._call_fn_maybe_with_counter(self._discriminator_fn, counter,
                                                                  real_data, generator_inputs)

            with tf.name_scope("generator_loss"):
                generator_loss = self._call_fn_maybe_with_counter(self._generator_loss_fn, counter,
                                                                  fake_d_outputs)

            with tf.name_scope("discriminator_loss"):
                discriminator_loss = self._call_fn_maybe_with_counter(self._discriminator_loss_fn, counter,
                                                                      real_d_outputs, fake_d_outputs)

            generator_variables = tf.trainable_variables("generator")
            generator_grads = tf.gradients(generator_loss, generator_variables)
            discriminator_variables = tf.trainable_variables("discriminator")
            discriminator_grads = tf.gradients(discriminator_loss, discriminator_variables)

            variables = generator_variables + discriminator_variables

            def true_fn():
                return [tf.zeros_like(grad) for grad in generator_grads]

            def false_fn():
                return generator_grads

            g_grads = tf.cond(is_discriminator_phase, true_fn=true_fn, false_fn=false_fn)
            d_grads = tf.cond(is_discriminator_phase, lambda: discriminator_grads,
                              lambda: [tf.zeros_like(grad) for grad in discriminator_grads])
            loss = tf.cond(is_discriminator_phase, lambda: discriminator_loss, lambda: generator_loss)

            grads = g_grads + d_grads

            with tf.control_dependencies(grads):
                increase_counter = tf.assign_add(counter, 1)

            g_param_size = sum([np.product(g.shape) for g in g_grads])
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                optimizer = TFOptimizer(loss, GanOptimMethod(self._discriminator_optim_method,
                                                             self._generator_optim_method,
                                                             g_param_size.value),
                                        sess=sess, dataset=dataset,
                                        inputs=dataset._original_tensors,
                                        grads=grads, variables=variables, graph=g,
                                        updates=[increase_counter],
                                        model_dir=self.checkpoint_path)
                optimizer.optimize(end_trigger)
                steps = sess.run(counter)
                saver = tf.train.Saver()
                saver.save(optimizer.sess, self.checkpoint_path, global_step=steps)
Python
0
@@ -2276,23 +2276,18 @@ set. -feature_ tensors +%5B0%5D %0A @@ -2319,21 +2319,18 @@ set. -label_ tensors +%5B1%5D %0A%0A @@ -5681,18 +5681,230 @@ ze.value -) , +%0A self._discriminator_steps,%0A self._generator_steps),%0A sess=se
d0ed8aeb2126a4b14b8413bd8c6d54952451e890
Update version number.
libcloud/__init__.py
libcloud/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to the cloud computing resources.

@var __version__: Current version of libcloud
"""

__all__ = ["__version__", "enable_debug"]

__version__ = "0.4.3-dev"


def enable_debug(fo):
    """
    Enable library wide debugging to a file-like object.

    @param fo: Where to append debugging information
    @type fo: File like object, only write operations are used.
    """
    from libcloud.base import ConnectionKey, LoggingHTTPConnection, LoggingHTTPSConnection
    LoggingHTTPSConnection.log = fo
    LoggingHTTPConnection.log = fo
    ConnectionKey.conn_classes = (LoggingHTTPConnection, LoggingHTTPSConnection)


def _init_once():
    """
    Utility function that is ran once on Library import.

    This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists
    is where we will log debug information about the provider transports.

    If LIBCLOUD_DEBUG is not a path, C{/tmp/libcloud_debug.log} is used by
    default.
    """
    import os
    d = os.getenv("LIBCLOUD_DEBUG")
    if d:
        if d.isdigit():
            d = "/tmp/libcloud_debug.log"
        fo = open(d, "a")
        enable_debug(fo)

_init_once()
Python
0.000021
@@ -967,11 +967,11 @@ %220. -4.3 +5.0 -dev
4aacd356fe7354b044d7c5787fb2366219294658
Add 'auto_correlation' to _allowed_symbols.
tensorflow/contrib/distributions/__init__.py
tensorflow/contrib/distributions/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes representing statistical distributions and ops for working with them.

See the @{$python/contrib.distributions} guide.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member

from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops.autoregressive import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.cauchy import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.conditional_distribution import *
from tensorflow.contrib.distributions.python.ops.conditional_transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.deterministic import *
from tensorflow.contrib.distributions.python.ops.distribution_util import fill_triangular
from tensorflow.contrib.distributions.python.ops.distribution_util import matrix_diag_transform
from tensorflow.contrib.distributions.python.ops.distribution_util import reduce_weighted_logsumexp
from tensorflow.contrib.distributions.python.ops.distribution_util import softplus_inverse
from tensorflow.contrib.distributions.python.ops.distribution_util import tridiag
from tensorflow.contrib.distributions.python.ops.estimator import *
from tensorflow.contrib.distributions.python.ops.geometric import *
from tensorflow.contrib.distributions.python.ops.half_normal import *
from tensorflow.contrib.distributions.python.ops.independent import *
from tensorflow.contrib.distributions.python.ops.inverse_gamma import *
from tensorflow.contrib.distributions.python.ops.logistic import *
from tensorflow.contrib.distributions.python.ops.mixture import *
from tensorflow.contrib.distributions.python.ops.mixture_same_family import *
from tensorflow.contrib.distributions.python.ops.moving_stats import *
from tensorflow.contrib.distributions.python.ops.mvn_diag import *
from tensorflow.contrib.distributions.python.ops.mvn_diag_plus_low_rank import *
from tensorflow.contrib.distributions.python.ops.mvn_full_covariance import *
from tensorflow.contrib.distributions.python.ops.mvn_tril import *
from tensorflow.contrib.distributions.python.ops.negative_binomial import *
from tensorflow.contrib.distributions.python.ops.normal_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.poisson import *
from tensorflow.contrib.distributions.python.ops.poisson_lognormal import *
from tensorflow.contrib.distributions.python.ops.quantized_distribution import *
from tensorflow.contrib.distributions.python.ops.relaxed_bernoulli import *
from tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.sample_stats import *
from tensorflow.contrib.distributions.python.ops.sinh_arcsinh import *
from tensorflow.contrib.distributions.python.ops.test_util import *
from tensorflow.contrib.distributions.python.ops.vector_diffeomixture import *
from tensorflow.contrib.distributions.python.ops.vector_exponential_diag import *
from tensorflow.contrib.distributions.python.ops.vector_laplace_diag import *
from tensorflow.contrib.distributions.python.ops.vector_sinh_arcsinh_diag import *
from tensorflow.contrib.distributions.python.ops.wishart import *
from tensorflow.python.ops.distributions.bernoulli import *
from tensorflow.python.ops.distributions.beta import *
from tensorflow.python.ops.distributions.categorical import *
from tensorflow.python.ops.distributions.dirichlet import *
from tensorflow.python.ops.distributions.dirichlet_multinomial import *
from tensorflow.python.ops.distributions.distribution import *
from tensorflow.python.ops.distributions.exponential import *
from tensorflow.python.ops.distributions.gamma import *
from tensorflow.python.ops.distributions.kullback_leibler import *
from tensorflow.python.ops.distributions.laplace import *
from tensorflow.python.ops.distributions.multinomial import *
from tensorflow.python.ops.distributions.normal import *
from tensorflow.python.ops.distributions.student_t import *
from tensorflow.python.ops.distributions.transformed_distribution import *
from tensorflow.python.ops.distributions.uniform import *

# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member

from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = [
    'bijectors',
    'Cauchy',
    'ConditionalDistribution',
    'ConditionalTransformedDistribution',
    'FULLY_REPARAMETERIZED',
    'NOT_REPARAMETERIZED',
    'ReparameterizationType',
    'Distribution',
    'Autoregressive',
    'Binomial',
    'Bernoulli',
    'BernoulliWithSigmoidProbs',
    'Beta',
    'BetaWithSoftplusConcentration',
    'Categorical',
    'Chi2',
    'Chi2WithAbsDf',
    'Deterministic',
    'VectorDeterministic',
    'Exponential',
    'ExponentialWithSoftplusRate',
    'VectorExponentialDiag',
    'Gamma',
    'GammaWithSoftplusConcentrationRate',
    'Geometric',
    'HalfNormal',
    'Independent',
    'InverseGamma',
    'InverseGammaWithSoftplusConcentrationRate',
    'Laplace',
    'LaplaceWithSoftplusScale',
    'Logistic',
    'NegativeBinomial',
    'Normal',
    'NormalWithSoftplusScale',
    'Poisson',
    'PoissonLogNormalQuadratureCompound',
    'SinhArcsinh',
    'StudentT',
    'StudentTWithAbsDfSoftplusScale',
    'Uniform',
    'MultivariateNormalDiag',
    'MultivariateNormalFullCovariance',
    'MultivariateNormalTriL',
    'MultivariateNormalDiagPlusLowRank',
    'MultivariateNormalDiagWithSoftplusScale',
    'Dirichlet',
    'DirichletMultinomial',
    'Multinomial',
    'VectorDiffeomixture',
    'VectorLaplaceDiag',
    'VectorSinhArcsinhDiag',
    'WishartCholesky',
    'WishartFull',
    'TransformedDistribution',
    'QuantizedDistribution',
    'Mixture',
    'MixtureSameFamily',
    'ExpRelaxedOneHotCategorical',
    'OneHotCategorical',
    'RelaxedBernoulli',
    'RelaxedOneHotCategorical',
    'kl_divergence',
    'RegisterKL',
    'fill_triangular',
    'matrix_diag_transform',
    'reduce_weighted_logsumexp',
    'softplus_inverse',
    'tridiag',
    'normal_conjugates_known_scale_posterior',
    'normal_conjugates_known_scale_predictive',
    'percentile',
    'assign_moving_mean_variance',
    'assign_log_moving_mean_exp',
    'moving_mean_variance',
    'estimator_head_distribution_regression',
    'quadrature_scheme_softmaxnormal_gauss_hermite',
    'quadrature_scheme_softmaxnormal_quantiles',
    'quadrature_scheme_lognormal_gauss_hermite',
    'quadrature_scheme_lognormal_quantiles',
]

remove_undocumented(__name__, _allowed_symbols)
Python
0.000854
@@ -5290,16 +5290,40 @@ ols = %5B%0A + 'auto_correlation',%0A 'bij
b999240903bb71e14818fb3f2d8eb12bda75ada2
Bump tensorflow to 2.1.0 (#721)
tensorflow_io/core/python/ops/version_ops.py
tensorflow_io/core/python/ops/version_ops.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""version_ops"""

package = 'tensorflow>=2.1.0rc2'
version = '0.11.0'
Python
0
@@ -726,18 +726,15 @@ flow -%3E += =2.1.0 -rc2 '%0Ave
ce0a3a4b13b8257fa95c95376a043b02958e73f2
Fix exception parameters
src/sentry_gitlab/plugin.py
src/sentry_gitlab/plugin.py
""" sentry_gitlab.plugin ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from django import forms from sentry.plugins.bases.issue import IssuePlugin from django.utils.translation import ugettext_lazy as _ from gitlab import * import sentry_gitlab class GitLabOptionsForm(forms.Form): gitlab_url = forms.CharField( label=_('GitLab URL'), widget=forms.TextInput(attrs={'placeholder': 'e.g. https://gitlab.example.com'}), help_text=_('Enter the URL for your GitLab server'), required=True) gitlab_token = forms.CharField( label=_('GitLab Private Token'), widget=forms.TextInput(attrs={'placeholder': 'e.g. g5DWFtLzaztgYFrqhVfE'}), help_text=_('Enter your GitLab API token'), required=True) gitlab_repo = forms.CharField( label=_('Repository Name'), widget=forms.TextInput(attrs={'placeholder': 'e.g. namespace/repo'}), help_text=_('Enter your repository name, including namespace.'), required=True) class GitLabPlugin(IssuePlugin): author = 'Pancentric Ltd' author_url = 'https://github.com/pancentric/sentry-gitlab' version = sentry_gitlab.VERSION description = "Integrate GitLab issues by linking a repository to a project" resource_links = [ ('Bug Tracker', 'https://github.com/pancentric/sentry-gitlab/issues'), ('Source', 'https://github.com/pancentric/sentry-gitlab'), ] slug = 'gitlab' title = _('GitLab') conf_title = title conf_key = 'gitlab' project_conf_form = GitLabOptionsForm def is_configured(self, request, project, **kwargs): return bool(self.get_option('gitlab_repo', project)) def get_new_issue_title(self, **kwargs): return 'Create GitLab Issue' def create_issue(self, request, group, form_data, **kwargs): url = self.get_option('gitlab_url', group.project) token = self.get_option('gitlab_token', group.project) repo = self.get_option('gitlab_repo', group.project) if repo.find('/') == -1: repo_url = str(repo) else: repo_url = str(repo.replace('/', '%2F')) gl = Gitlab(url, token) try: gl.auth() except GitlabAuthenticationError: raise forms.ValidationError(_('Unauthorized: Invalid Private Token: %s') % (e,)) except Exception: raise forms.ValidationError(_('Error Communicating with GitLab: %s') % (e,)) data = {'title': form_data['title'], 'description': form_data['description']} proj = gl.Project(id=repo_url) issue = proj.Issue(data) issue.save() return issue.id def get_issue_label(self, group, issue_id, **kwargs): return 'GL-%s' % issue_id def get_issue_url(self, group, issue_id, **kwargs): url = self.get_option('gitlab_url', group.project) repo = self.get_option('gitlab_repo', group.project) return '%s/%s/issues/%s' % (url, repo, issue_id)
Python
0.000011
@@ -2346,16 +2346,21 @@ ionError + as e :%0A @@ -2470,16 +2470,21 @@ xception + as e :%0A
0260b5d1d222fe9c8fa629ea8063c7a6e4964603
Remove unused import
overlay/composite-frame.py
overlay/composite-frame.py
#!/usr/bin/env python3

import os
import time
import io
import re
import argparse
import sys

from PIL import Image
import cairosvg

from SVGGenerator import SVGGenerator
from DataManager import DataManager
from Data import Data
from Chart import Chart

# create regex for extracting time data from file names
TIME_AND_FRAME_PATTERN = re.compile(r"(\d+)-(\d+)$")


def process_args():
    start = 0
    end = sys.maxsize
    show_svg = False

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="The directory that contains the input frames")
    parser.add_argument("-o", "--output", help="The directory to write the composited frames")
    parser.add_argument("-s", "--start", type=int, help="The frame number to start processing")
    parser.add_argument("-e", "--end", type=int, help="The frame number to end processing")
    args = parser.parse_args()

    # check for required arguments
    if args.input is None:
        raise ValueError('input directory must be defined')
    if args.output is None:
        raise ValueError('output directory must be defined')

    # handle optional arguments
    if args.start is not None:
        start = args.start
    if args.end is not None:
        end = args.end

    # return dictionary of values
    # NOTE: what do we need here to be able to use dot notation for these properties?
    return {
        "input": args.input,
        "output": args.output,
        "start": start,
        "end": end,
        "show_svg": show_svg
    }


def map_range(x, in_min, in_max, out_min, out_max):
    out_delta = out_max - out_min
    in_delta = in_max - in_min
    return (x - in_min) * out_delta / in_delta + out_min


def frame_info(frame_file):
    frame_no_ext = os.path.splitext(frame_file)[0]
    file_match = TIME_AND_FRAME_PATTERN.search(frame_no_ext)
    base_time = float(file_match.group(1))
    frame_number = float(file_match.group(2))
    frame_time = base_time + frame_number / 24
    return (frame_no_ext, frame_number, frame_time)


def get_frame_data(frame_time, frame_full_path):
    # load data
    def map_depth(item):
        result = ",".join([
            str(round(map_range(item[0], frame_time - 60, frame_time, 0, 100), 3)),
            str(round(map_range(item[1], map_depth.start, map_depth.end, 0, 100), 3))
        ])
        # print("{0} became {1}".format(item, result))
        return result

    depth_data = data_manager.select_depths(frame_time - 60, frame_time)
    if len(depth_data) > 0:
        map_depth.start = depth_data[-1][1] - 50
        map_depth.end = depth_data[-1][1] + 50
        depth_text = "{0:0.2f} ft".format(depth_data[-1][1])
        depth_path_data = "M" + " ".join(map(map_depth, depth_data))
    else:
        depth_text = "-- ft"
        depth_path_data = ""

    def map_temperature(item):
        result = ",".join([
            str(round(map_range(item[0], frame_time - 60, frame_time, 0, 100), 3)),
            str(round(map_range(item[1], 40, 55, 100, 0), 3))
        ])
        # print("{0} became {1}".format(item, result))
        return result

    temperature_data = data_manager.select_temperatures(frame_time - 60, frame_time)
    if len(temperature_data) > 0:
        temperature_text = "{0:0.2f} °F".format(temperature_data[-1][1])
        temperature_path_data = "M" + " ".join(map(map_temperature, temperature_data))
    else:
        temperature_text = "-- °F"
        temperature_path_data = ""

    # for testing
    depth_chart = Chart("Depth", depth_text, depth_path_data)
    depth_chart.x = 5
    depth_chart.y = 972 - 5 - 110

    temperature_chart = Chart("Temperature", temperature_text, temperature_path_data)
    temperature_chart.x = 5 + 5 + 100 + 5 + 5
    temperature_chart.y = 972 - 5 - 110

    # print(depth_path_data)
    return Data(frame_time, depth_chart, temperature_chart, frame_full_path)


# process command line arguments
options = process_args()
frame_dir = options["input"]
composite_dir = options["output"]
start_frame = options["start"]
end_frame = options["end"]
show_svg = options["show_svg"]

# load data
data_manager = DataManager()
data_manager.load("../db/g2x-1479064727.db")

# make svg generator
generator = SVGGenerator('./overlay.svg.mustache')

# process all frames in the frame directory
for frame_file in os.listdir(frame_dir):
    if frame_file == ".DS_Store":
        continue

    # extract frame number and time from file name
    (frame_no_ext, frame_number, frame_time) = frame_info(frame_file)

    # skip frames we don't want to process
    if frame_number < start_frame or end_frame < frame_number:
        continue

    # let the user know which frame we're currently processing
    print("frame={0}, time={1}".format(str(int(frame_number)), str(round(frame_time, 3))))

    # load frame
    frame_full_path = frame_dir + "/" + frame_file
    frame = Image.open(frame_full_path, 'r')

    # get data for this frame
    frame_data = get_frame_data(frame_time, frame_full_path)

    # render SVG text
    svg = generator.to_svg(frame_data)
    if show_svg:
        print(svg)

    # create overlay image from SVG
    overlay_bytes = cairosvg.svg2png(bytestring=svg)
    overlay = Image.open(io.BytesIO(overlay_bytes))

    # create composite image holder
    composite = Image.new('RGB', frame.size, (255, 255, 255))

    # composite images
    composite.paste(frame)
    composite.paste(overlay, (0, 0), overlay)

    # output result
    composite_full_path = composite_dir + "/" + frame_no_ext + ".png"
    composite.save(composite_full_path, optimize=False)
    # cairosvg.svg2png(bytestring=svg, write_to=composite_full_path)
Python
0.000001
@@ -31,20 +31,8 @@ os%0A -import time%0A impo
f45cd2ff52cb672068e4bcf31b9c260cd43032ee
Use timeout decorator with use_signals=False
paasta_tools/remote_git.py
paasta_tools/remote_git.py
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals

import time

import dulwich.client
import dulwich.errors

from paasta_tools.utils import timeout


def _make_determine_wants_func(ref_mutator):
    """Returns a safer version of ref_mutator, suitable for passing as the
    determine_wants argument to dulwich's send_pack method. The returned
    function will not delete or modify any existing refs."""
    def determine_wants(old_refs):
        refs = {k.decode('UTF-8'): v.decode('UTF-8') for k, v in old_refs.items()}
        new_refs = ref_mutator(refs)
        new_refs = {k.encode('UTF-8'): v.encode('UTF-8') for k, v in new_refs.items()}
        new_refs.update(old_refs)  # Make sure we don't delete/modify anything.
        return new_refs

    return determine_wants


def make_force_push_mutate_refs_func(targets, sha):
    """Create a 'force push' function that will inform send_pack that we want
    to mark a certain list of target branches/tags to point to a particular
    git_sha.

    :param targets: List of branches/tags to point at the input sha
    :param sha: The git sha to point the branches/tags at
    :returns: A function to do the ref manipulation that a dulwich client can use"""
    def mutate_refs(refs):
        for target in targets:
            refs[target.encode('UTF-8')] = sha.encode('UTF-8')
        return refs
    return mutate_refs


def create_remote_refs(git_url, ref_mutator, force=False):
    """Creates refs (tags, branches) on a remote git repo.

    :param git_url: the URL or path to the remote git repo.
    :param ref_mutator: A function that determines the new refs to create on
                        the remote repo. This gets passed a dictionary of the
                        remote server's refs in the format {name : hash, ...},
                        and should return a dictionary of the same format.
    :param force: Bool, defaults to false. If true we will overwrite refs
                  even if they are already set.
    :returns: The map of refs, with our changes applied.
    """
    client, path = dulwich.client.get_transport_and_path(git_url)

    if force is False:
        determine_wants = _make_determine_wants_func(ref_mutator)
    else:
        determine_wants = ref_mutator

    # We know we don't need to push any objects.
    def generate_pack_contents(have, want):
        return []

    return client.send_pack(path, determine_wants, generate_pack_contents)


class LSRemoteException(Exception):
    pass


@timeout()
def list_remote_refs(git_url):
    """Get the refs from a remote git repo as a dictionary of name->hash."""
    time.sleep(15)
    client, path = dulwich.client.get_transport_and_path(git_url)
    try:
        refs = client.fetch_pack(path, lambda refs: [], None, lambda data: None)
        return {k.decode('UTF-8'): v.decode('UTF-8') for k, v in refs.items()}
    except dulwich.errors.HangupException as e:
        raise LSRemoteException("Unable to fetch remote refs: %s" % e)
Python
0.023608
@@ -655,21 +655,8 @@ ls%0A%0A -import time%0A%0A impo @@ -3088,16 +3088,122 @@ timeout( +error_message=%22Timed out connecting to git server, is it reachable from where you are?%22, use_signals=False )%0Adef li @@ -3308,27 +3308,8 @@ %22%22%22%0A - time.sleep(15)%0A
a9cd7d6eaa7ea70e962cf4d1c9e4aa53a2845968
Bump version number
lillebror/version.py
lillebror/version.py
# Copyright 2012 Loop Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


__title__ = 'lillebror'
__version__ = '0.1.1'
__author__ = 'Max Persson'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2013 Max Persson'
__project_url__ = 'https://github.com/looplab/lillebror'
Python
0.000002
@@ -612,11 +612,11 @@ '0. -1.1 +2.0 '%0A__
d9f445796599bf1ecb48e21a53f3188925012053
Correct order of synced/desynced subtitles in calc_correction
linear-correction.py
linear-correction.py
#!/usr/bin/env python

import srt
import datetime
import utils


def timedelta_to_milliseconds(delta):
    return delta.days * 86400000 + \
        delta.seconds * 1000 + \
        delta.microseconds / 1000


def parse_args():
    def srt_timestamp_to_milliseconds(parser, arg):
        try:
            delta = srt.srt_timestamp_to_timedelta(arg)
        except ValueError:
            parser.error('not a valid SRT timestamp: %s' % arg)
        else:
            return timedelta_to_milliseconds(delta)

    parser = utils.basic_parser()
    parser.add_argument(
        '--desynced-start',
        type=lambda arg: srt_timestamp_to_milliseconds(parser, arg),
        required=True,
        help='the first desynchronised timestamp',
    )
    parser.add_argument(
        '--synced-start',
        type=lambda arg: srt_timestamp_to_milliseconds(parser, arg),
        required=True,
        help='the first synchronised timestamp',
    )
    parser.add_argument(
        '--desynced-end',
        type=lambda arg: srt_timestamp_to_milliseconds(parser, arg),
        required=True,
        help='the second desynchronised timestamp',
    )
    parser.add_argument(
        '--synced-end',
        type=lambda arg: srt_timestamp_to_milliseconds(parser, arg),
        required=True,
        help='the second synchronised timestamp',
    )
    return parser.parse_args()


def calc_correction(synced_start, synced_end, desynced_start, desynced_end):
    angular = (desynced_end - desynced_start) / (synced_end - synced_start)
    linear = desynced_end - angular * synced_end
    return angular, linear


def correct_time(current_msecs, angular, linear):
    return round(current_msecs * angular + linear)


def correct_timedelta(bad_delta, angular, linear):
    bad_msecs = timedelta_to_milliseconds(bad_delta)
    good_msecs = correct_time(bad_msecs, angular, linear)
    good_delta = datetime.timedelta(milliseconds=good_msecs)
    return good_delta


def linear_correct_subs(subtitles, angular, linear):
    for subtitle in subtitles:
        subtitle.start = correct_timedelta(subtitle.start, angular, linear)
        subtitle.end = correct_timedelta(subtitle.end, angular, linear)
        yield subtitle


def main():
    args = parse_args()
    angular, linear = calc_correction(
        args.synced_start, args.synced_end,
        args.desynced_start, args.desynced_end,
    )
    subtitles_in = srt.parse(args.input.read())
    corrected_subs = linear_correct_subs(subtitles_in, angular, linear)
    output = srt.compose(corrected_subs, strict=args.strict)
    args.output.write(output)


if __name__ == '__main__':
    main()
Python
0.000001
@@ -1461,18 +1461,16 @@ ular = ( -de synced_e @@ -1474,18 +1474,16 @@ d_end - -de synced_s @@ -1491,16 +1491,18 @@ art) / ( +de synced_e @@ -1498,32 +1498,34 @@ (desynced_end - +de synced_start)%0A @@ -1535,18 +1535,16 @@ inear = -de synced_e @@ -1558,16 +1558,18 @@ gular * +de synced_e
3fea814461d2a51e0cc13c4981fa6f4cdfca75e9
Correct broken import, this could never have worked.
providers/moviedata/filmtipset.py
providers/moviedata/filmtipset.py
from providers.moviedata.provider import MoviedataProvider
from urllib import urlencode
from settings import ACCESS_KEYS

from application import APPLICATION as APP

IDENTIFIER = "Filmtipset"


class Provider(MoviedataProvider):
    def get_url(self, movie):
        options = {
            "action": "search",
            "id": movie["name"],
            "returntype": "json",
            "accesskey": ACCESS_KEYS[IDENTIFIER]["ACCESS_KEY"],
            "usernr": ACCESS_KEYS[IDENTIFIER]["USER_KEY"],
        }
        return "http://www.filmtipset.se/api/api.cgi?" + urlencode(options)

    def get_movie_data(self, movie):
        url = self.get_url(movie)
        APP.debug("Fetching url: %s" % url)
        data = self.parse_json(url, path="0.data.0.hits")
        data = self.find_movie_matching_year(data, movie["year"])
        if not data:
            return None, {}
        data = self.transform_data(data)
        return data["id"], data

    def find_movie_matching_year(self, data, year):
        if not year:
            return self.traverse_json(data, path="0.movie")
        for i in range(5):
            data = self.traverse_json(data, "%s.movie" % i)
            if data.get("year", None) == year:
                return data
        return self.traverse_json(data, path="0.movie")

    def get_data_mapping(self):
        return {
            "id": lambda data: "tt" + data["imdb"],
            "title": "orgname",
            "title_swe": "name",
            "country": "country",
            "director": "director",
            "year": "year",
            "filmtipset_my_grade": "grade.value",
            "filmtipset_my_grade_type": "grade.type",
            "filmtipset_avg_grade": "filmtipsetgrade.value",
            "filmtipset_url": "url",
            "filmtipset_id": "id",
        }
Python
0
@@ -90,15 +90,18 @@ rom -setting +access_key s im
1ee492838c3289629a9213309c05761ac290d081
correct filetype arg
web/documentserver-example/python/src/utils/trackManager.py
web/documentserver-example/python/src/utils/trackManager.py
""" (c) Copyright Ascensio System SIA 2021 * The MIT License (MIT) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import config import requests import os import json from . import jwtManager, docManager, historyManager, fileUtils, serviceConverter # read request body def readBody(request): body = json.loads(request.body) if (jwtManager.isEnabled()): # if the secret key to generate token exists token = body.get('token') # get the document token if (not token): # if JSON web token is not received jwtHeader = 'Authorization' if config.DOC_SERV_JWT_HEADER is None or config.DOC_SERV_JWT_HEADER == '' else config.DOC_SERV_JWT_HEADER token = request.headers.get(jwtHeader) # get it from the Authorization header if token: token = token[len('Bearer '):] # and save it without Authorization prefix if (not token): # if the token is not received raise Exception('Expected JWT') # an error occurs body = jwtManager.decode(token) if (body.get('payload')): # get the payload object from the request body body = body['payload'] return body # file saving process def processSave(body, filename, usAddr): download = body.get('url') if (download is None): raise Exception("DownloadUrl is null") changesUri = body.get('changesurl') newFilename = filename curExt = fileUtils.getFileExt(filename) # get current file extension downloadExt = body.get('filetype') # get the extension of the downloaded file # Todo [Delete in version 7.0 or higher] if (downloadExt == None): downloadExt = fileUtils.getFileExt(download) # Support for versions below 7.0 # convert downloaded file to the file with the current extension if these extensions aren't equal if (curExt != downloadExt): try: newUri = serviceConverter.getConverterUri(download, downloadExt, curExt, docManager.generateRevisionId(download), False) # convert file and give url to a new file if not newUri: newFilename = docManager.getCorrectName(fileUtils.getFileNameWithoutExt(filename) + downloadExt, usAddr) # get the correct file name if it already exists else: download = newUri except Exception: newFilename = docManager.getCorrectName(fileUtils.getFileNameWithoutExt(filename) + downloadExt, usAddr) path = docManager.getStoragePath(newFilename, usAddr) # get the file path histDir = historyManager.getHistoryDir(path) # get the path to the history direction if not os.path.exists(histDir): # if the path doesn't exist os.makedirs(histDir) # create it versionDir = historyManager.getNextVersionDir(histDir) # get the path to the next file version os.rename(docManager.getStoragePath(filename, usAddr), 
historyManager.getPrevFilePath(versionDir, curExt)) # get the path to the previous file version and rename the storage path with it docManager.saveFileFromUri(download, path) # save file to the storage path docManager.saveFileFromUri(changesUri, historyManager.getChangesZipPath(versionDir)) # save file changes to the diff.zip archive hist = None hist = body.get('changeshistory') if (not hist) & ('history' in body): hist = json.dumps(body.get('history')) if hist: historyManager.writeFile(historyManager.getChangesHistoryPath(versionDir), hist) # write the history changes to the changes.json file historyManager.writeFile(historyManager.getKeyPath(versionDir), body.get('key')) # write the key value to the key.txt file forcesavePath = docManager.getForcesavePath(newFilename, usAddr, False) # get the path to the forcesaved file version if (forcesavePath != ""): # if the forcesaved file version exists os.remove(forcesavePath) # remove it return # file force saving process def processForceSave(body, filename, usAddr): download = body.get('url') if (download is None): raise Exception("DownloadUrl is null") curExt = fileUtils.getFileExt(filename) # get current file extension downloadExt = body.get('fileType') # get the extension of the downloaded file # Todo [Delete in version 7.0 or higher] if (downloadExt == None): downloadExt = fileUtils.getFileExt(download) # Support for versions below 7.0 newFilename = False # convert downloaded file to the file with the current extension if these extensions aren't equal if (curExt != downloadExt): try: newUri = serviceConverter.getConverterUri(download, downloadExt, curExt, docManager.generateRevisionId(download), False) # convert file and give url to a new file if not newUri: newFilename = True else: download = newUri except Exception: newFilename = True isSubmitForm = body.get('forcesavetype') == 3 # SubmitForm if(isSubmitForm): if (newFilename): filename = docManager.getCorrectName(fileUtils.getFileNameWithoutExt(filename) + "-form" + downloadExt, usAddr) # get the correct file name if it already exists else : filename = docManager.getCorrectName(fileUtils.getFileNameWithoutExt(filename) + "-form" + curExt, usAddr) forcesavePath = docManager.getStoragePath(filename, usAddr) else: if (newFilename): filename = docManager.getCorrectName(fileUtils.getFileNameWithoutExt(filename) + downloadExt, usAddr) forcesavePath = docManager.getForcesavePath(filename, usAddr, False) if (forcesavePath == ""): forcesavePath = docManager.getForcesavePath(filename, usAddr, True) docManager.saveFileFromUri(download, forcesavePath) if(isSubmitForm): uid = body['actions'][0]['userid'] # get the user id historyManager.createMetaData(filename, uid, "Filling Form", usAddr) # create meta data for forcesaved file return # create a command request def commandRequest(method, key): documentCommandUrl = config.DOC_SERV_SITE_URL + config.DOC_SERV_COMMAND_URL payload = { 'c': method, 'key': key } headers={'accept': 'application/json'} if jwtManager.isEnabled(): # check if a secret key to generate token exists or not jwtHeader = 'Authorization' if config.DOC_SERV_JWT_HEADER is None or config.DOC_SERV_JWT_HEADER == '' else config.DOC_SERV_JWT_HEADER # get jwt header headerToken = jwtManager.encode({'payload': payload}) # encode a payload object into a header token headers[jwtHeader] = f'Bearer {headerToken}' # add a header Authorization with a header token with Authorization prefix in it payload['token'] = jwtManager.encode(payload) # encode a payload object into a body token response = 
requests.post(documentCommandUrl, json=payload, headers=headers) return
Python
0.01345
@@ -2480,32 +2480,38 @@ downloadExt = + %22.%22 + body.get('filet @@ -5163,16 +5163,22 @@ oadExt = + %22.%22 + body.ge
00d9ee19790e0cd1bfdab0765e4c0e858d5f22bd
fix bug in use of 'media'
localcrawler/core.py
localcrawler/core.py
from urlparse import urlparse

from BeautifulSoup import BeautifulSoup
from django.conf import settings
from django.test.client import Client

import sys

__all__ = ['Crawler']


class Crawler(object):
    def __init__(self, entry_point='/',
                 img=True,
                 media=True,  # Media is deprecated: Use img
                 media_dir=True,
                 static_dir=True,
                 css=True,
                 js=True,
                 bad_soup=True,
                 client=None,
                 ignore=None,
                 return_results=False,
                 return_response=False,
                 output=sys.stderr):
        self.results = None
        self.queue = [entry_point]
        self.ignore = ignore or []
        self.img = img
        self.media = img  # Deprecated. Use img
        self.media_dir = media_dir
        self.static_dir = static_dir
        self.css = css
        self.js = js
        self.bad_soup = bad_soup
        self.return_results = return_results
        self.return_response = return_response
        self.client = client or Client()
        self.output = output
        self.success = True
        self.crawled = 0
        self.failed = 0
        self.succeeded = 0

    def crawl(self):
        self.results = []
        while self.queue:
            self.check(self.queue.pop(0))
        return self.success

    def check(self, url):
        """
        Open a single URL and check it's status code. If status is OK, run a
        scan if content type is html.
        """
        response = self.client.get(url, follow=True)
        if self.return_results:
            if self.return_response:
                result = (url, response.status_code, response)
            else:
                result = (url, response.status_code)
            self.results.append(result)
        self.ignore.append(url)

        # check if we're a 200
        if response.status_code != 200:
            self.success = False
            self.report(response.status_code, url, "URL Failed")
            return
        self.succeeded += 1

        html = response.content
        if response.get('Content-Type', '').startswith('text/html'):
            self.scan(html, url)

    def report(self, prefix, url, message):
        self.failed += 1
        print >>self.output, "[%s] %s (%s)" % (prefix, url, message)

    def scan(self, html, url):
        """
        Scan a HTML document for further links we might be interested in.
        """
        try:
            soup = BeautifulSoup(html)
        except Exception, e:
            if self.bad_soup:
                self.success = False
                self.report("SOUP", url, unicode(e))
            return

        if self.img and self.media:
            for img in soup.findAll('img'):
                src = img.get('src', '')
                if self._relevant(src):
                    self.queue.append(src)

        if self.js:
            for js in soup.findAll('script', attrs={'type': 'text/javascript'}):
                src = js.get('src', '')
                if self._relevant(src):
                    self.queue.append(src)

        if self.css:
            for css in soup.findAll('link', attrs={'type': 'text/css'}):
                href = css.get('href', '')
                if self._relevant(href):
                    self.queue.append(href)

        for a in soup.findAll('a'):
            href = a.get('href', '')
            if self._relevant(href):
                self.queue.append(href)

    def _relevant(self, url):
        if not url:
            return False
        url_parts = urlparse(url)
        conditions = [
            url_parts.netloc == '',
            url.startswith('/'),
            not url in self.ignore,
        ]
        if not self.media_dir:
            conditions.append(not url.startswith(settings.MEDIA_URL))
        if not self.static_dir:
            conditions.append(not url.startswith(settings.STATIC_URL))
        return all(conditions)
Python
0
@@ -680,19 +680,21 @@ media = -img +media # Depr @@ -2646,32 +2646,155 @@ return%0A + # media is deprecated but currently setting either media or%0A # img to False will disable checking of images%0A if self.
b8241c2ff0cff4a0bc96e6d229c80029cdbcb71c
Change contact email.
luminoth/__init__.py
luminoth/__init__.py
from .cli import cli  # noqa


__version__ = '0.0.1.dev0'

__title__ = 'Luminoth'
__description__ = 'Computer vision toolkit based on TensorFlow'
__uri__ = 'http://luminoth.ai'
__doc__ = __description__ + ' <' + __uri__ + '>'

__author__ = 'Tryolabs'
__email__ = 'hello@tryolabs.com'

__license__ = 'BSD 3-Clause License'
__copyright__ = 'Copyright (c) 2017 Tryolabs S.A.'
Python
0
@@ -260,13 +260,16 @@ = ' -hello +luminoth @try
df3441a2c98fffbb18c11d3660acb86d2e31e5fa
Fix main run
src/ultros/core/__main__.py
src/ultros/core/__main__.py
# coding=utf-8
import argparse
import asyncio

from ultros.core.ultros import Ultros

"""
Ultros - Module runnable
"""

__author__ = "Gareth Coles"
__version__ = "0.0.1"


def start(args):
    u = Ultros(args.config, args.data)

    # Gonna have to be a coroutine if we're AIO-based. Probably.
    asyncio.get_event_loop().run_until_complete(u.start)


def init(args):
    pass


if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog="ultros")

    parser.add_argument(
        "--version", action="version",
        version="Ultros {}".format(__version__)
    )
    parser.add_argument(
        "--config", help="specify a directory containing configuration files",
        default="./config"
    )
    parser.add_argument(
        "--data", help="specify a directory to store data files",
        default="./data"
    )

    subparsers = parser.add_subparsers()

    parser_init = subparsers.add_parser(
        "init", help="Create a default directory structure with example files"
    )
    parser_init.set_defaults(func=init)

    parser_start = subparsers.add_parser("start", help="Start Ultros")
    parser_start.set_defaults(func=start)

    args = parser.parse_args()

    if hasattr(args, "func"):
        args.func(args)
    else:
        parser.print_usage()
Python
0.000733
@@ -174,24 +174,29 @@ ef start(arg +ument s):%0A u = @@ -205,16 +205,21 @@ tros(arg +ument s.config @@ -223,16 +223,21 @@ fig, arg +ument s.data)%0A @@ -244,124 +244,16 @@ -# Gonna have to be a coroutine if we're AIO-based. Probably.%0A asyncio.get_event_loop().run_until_complete( u.start +( )%0A%0A%0A @@ -264,16 +264,21 @@ init(arg +ument s):%0A
4300cf8c6e98081c429fcbed44ed387af7735aa4
Add a link to python-markdown-math extension
markups/mdx_mathjax.py
markups/mdx_mathjax.py
# This file is part of python-markups module
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2015

'''
Math extension for Python-Markdown
==================================

Adds support for displaying math formulas using
[MathJax](http://www.mathjax.org/).

Author: 2015, Dmitry Shachnev <mitya57@gmail.com>.
'''

from __future__ import absolute_import

import markdown


class MathExtension(markdown.extensions.Extension):
    def __init__(self, *args, **kwargs):
        self.config = {
            'enable_dollar_delimiter': [False, 'Enable single-dollar delimiter'],
        }
        super(MathExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        def handle_match_inline(m):
            node = markdown.util.etree.Element('script')
            node.set('type', 'math/tex')
            node.text = markdown.util.AtomicString(m.group(3))
            return node

        def handle_match(m):
            node = markdown.util.etree.Element('script')
            node.set('type', 'math/tex; mode=display')
            if '\\begin' in m.group(2):
                node.text = markdown.util.AtomicString(m.group(2) + m.group(4) + m.group(5))
            else:
                node.text = markdown.util.AtomicString(m.group(3))
            return node

        configs = self.getConfigs()
        inlinemathpatterns = (
            markdown.inlinepatterns.Pattern(r'(?<!\\|\$)(\$)([^\$]+)(\$)'),  # $...$
            markdown.inlinepatterns.Pattern(r'(?<!\\)(\\\()(.+?)(\\\))')     # \(...\)
        )
        mathpatterns = (
            markdown.inlinepatterns.Pattern(r'(?<!\\)(\$\$)([^\$]+)(\$\$)'),  # $$...$$
            markdown.inlinepatterns.Pattern(r'(?<!\\)(\\\[)(.+?)(\\\])'),     # \[...\]
            markdown.inlinepatterns.Pattern(r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\3})')
        )
        if not configs['enable_dollar_delimiter']:
            inlinemathpatterns = inlinemathpatterns[1:]
        for i, pattern in enumerate(inlinemathpatterns):
            pattern.handleMatch = handle_match_inline
            md.inlinePatterns.add('math-inline-%d' % i, pattern, '<escape')
        for i, pattern in enumerate(mathpatterns):
            pattern.handleMatch = handle_match
            md.inlinePatterns.add('math-%d' % i, pattern, '<escape')


def makeExtension(*args, **kwargs):
    return MathExtension(*args, **kwargs)
Python
0
@@ -93,16 +93,81 @@ , 2015%0A%0A +# Maintained in https://github.com/mitya57/python-markdown-math%0A%0A '''%0AMath
5229bf4a16d468a3a337db65c478671409d6d898
Update summery.py
metric-consumer/summary.py
metric-consumer/summary.py
#!/usr/bin/python
import os
import argparse
import re

def cumulative_moving_average(new_value, old_mean, total_items):
	return old_mean + (new_value - old_mean) / total_items

def print_file_summary(path):
	cma = 0
	n = 0
	with open(path, 'r') as csv_file:
		all_lines = csv_file.readlines()
		for line in all_lines[1:]:
			try:
				values = line.split(',')
				#latency,1467792005016000000,3,False,338,False,256.0,1.467791983851e+12
				receive_time = int(values[1])
				send_time = int(float(values[7]))
				receive_time = receive_time/1000000 #convert from nanoseconds
				travel_time = receive_time - send_time
				cma = cumulative_moving_average(travel_time, cma, n+1)
				n = n+1
			except:
				continue
	print '{} = mean {}'.format(path, cma)

parser = argparse.ArgumentParser(description='Traverse all csv files in given dir and print mean travel time')
parser.add_argument('--dir', dest='dir', type=str, help='Root directory')
parser.set_defaults(dir='.')
args = parser.parse_args()

csv_pattern = re.compile(".*\.csv$")
for root, dirs, files in os.walk(args.dir):
	for f in files:
		if(csv_pattern.match(f)):
			print_file_summary('{}/{}'.format(root, f))
Python
0.000001
@@ -452,18 +452,20 @@ _time = -in +floa t(values @@ -489,12 +489,8 @@ e = -int( floa @@ -501,17 +501,16 @@ lues%5B7%5D) -) %0A%09%09%09%09rec
4395fb9d6c1f7c4c48618a13681eae16e5e41ae6
Fix docs
xpathwebdriver/default_settings.py
xpathwebdriver/default_settings.py
# -*- coding: utf-8 -*-
'''
Smoothtest
Copyright (c) 2014 Juju. Inc

Code Licensed under MIT License. See LICENSE file.
'''
import logging
import json


class Settings(object):
    def __init__(self):
        if self.webdriver_remote_credentials_path:
            with open(self.webdriver_remote_credentials_path, 'r') as fp:
                cred = json.load(fp)
                self.webdriver_remote_command_executor = cred['webdriver_remote_command_executor']
                self.webdriver_remote_session_id = cred['webdriver_remote_session_id']

    @property
    def base_url(self):
        return self.web_server_url

    # Server to be tested URL eg: http://www.example.com
    web_server_url = ''

    # Virtual display is useful to keep the webdriver browser contained
    # avoiding the browser to pop-up abover other windows (with alerts for example)
    virtual_display_enable = False  # Use virtual display
    virtual_display_visible = False  # Show the virtual display or may be hidden (for headless testing)
    virtual_display_backend = None  # 'xvfb', 'xvnc' or 'xephyr', ignores ``virtual_display_visible``
    virtual_display_size = (800, 600)  # Dimensions of the virtual display
    virtual_display_keep_open = False # Keep the virtual display after a smoothtest
    # process finished (useful when we also keep the browser open for debugging)

    webdriver_enabled = True  # Whether or not automatically create the browser
    webdriver_browser = 'Chrome' #'PhantomJS' # Which browser we would like to use webdriver with: Firefox, Chrome, PhantomJs, etc...
    webdriver_browser_keep_open = False  # Keep browser open after python process is dead
    webdriver_pool_size = 1

    #Remote driver/reuse open driver
    webdriver_remote_command_executor = ''  # Manually provide the url for the driver eg: 'http://127.0.0.1:54551'
    webdriver_remote_session_id = ''  # Manually provide session id for reusage eg: '4aed25f4a5ce78bb7d57c19663110b3c'
    webdriver_remote_credentials_path = ''  # Path to json file containing previous 2 key above (eg:dumped by "xpathshell -d <path>.json")
    #webdriver_browser_life DEPRECATED, never used in code

    # Browsers profiles
    # Eg: '/home/<user>/.mozilla/firefox/4iyhtofy.webdriver_autotest' on linux
    # or: 'C:/Users/<user>/AppData/Roaming/Mozilla/Firefox/Profiles/c1r3g2wi.default' on windows
    webdriver_firefox_profile = None

    screenshot_level = 0  # Like a text logging level, but doing screenshots (WIP)
    # Higher level-> more screenshots per action
    screenshot_exceptions_dir = './'  # Were to save logged screenshot

    assert_screenshots_dir = '/tmp/'
    assert_screenshots_learning = False
    assert_screenshots_failed_dir = '/tmp/'

    log_level_default = logging.INFO
    log_level_root_handler = logging.DEBUG
    log_color = False  # Not working on Python 3


def smoke_test_module():
    Settings()


if __name__ == "__main__":
    smoke_test_module()
Python
0.000003
@@ -1225,170 +1225,92 @@ lse - # Keep the virtual display after a smoothtest %0A # process finished (useful when we also keep the browser open for debugging) +# If we want to check results (useful whe combined with webdriver_browser_keep_open) %0A%0A @@ -1386,16 +1386,16 @@ browser%0A + webd @@ -1422,21 +1422,8 @@ ome' - #'PhantomJS' # W
9f899f439dbe9c80e77eee08fb674917c74f0b2a
Remove unused import
mechanize/_testcase.py
mechanize/_testcase.py
import os
import shutil
import subprocess
import sys
import tempfile
import unittest


class SetupStack(object):

    def __init__(self):
        self._on_teardown = []

    def add_teardown(self, teardown):
        self._on_teardown.append(teardown)

    def tear_down(self):
        for func in reversed(self._on_teardown):
            func()


class TearDownConvenience(object):

    def __init__(self, setup_stack=None):
        self._own_setup_stack = setup_stack is None
        if setup_stack is None:
            setup_stack = SetupStack()
        self._setup_stack = setup_stack

    # only call this convenience method if no setup_stack was supplied to c'tor
    def tear_down(self):
        assert self._own_setup_stack
        self._setup_stack.tear_down()


class TempDirMaker(TearDownConvenience):

    def make_temp_dir(self):
        temp_dir = tempfile.mkdtemp(prefix="tmp-%s-" % self.__class__.__name__)
        def tear_down():
            shutil.rmtree(temp_dir)
        self._setup_stack.add_teardown(tear_down)
        return temp_dir


class MonkeyPatcher(TearDownConvenience):

    Unset = object()

    def monkey_patch(self, obj, name, value):
        orig_value = getattr(obj, name)
        setattr(obj, name, value)
        def reverse_patch():
            setattr(obj, name, orig_value)
        self._setup_stack.add_teardown(reverse_patch)

    def _set_environ(self, env, name, value):
        if value is self.Unset:
            try:
                del env[name]
            except KeyError:
                pass
        else:
            env[name] = value

    def monkey_patch_environ(self, name, value, env=os.environ):
        orig_value = env.get(name, self.Unset)
        self._set_environ(env, name, value)
        def reverse_patch():
            self._set_environ(env, name, orig_value)
        self._setup_stack.add_teardown(reverse_patch)


class FixtureFactory(object):

    def __init__(self):
        self._setup_stack = SetupStack()
        self._context_managers = {}
        self._fixtures = {}

    def register_context_manager(self, name, context_manager):
        self._context_managers[name] = context_manager

    def get_fixture(self, name, add_teardown):
        context_manager = self._context_managers[name]
        fixture = context_manager.__enter__()
        add_teardown(lambda: context_manager.__exit__(None, None, None))
        return fixture

    def get_cached_fixture(self, name):
        fixture = self._fixtures.get(name)
        if fixture is None:
            fixture = self.get_fixture(name, self._setup_stack.add_teardown)
            self._fixtures[name] = fixture
        return fixture

    def tear_down(self):
        self._setup_stack.tear_down()


class TestCase(unittest.TestCase):

    def setUp(self):
        self._setup_stack = SetupStack()
        self._monkey_patcher = MonkeyPatcher(self._setup_stack)

    def tearDown(self):
        self._setup_stack.tear_down()

    def register_context_manager(self, name, context_manager):
        return self.fixture_factory.register_context_manager(
            name, context_manager)

    def get_fixture(self, name):
        return self.fixture_factory.get_fixture(name, self.add_teardown)

    def get_cached_fixture(self, name):
        return self.fixture_factory.get_cached_fixture(name)

    def add_teardown(self, *args, **kwds):
        self._setup_stack.add_teardown(*args, **kwds)

    def make_temp_dir(self, *args, **kwds):
        return TempDirMaker(self._setup_stack).make_temp_dir(*args, **kwds)

    def monkey_patch(self, *args, **kwds):
        return self._monkey_patcher.monkey_patch(*args, **kwds)

    def monkey_patch_environ(self, *args, **kwds):
        return self._monkey_patcher.monkey_patch_environ(*args, **kwds)

    def assert_contains(self, container, containee):
        self.assertTrue(containee in container,
                        "%r not in %r" % (containee, container))

    def assert_less_than(self, got, expected):
        self.assertTrue(got < expected, "%r >= %r" % (got, expected))


# http://lackingrhoticity.blogspot.com/2009/01/testing-using-golden-files-in-python.html

class GoldenTestCase(TestCase):

    run_meld = False

    def assert_golden(self, dir_got, dir_expect):
        assert os.path.exists(dir_expect), dir_expect
        proc = subprocess.Popen(["diff", "--recursive", "-u", "-N",
                                 "--exclude=.*", dir_expect, dir_got],
                                stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if len(stdout) > 0:
            if self.run_meld:
                # Put expected output on the right because that is the
                # side we usually edit.
                subprocess.call(["meld", dir_got, dir_expect])
            raise AssertionError(
                "Differences from golden files found.\n"
                "Try running with --meld to update golden files.\n"
                "%s" % stdout)
        self.assertEquals(proc.wait(), 0)
Python
0.000001
@@ -39,19 +39,8 @@ ess%0A -import sys%0A impo
4eb71abf71823a5a065d1b593ca8b624d17a35c9
prepare for 1.6
src/pyckson/__init__.py
src/pyckson/__init__.py
from pyckson.decorators import *
from pyckson.json import *
from pyckson.parser import parse
from pyckson.parsers.base import Parser
from pyckson.serializer import serialize
from pyckson.serializers.base import Serializer
from pyckson.dates.helpers import configure_date_formatter

__version__ = '1.5'
Python
0.000001
@@ -296,7 +296,7 @@ '1. -5 +6 '%0A
00812b21b2a86f0b17888c32de0fc8d65b44510f
Use multiple info
manager/operation.py
manager/operation.py
from manager.models import Package, Build

import lib.aur as aur
import lib.pacman.sync as sync
import lib.pacman.upgrade as upgrade

import itertools
import shutil
import os.path

from packager.settings import BUILD_ROOT_DIR
from packager.manager import BuilderManager
import packager.path


class OperationError(Exception):
    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        return repr(self.reason)


def _is_registered(name):
    try:
        Package.objects.get(name=name)
    except Package.DoesNotExist:
        return False
    else:
        return True


def register(name, with_depend=False):
    if _is_registered(name):
        raise OperationError('{} has already registered'.format(name))
    info = aur.info(name)
    native = []
    foreign = []
    if with_depend:
        depends = []
        if hasattr(info, 'Depends'):
            depends.append(info.Depends)
        if hasattr(info, 'MakeDepends'):
            depends.append(info.MakeDepends)
        for depend in itertools.chain(*depends):
            depend_name = depend.translate(str.maketrans('>=', '<<')).split('<')[0]
            if sync.exist(depend_name):
                native.append(depend_name)
            elif aur.exist(depend_name):
                foreign.append(depend_name)
            else:
                raise OperationError('{} not found'.format(depend_name))
    sync.system_upgrade()
    sync.install(native, asdeps=True)
    for package in foreign:
        if not _is_registered(package):
            r = register(package, with_depend=True)
            native.extend(r['native'])
            foreign.extend(r['foreign'])
    package = Package(name=name)
    package.save()
    ret = dict()
    ret['native'] = list(set(native))
    ret['foreign'] = list(set(foreign))
    return ret


def remove(name, cleanup=False):
    if not _is_registered(name):
        raise OperationError('{} has not registered'.format(name))
    if cleanup:
        shutil.rmtree(os.path.join(BUILD_ROOT_DIR, name), ignore_errors=True)
    package = Package.objects.get(name=name)
    package.delete()


def build(name):
    if not _is_registered(name):
        raise OperationError('{} has not registered'.format(name))
    package = Package.objects.get(name=name)
    sync.system_upgrade()
    BuilderManager().register(package.id)


def build_all():
    packages = Package.objects.all()
    sync.system_upgrade()
    for package in packages:
        BuilderManager().register(package.id)


def build_update():
    packages = Package.objects.all()
    sync.system_upgrade()
    for package in packages:
        try:
            latest = Build.objects.filter(package_id=package.id).order_by('-id')[0]
        except IndexError:
            BuilderManager().register(package.id)
        else:
            if latest.status == Build.FAILURE:
                BuilderManager().register(package.id)
            elif latest.status == Build.SUCCESS:
                info = aur.info(package.name)
                if not info.Version == latest.version:
                    BuilderManager().register(package.id)


def install(name):
    if not _is_registered(name):
        raise OperationError('{} has not registered'.format(name))
    package = Package.objects.get(name=name)
    try:
        build_ = Build.objects.filter(package_id=package.id).order_by('-id')[0]
    except IndexError:
        raise OperationError('{} has no build'.format(name))
    if build_.status == Build.SUCCESS:
        try:
            path = packager.path.build_to_path(build_)
            sync.system_upgrade()
            upgrade.install(path.result_file)
        except FileNotFoundError as e:
            raise OperationError from e
    else:
        raise OperationError('{} latest build has not succeeded'.format(name))


def install_all():
    files = []
    for package in Package.objects.all():
        try:
            latest = Build.objects.filter(package_id=package.id).order_by('-id')[0]
        except IndexError:
            pass
        else:
            if latest.status == Build.SUCCESS:
                try:
                    path = packager.path.build_to_path(latest)
                    files.append(path.result_file)
                except FileNotFoundError:
                    pass
    upgrade.install(' '.join(files))
Python
0
@@ -2611,32 +2611,56 @@ ystem_upgrade()%0A + need_check = list()%0A for package @@ -3010,32 +3010,85 @@ UCCESS:%0A + need_check.append(latest)%0A if need_check:%0A info = a @@ -3087,61 +3087,202 @@ info +s = aur. -info(package.name)%0A if not info +multiple_info(map(lambda x: x.package.name, need_check))%0A for p in need_check:%0A package_name = p.package.name%0A if infos%5Bpackage_name%5D and infos%5Bpackage_name%5D .Ver @@ -3290,17 +3290,12 @@ ion -== latest +!= p .ver @@ -3300,20 +3300,16 @@ ersion:%0A - @@ -3334,32 +3334,34 @@ ager().register( +p. package.id)%0A%0A%0Ade
63ae7b4caf877cb043b2c2d4861e6ab5bb5f5390
fix flake8
memote/suite/runner.py
memote/suite/runner.py
# -*- coding: utf-8 -*-

# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Run the test suite on an instance of `cobra.Model`.
"""

from __future__ import absolute_import

import sys
import shlex
import os
from os.path import dirname

import click
import pytest
from click_configfile import (ConfigFileReader, Param, SectionSchema,
                              matches_section)

from .. import __version__


class ConfigSectionSchema(object):
    """Describes all sections of the memote configuration file."""

    @matches_section("memote")
    class Memote(SectionSchema):
        """Describes the memote configuration keys and values."""
        collect = Param(type=bool, default=True)
        addargs = Param(type=str, default="")
        model = Param(type=click.Path(exists=True, dir_okay=False),
                      multiple=True)


class ConfigFileProcessor(ConfigFileReader):
    config_files = ["memote.ini", "setup.cfg"]
    config_section_schemas = [ConfigSectionSchema.Memote]


class MyPlugin:
    def pytest_sessionfinish(self):
        click.echo("Storing report data.")


def process_collect_flag(no_flag, context):
    if no_flag is not None:
        return not no_flag
    elif "collect" in context.default_map:
        return context.default_map["collect"]
    else:
        return True


def process_addargs(args, context):
    if args is not None:
        return shlex.split(args) + [dirname(__file__)]
    elif "addargs" in context.default_map:
        return shlex.split(context.default_map["addargs"]) +\
            [dirname(__file__)]
    else:
        return [dirname(__file__)]


def process_model(model, context):
    if len(model) > 0:
        os.environ["MEMOTE_MODEL"] = os.pathsep.join(model)
    elif "MEMOTE_MODEL" in os.environ:
        return
    elif "model" in context.default_map:
        os.environ["MEMOTE_MODEL"] = os.pathsep.join(
            context.default_map["model"]
        )
    else:
        raise ValueError(
            "No metabolic model found. Specify one as an argument, as an"
            " environment variable MEMOTE_MODEL, or in a configuration file."
        )


@click.command(context_settings={"default_map": ConfigFileProcessor.read_config()})
@click.help_option("--help", "-h")
@click.version_option(__version__, "--version", "-V")
@click.option("--no-collect", type=bool, is_flag=True,
              help="Do *not* collect test data needed for generating a report.")
@click.option("--pytest-args", "-a",
              help="Any additional arguments you want to pass to pytest as a"
              " string.")
@click.argument("model", type=click.Path(exists=True, dir_okay=False),
                nargs=-1)
@click.pass_context
def cli(ctx, model, pytest_args, no_collect):
    collect = process_collect_flag(no_collect, ctx)
    args = process_addargs(pytest_args, ctx)
    try:
        process_model(model, ctx)
    except ValueError as err:
        click.echo(str(err))
        sys.exit(1)
    click.echo(os.environ["MEMOTE_MODEL"])
    if collect:
        errno = pytest.main(args, plugins=[MyPlugin()])
    else:
        errno = pytest.main(args)
    sys.exit(errno)
Python
0
@@ -2767,10 +2767,18 @@ ngs= -%7B%22 +dict(%0A defa @@ -2788,11 +2788,9 @@ _map -%22: += Conf @@ -2822,9 +2822,10 @@ ig() -%7D +%0A) )%0A@c
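Applying the diff to old_contents, the flake8 fix rewrites the over-long decorator line as a dict() call split across lines; the patched decorator reads exactly:

@click.command(context_settings=dict(
    default_map=ConfigFileProcessor.read_config()
))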
3fc6a711146afa79794ec884f560f1ea43e4565a
Update the latest version
src/site/sphinx/conf.py
src/site/sphinx/conf.py
# -*- coding: utf-8 -*-
import sys, os, re
import xml.etree.ElementTree as etree
from datetime import date
from collections import defaultdict

def etree_to_dict(t):
    t.tag = re.sub(r'\{[^\}]*\}', '', t.tag)
    d = {t.tag: {} if t.attrib else None}
    children = list(t)
    if children:
        dd = defaultdict(list)
        for dc in map(etree_to_dict, children):
            for k, v in dc.iteritems():
                dd[k].append(v)
        d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}
    if t.attrib:
        d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                d[t.tag]['#text'] = text
        else:
            d[t.tag] = text
    return d

# Parse the Maven pom.xml.
pom = etree_to_dict(etree.parse('../../../pom.xml').getroot())['project']

# Set the basic project information.
project = pom['name']
project_short = pom['name']
copyright = str(date.today().year) + ', ' + pom['organization']['name']

# Set the project version and release.
# Use the last known stable release if the current version ends with '-SNAPSHOT'.
if re.match(r'^.*-SNAPSHOT$', pom['version']):
    release = '0.18.0.Final'
    version = '0.18'
else:
    release = pom['version']
    version = re.match(r'^[0-9]+\.[0-9]+', pom['version']).group(0)

# Define some useful global substitutions.
rst_epilog = '\n'
rst_epilog += '.. |baseurl| replace:: http://line.github.io/armeria/\n'
rst_epilog += '.. |jetty_alpnAgent_version| replace:: ' + pom['properties']['jetty.alpnAgent.version'] + '\n'
rst_epilog += '.. |oss_parent_version| replace:: ' + pom['parent']['version'] + '\n'
rst_epilog += '.. |logback_version| replace:: ' + pom['properties']['logback.version'] + '\n'
rst_epilog += '.. |slf4j_version| replace:: ' + pom['properties']['slf4j.version'] + '\n'
rst_epilog += '.. |tomcat_version| replace:: ' + pom['properties']['tomcat.version'] + '\n'
rst_epilog += '\n'

needs_sphinx = '1.0'
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'
exclude_trees = ['.build']
add_function_parentheses = True
pygments_style = 'tango'
master_doc = 'index'

sys.path.append(os.path.abspath('_themes'))
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes']
html_short_title = project_short
html_static_path = ['_static']
html_use_smartypants = True
html_use_index = True
html_show_sourcelink = False
htmlhelp_basename = pom['artifactId']
Python
0
@@ -1246,17 +1246,17 @@ e = '0.1 -8 +9 .0.Final @@ -1275,17 +1275,17 @@ n = '0.1 -8 +9 '%0Aelse:%0A
521c71c38d4e6edc242afb76daf330d9aec8e9ff
remove ipdb
scripts/dataverse/connect_external_accounts.py
scripts/dataverse/connect_external_accounts.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging

from modularodm import Q
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from website.addons.dataverse.model import AddonDataverseNodeSettings

logger = logging.getLogger(__name__)


def do_migration():
    for node_addon in AddonDataverseNodeSettings.find(Q('foreign_user_settings', 'ne', None)):
        user_addon = node_addon.foreign_user_settings
        # import ipdb; ipdb.set_trace()
        if not user_addon.external_accounts:
            logger.warning('User {0} has no dataverse external account'.format(user_addon.owner._id))
            continue
        account = user_addon.external_accounts[0]
        node_addon.set_auth(account, user_addon.owner)
        logger.info('Added external account {0} to node {1}'.format(
            account._id, node_addon.owner._id,
        ))


def main(dry=True):
    init_app(set_backends=True, routes=False)  # Sets the storage backends on all models
    with TokuTransaction():
        do_migration()
        if dry:
            raise Exception('Abort Transaction - Dry Run')


if __name__ == '__main__':
    dry = 'dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    main(dry=dry)
Python
0.000016
@@ -509,48 +509,8 @@ ngs%0A - # import ipdb; ipdb.set_trace()%0A
9cdd86499013c1deac7caeb8320c34294789f716
Add _kill_and_join to async actor stub
py/garage/garage/asyncs/actors.py
py/garage/garage/asyncs/actors.py
"""Asynchronous support for garage.threads.actors.""" __all__ = [ 'StubAdapter', ] from garage.asyncs import futures class StubAdapter: """Wrap all method calls, adding FutureAdapter on their result. While this simple adapter does not work for all corner cases, for common cases, it should work fine. """ def __init__(self, stub): super().__setattr__('_stub', stub) def __getattr__(self, name): method = getattr(self._stub, name) # Simple foolproof detection of non-message-sending access if name.startswith('_'): return method return lambda *args, **kwargs: \ futures.FutureAdapter(method(*args, **kwargs)) def _get_future(self): return futures.FutureAdapter(self._stub._get_future()) def _send_message(self, func, args, kwargs): """Enqueue a message into actor's message queue. Since this does not block, it may raise Full when the message queue is full. """ future = self._stub._send_message(func, args, kwargs, block=False) return futures.FutureAdapter(future)
Python
0.000001
@@ -1124,8 +1124,140 @@ future)%0A +%0A async def _kill_and_join(self, graceful=True):%0A self._kill(graceful=graceful)%0A await self._get_future().result()%0A
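The diff decodes to a single coroutine appended to StubAdapter; the method this commit adds is:

    async def _kill_and_join(self, graceful=True):
        self._kill(graceful=graceful)
        await self._get_future().result()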
16381d4fafe743c3feb1de7ec27b6cbf95f617f1
Add state and conf to interactive namespace by default
pyexperiment/utils/interactive.py
pyexperiment/utils/interactive.py
"""Provides helper functions for interactive prompts Written by Peter Duerr """ from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import def embed_interactive(**kwargs): """Embed an interactive terminal into a running python process """ try: import IPython ipython_config = IPython.Config() ipython_config.TerminalInteractiveShell.confirm_exit = False if IPython.__version__ == '1.2.1': IPython.embed(config=ipython_config, banner1='', user_ns=kwargs) else: IPython.embed(config=ipython_config, banner1='', local_ns=kwargs) except ImportError: import readline # pylint: disable=unused-variable import code code.InteractiveConsole(kwargs).interact()
Python
0
@@ -225,16 +225,77 @@ import%0A%0A +from pyexperiment import state%0Afrom pyexperiment import conf%0A %0Adef emb @@ -395,16 +395,138 @@ %22%22%22%0A + if not 'state' in kwargs:%0A kwargs%5B'state'%5D = state%0A if not 'conf' in kwargs:%0A kwargs%5B'conf'%5D = conf%0A%0A try:
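Reconstructed from old_contents plus the diff, the patched module imports the two objects and injects them as default entries in the interactive namespace before falling back to IPython or the plain console (the rest of the function is unchanged):

from pyexperiment import state
from pyexperiment import conf


def embed_interactive(**kwargs):
    """Embed an interactive terminal into a running python process
    """
    if not 'state' in kwargs:
        kwargs['state'] = state
    if not 'conf' in kwargs:
        kwargs['conf'] = conf

    try:
        ...  # unchanged IPython / code.InteractiveConsole fallback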
6cd34697334ddd8ada1daeee9a2c8b9522257487
Remove unused function
pyramda/iterable/for_each_test.py
pyramda/iterable/for_each_test.py
try:
    # Python 3
    from unittest import mock
except ImportError:
    # Python 2
    import mock

from .for_each import for_each


def print_x_plus_5(x):
    print(x + 5)


def test_for_each_nocurry_returns_the_original_iterable():
    assert for_each(mock.MagicMock(), [1, 2, 3]) == [1, 2, 3]


def test_for_each_curry_returns_the_original_iterable():
    assert for_each(mock.MagicMock())([1, 2, 3]) == [1, 2, 3]


def test_for_each_no_curry_executed_function_for_each_item_in_the_iterable():
    m = mock.MagicMock()
    for_each(m, ([1, 2, 3])) == [1, 2, 3]
    assert len(m.mock_calls) == 3


def test_for_each_curry_executed_function_for_each_item_in_the_iterable():
    m = mock.MagicMock()
    for_each(m)([1, 2, 3]) == [1, 2, 3]
    assert len(m.mock_calls) == 3
Python
0.000004
@@ -1,9 +1,8 @@ -%0A try:%0A @@ -132,50 +132,8 @@ h%0A%0A%0A -def print_x_plus_5(x):%0A print(x + 5)%0A%0A%0A def
d46eb3b0103a41568284fffeb2acadedd95561bb
Update Monitor.py to allow for custom "Unknown command." message
mk2/plugins/monitor.py
mk2/plugins/monitor.py
from mk2.plugins import Plugin
from mk2.events import ServerOutput, StatPlayerCount, ServerStop, ServerEvent, Event


class Check(object):
    alive = True
    timeout = 0
    time = 0
    warn = 0

    def __init__(self, parent, **kw):
        self.dispatch = parent.dispatch
        self.console = parent.console
        for k, v in kw.items():
            setattr(self, k, v)

    def check(self):
        if self.alive:
            self.alive = False
            return True
        return False

    def step(self):
        if self.check():
            return
        self.time += 1
        if self.timeout and self.time == self.timeout:
            timeout = "{0} minutes".format(self.timeout)
            self.console("{0} -- restarting.".format(self.message.format(timeout=timeout)))
            self.dispatch(ServerEvent(cause="server/error/" + self.event[0],
                                      data="REBOOTING SERVER: " + self.event[1].format(timeout=timeout),
                                      priority=1))
            self.dispatch(ServerStop(reason=self.stop_reason, respawn=True))
        elif self.warn and self.time == self.warn:
            if self.timeout:
                self.console("{0} -- auto restart in {1} minutes".format(self.warning, self.timeout - self.time))
            else:
                self.console(self.warning)
            time = "{0} minutes".format(self.warn)
            self.dispatch(ServerEvent(cause="server/warning/" + self.event[0],
                                      data="WARNING: " + self.event[1].format(timeout=time),
                                      priority=1))
        else:
            if self.timeout:
                self.console("{0} -- auto restart in {1} minutes".format(self.warning, self.timeout - self.time))
            else:
                self.console(self.warning)

    def reset(self):
        self.alive = True
        self.time = 0


class Monitor(Plugin):
    crash_enabled = Plugin.Property(default=True)
    crash_timeout = Plugin.Property(default=3)
    crash_warn = Plugin.Property(default=0)

    oom_enabled = Plugin.Property(default=True)

    ping_enabled = Plugin.Property(default=True)
    ping_timeout = Plugin.Property(default=3)
    ping_warn = Plugin.Property(default=0)

    pcount_enabled = Plugin.Property(default=False)
    pcount_timeout = Plugin.Property(default=3)
    pcount_warn = Plugin.Property(default=0)

    def setup(self):
        do_step = False
        self.checks = {}
        if self.oom_enabled:
            self.register(self.handle_oom, ServerOutput, level='SEVERE',
                          pattern='java\.lang\.OutOfMemoryError.*')
        if self.crash_enabled:
            do_step = True
            self.checks['crash'] = Check(self, name="crash",
                                         timeout=self.crash_timeout,
                                         warn=self.crash_warn,
                                         message="server has crashed",
                                         warning="server might have crashed",
                                         event=("hang", "server didn't respond for {timeout}"),
                                         stop_reason="crashed")
        if self.ping_enabled:
            self.register(self.handle_ping, StatPlayerCount)
            do_step = True
            self.checks['ping'] = Check(self, name="ping",
                                        timeout=self.ping_timeout,
                                        warn=self.ping_warn,
                                        message="server is not accepting connections",
                                        warning="server might have stopped accepting connections",
                                        event=("ping", "server didn't respond for {timeout}"),
                                        stop_reason="not accepting connections")
        if self.pcount_enabled:
            self.register(self.handle_pcount, StatPlayerCount)
            do_step = True
            self.checks['pcount'] = Check(self, name="pcount",
                                          timeout=self.pcount_timeout,
                                          warn=self.pcount_warn,
                                          message="server has had 0 players for {timeout}, something is wrong",
                                          warning="server has 0 players, might be inaccessible",
                                          event=("player-count", "server had 0 players for {timeout}"),
                                          stop_reason="zero players")
        self.do_step = do_step

    def server_started(self, event):
        self.reset_counts()
        if self.do_step:
            self.repeating_task(self.step, 60)

    def load_state(self, state):
        self.server_started(None)

    def step(self, *a):
        for c in self.checks.values():
            c.step()
        if self.crash_enabled:
            self.register(self.handle_crash_ok, ServerOutput,
                          pattern='Unknown command.*',
                          track=False)
            self.send('')  # Blank command to trigger 'Unknown command'

    def reset_counts(self):
        for c in self.checks.values():
            c.reset()

    ### handlers

    # crash
    def handle_crash_ok(self, event):
        self.checks["crash"].reset()
        return Event.EAT | Event.UNREGISTER

    # out of memory
    def handle_oom(self, event):
        self.console('server out of memory, restarting...')
        self.dispatch(ServerEvent(cause='server/error/oom',
                                  data="server ran out of memory",
                                  priority=1))
        self.dispatch(ServerStop(reason='out of memory', respawn=True))

    # ping
    def handle_ping(self, event):
        if event.source == 'ping':
            self.checks["ping"].reset()

    # pcount
    def handle_pcount(self, event):
        if event.players_current > 0:
            self.checks["pcount"].reset()
        else:
            self.checks["pcount"].alive = False
Python
0
@@ -2080,24 +2080,161 @@ y(default=0) +%0A crash_unknown_cmd_message = Plugin.Property(default=%22Unknown command.*%22)%0A crash_check_command = Plugin.Property(default=%22%22) %0A%0A oom_en @@ -5197,27 +5197,38 @@ ern= -'Unknown command.*' +self.crash_unknown_cmd_message ,%0A @@ -5290,10 +5290,32 @@ end( -'' +self.crash_check_command ) #
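Decoded, the patch makes the probe message and probe command configurable: it adds two plugin properties after crash_warn and threads them through the crash check in step(). A reconstruction of the changed lines (surrounding code unchanged):

    crash_unknown_cmd_message = Plugin.Property(default="Unknown command.*")
    crash_check_command = Plugin.Property(default="")

            self.register(self.handle_crash_ok, ServerOutput,
                          pattern=self.crash_unknown_cmd_message,
                          track=False)
            self.send(self.crash_check_command)  # Blank command to trigger 'Unknown command'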
4f2b7e5601e9f241868f86743eacb0e432be7495
fix settings of cache in UT
source/jormungandr/tests/integration_tests_settings.py
source/jormungandr/tests/integration_tests_settings.py
# encoding: utf-8
START_MONITORING_THREAD = False
SAVE_STAT = True

# désactivation de l'authentification
PUBLIC = True

LOGGER = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '[%(asctime)s] [%(levelname)5s] [%(process)5s] [%(name)10s] %(message)s',
        },
    },
    'handlers': {
        'default': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'default',
        },
    },
    'loggers': {
        '': {
            'handlers': ['default'],
            'level': 'INFO',
            'propagate': True
        },
    }
}
Python
0
@@ -652,9 +652,62 @@ %7D%0A%7D%0A%0A +CACHE_CONFIGURATION = %7B%0A 'CACHE_TYPE': 'null'%0A%7D%0A%0A%0A %0A
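The diff simply appends a null cache block to the test settings; the added lines decode to:

CACHE_CONFIGURATION = {
    'CACHE_TYPE': 'null'
}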
488c363227434b44efe58d13e38020b9c76c600f
Fix CPT test for tag branch (#333)
cpt/test/test_client/upload_checks_test.py
cpt/test/test_client/upload_checks_test.py
import unittest
import os
import zipfile

from conans.client.tools import environment_append
from conans.test.utils.tools import TestClient, TestServer
from cpt.test.test_client.tools import get_patched_multipackager


class UploadTest(unittest.TestCase):

    conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    name = "lib"
    version = "1.0"
    options = {"shared": [True, False]}
    default_options = "shared=False"

    def build(self):
        self.output.warn("HALLO")
"""

    def test_dont_upload_non_built_packages(self):
        ts = TestServer(users={"user": "password"})
        tc = TestClient(servers={"default": ts}, users={"default": [("user", "password")]})
        tc.save({"conanfile.py": self.conanfile})
        with environment_append({"CONAN_UPLOAD": ts.fake_url,
                                 "CONAN_LOGIN_USERNAME": "user",
                                 "CONAN_PASSWORD": "password",
                                 "CONAN_USERNAME": "user"}):
            mulitpackager = get_patched_multipackager(tc, exclude_vcvars_precommand=True)
            mulitpackager.add({}, {"shared": True})
            mulitpackager.add({}, {"shared": False})
            mulitpackager.run()
            self.assertIn("Uploading package 1/2", tc.out)
            self.assertIn("Uploading package 2/2", tc.out)

            # With the same cache and server try to rebuild them with policy missing
            mulitpackager = get_patched_multipackager(tc, build_policy="missing",
                                                      exclude_vcvars_precommand=True)
            mulitpackager.add({}, {"shared": True})
            mulitpackager.add({}, {"shared": False})
            mulitpackager.run()
            self.assertIn("Skipping upload for 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9", tc.out)
            self.assertIn("Skipping upload for 2a623e3082a38f90cd2c3d12081161412de331b0", tc.out)
            self.assertNotIn("HALLO", tc.out)

            # Without any build policy they get built
            mulitpackager = get_patched_multipackager(tc, exclude_vcvars_precommand=True)
            mulitpackager.add({}, {"shared": True})
            mulitpackager.add({}, {"shared": False})
            mulitpackager.run()
            self.assertNotIn("Skipping upload for 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9", tc.out)
            self.assertNotIn("Skipping upload for 2a623e3082a38f90cd2c3d12081161412de331b0", tc.out)
            self.assertIn("Uploading package 1/2", tc.out)
            self.assertIn("Uploading package 2/2", tc.out)
            self.assertIn("HALLO", tc.out)

    def test_upload_when_tag_is_false(self):
        ts = TestServer(users={"user": "password"})
        tc = TestClient(servers={"default": ts}, users={"default": [("user", "password")]})
        tc.save({"conanfile.py": self.conanfile})
        zip_path = os.path.join(tc.current_folder, 'config.zip')
        zipf = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
        zipf.close()
        with environment_append({"CONAN_UPLOAD": ts.fake_url,
                                 "CONAN_LOGIN_USERNAME": "user",
                                 "CONAN_PASSWORD": "password",
                                 "CONAN_USERNAME": "user",
                                 "CONAN_CONFIG_URL": zip_path,
                                 "CONAN_UPLOAD_ONLY_WHEN_TAG": "1"}):
            mp = get_patched_multipackager(tc, exclude_vcvars_precommand=True)
            mp.add_common_builds(shared_option_name=False)
            mp.run()
            self.assertNotIn("Redefined channel by branch tag", tc.out)
            self.assertNotIn("Uploading packages for 'lib/1.0@user/stable'", tc.out)
            self.assertNotIn("Uploading package 1/1: 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 to 'default'", tc.out)
            self.assertIn("Skipping upload, not tag branch", tc.out)

    def test_upload_when_tag_is_true(self):
        ts = TestServer(users={"user": "password"})
        tc = TestClient(servers={"default": ts}, users={"default": [("user", "password")]})
        tc.save({"conanfile.py": self.conanfile})
        zip_path = os.path.join(tc.current_folder, 'config.zip')
        zipf = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
        zipf.close()
        with environment_append({"CONAN_UPLOAD": ts.fake_url,
                                 "CONAN_LOGIN_USERNAME": "user",
                                 "CONAN_PASSWORD": "password",
                                 "CONAN_USERNAME": "user",
                                 "CONAN_CONFIG_URL": zip_path,
                                 "CONAN_UPLOAD_ONLY_WHEN_TAG":
                                 "TRAVIS": "1",
                                 "TRAVIS_TAG": "0.1"}):
            mp = get_patched_multipackager(tc, exclude_vcvars_precommand=True)
            mp.add_common_builds(shared_option_name=False)
            mp.run()
            self.assertNotIn("Skipping upload, not tag branch", tc.out)
            self.assertIn("Redefined channel by branch tag", tc.out)
            self.assertIn("Uploading packages for 'lib/1.0@user/stable'", tc.out)
            self.assertIn("Uploading package 1/1: 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 to 'default'", tc.out)
Python
0.000013
@@ -3217,24 +3217,72 @@ Y_WHEN_TAG%22: + %221%22,%0A %22TRAVIS%22: %221%22%7D):%0A%0A
4697a7677aecaab4135e483c30e9fc6cc780fcca
test skipped as experimental without street network.
source/jormungandr/tests/routing_tests_experimental.py
source/jormungandr/tests/routing_tests_experimental.py
# Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
#     the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
#     powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
#     a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division

from datetime import timedelta

from .tests_mechanism import config
from jormungandr.scenarios.qualifier import min_from_criteria
from .journey_common_tests import *
from unittest import skip
from .routing_tests import OnBasicRouting

'''
This unit runs all the common tests in journey_common_tests.py along with
locals tests added in this unit for scenario experimental
'''


@config({'scenario': 'experimental'})
class TestJourneysExperimental(JourneyCommon, DirectPath, AbstractTestFixture):
    """
    Test the experiental scenario
    All the tests are defined in "TestJourneys" class, we only change the scenario

    NOTE: for the moment we cannot import all routing tests, so we only
    get 2, but we need to add some more
    """

    @staticmethod
    def check_next_datetime_link(dt, response, clockwise):
        if not response.get('journeys'):
            return
        """default next behaviour is 1s after the best or the soonest"""
        j_to_compare = min_from_criteria(generate_pt_journeys(response),
                                         new_default_pagination_journey_comparator(clockwise=clockwise))
        j_departure = get_valid_datetime(j_to_compare['departure_date_time'])
        eq_(j_departure + timedelta(seconds=1), dt)

    @staticmethod
    def check_previous_datetime_link(dt, response, clockwise):
        if not response.get('journeys'):
            return
        """default previous behaviour is 1s before the best or the latest """
        j_to_compare = min_from_criteria(generate_pt_journeys(response),
                                         new_default_pagination_journey_comparator(clockwise=clockwise))
        j_departure = get_valid_datetime(j_to_compare['arrival_date_time'])
        eq_(j_departure - timedelta(seconds=1), dt)

    def test_best_filtering(self):
        """ This feature is no longer supported"""
        pass

    def test_datetime_represents_arrival(self):
        super(TestJourneysExperimental, self).test_datetime_represents_arrival()

    def test_journeys_wheelchair_profile(self):
        """ This feature is no longer supported """
        pass

    def test_not_existent_filtering(self):
        """ This feature is no longer supported """
        pass

    def test_other_filtering(self):
        """ This feature is no longer supported """
        pass


@config({"scenario": "experimental"})
class TestExperimentalJourneysWithPtref(JourneysWithPtref, AbstractTestFixture):
    pass


@config({"scenario": "experimental"})
class TestExperimentalOnBasicRouting(OnBasicRouting, AbstractTestFixture):
    def test_sp_to_sp(self):
        """
        Test journeys from stop point to stop point without street network
        """
        query = "journeys?from=stop_point:uselessA&to=stop_point:B&datetime=20120615T080000"

        # with street network desactivated
        response = self.query_region(query + "&max_duration_to_pt=0")
        assert('journeys' not in response)

        # with street network activated
        response = self.query_region(query + "&max_duration_to_pt=1")
        assert('journeys' not in response)

    @skip("temporarily disabled")
    def test_isochrone(self):
        super(OnBasicRouting, self).test_isochrone()
Python
0
@@ -3917,534 +3917,117 @@ -def test_sp_to_sp(self):%0A %22%22%22%0A Test journeys from stop point to stop point without street network%0A %22%22%22%0A query = %22journeys?from=stop_point:uselessA&to=stop_point:B&datetime=20120615T080000%22%0A%0A # with street network desactivated%0A response = self.query_region(query + %22&max_duration_to_pt=0%22)%0A assert('journeys' not in response)%0A%0A # with street network activated%0A response = self.query_region(query + %22&max_duration_to_pt=1%22)%0A assert('journeys' not in response +@skip(%22temporarily disabled%22)%0A def test_sp_to_sp(self):%0A super(OnBasicRouting, self).test_sp_to_sp( )%0A%0A
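Applying the diff, the whole body of test_sp_to_sp is dropped and replaced by a skipped delegation to the base class, matching the commit subject:

    @skip("temporarily disabled")
    def test_sp_to_sp(self):
        super(OnBasicRouting, self).test_sp_to_sp()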
8552bccb91755c3a69d1435976cfeffef1e1137e
handle exceptions, fix #169
src/you_get/downloader/coursera.py
src/you_get/downloader/coursera.py
#!/usr/bin/env python

__all__ = ['coursera_download']

from ..common import *

def coursera_login(user, password, csrf_token):
    url = 'https://www.coursera.org/maestro/api/user/login'
    my_headers = {
        'Cookie': ('csrftoken=%s' % csrf_token),
        'Referer': 'https://www.coursera.org',
        'X-CSRFToken': csrf_token,
    }
    values = {
        'email_address': user,
        'password': password,
    }
    form_data = parse.urlencode(values).encode('utf-8')
    response = request.urlopen(request.Request(url, headers = my_headers, data = form_data))
    return response.headers

def coursera_download(url, output_dir = '.', merge = True, info_only = False):
    course_code = r1(r'coursera.org/([^/]+)', url)
    url = "http://class.coursera.org/%s/lecture/index" % course_code

    request.install_opener(request.build_opener(request.HTTPCookieProcessor()))

    import http.client
    conn = http.client.HTTPConnection('class.coursera.org')
    conn.request('GET', "/%s/lecture/index" % course_code)
    response = conn.getresponse()
    csrf_token = r1(r'csrf_token=([^;]+);', response.headers['Set-Cookie'])

    import netrc, getpass
    info = netrc.netrc().authenticators('coursera.org')
    if info is None:
        user = input("User: ")
        password = getpass.getpass("Password: ")
    else:
        user, password = info[0], info[2]
    print("Logging in...")
    coursera_login(user, password, csrf_token)

    request.urlopen("https://class.coursera.org/%s/auth/auth_redirector?type=login&subtype=normal" % course_code) # necessary!

    html = get_html(url)

    course_name = "%s (%s)" % (r1(r'course_strings_name = "([^"]+)"', html), course_code)
    output_dir = os.path.join(output_dir, course_name)

    materials = re.findall(r'<a target="_new" href="([^"]+)"', html)
    num_of_slides = len(re.findall(r'title="[Ss]lides', html))
    num_of_srts = len(re.findall(r'title="Subtitles \(srt\)"', html))
    num_of_texts = len(re.findall(r'title="Subtitles \(text\)"', html))
    num_of_mp4s = len(re.findall(r'title="Video \(MP4\)"', html))
    num_of_others = len(materials) - num_of_slides - num_of_srts - num_of_texts - num_of_mp4s

    print("MOOC Site: ", site_info)
    print("Course Name: ", course_name)
    print("Num of Videos (MP4): ", num_of_mp4s)
    print("Num of Subtitles (srt): ", num_of_srts)
    print("Num of Subtitles (text): ", num_of_texts)
    print("Num of Slides: ", num_of_slides)
    print("Num of other resources: ", num_of_others)
    print()

    if info_only:
        return

    # Process downloading

    names = re.findall(r'<div class="hidden">([^<]+)</div>', html)
    assert len(names) == len(materials)

    for i in range(len(materials)):
        title = names[i]
        resource_url = materials[i]
        ext = r1(r'format=(.+)', resource_url) or r1(r'\.(\w\w\w\w|\w\w\w|\w\w|\w)$', resource_url) or r1(r'download.(mp4)', resource_url)
        _, _, size = url_info(resource_url)

        if ext == 'mp4':
            download_urls([resource_url], title, ext, size, output_dir, merge = merge)
        else:
            download_url_chunked(resource_url, title, ext, size, output_dir, merge = merge)

    return

def download_url_chunked(url, title, ext, size, output_dir = '.', refer = None, merge = True, faker = False):
    if dry_run:
        print('Real URL:\n', [url], '\n')
        return

    title = escape_file_path(title)
    if ext:
        filename = '%s.%s' % (title, ext)
    else:
        filename = title
    filepath = os.path.join(output_dir, filename)

    if not force and os.path.exists(filepath):
        print('Skipping %s: file already exists' % tr(filepath))
        print()
        return

    bar = DummyProgressBar()
    print('Downloading %s ...' % tr(filename))
    url_save_chunked(url, filepath, bar, refer = refer, faker = faker)
    bar.done()

    print()
    return

site_info = "Coursera"
download = coursera_download
download_playlist = playlist_not_supported('coursera')
Python
0.000002
@@ -3087,24 +3087,41 @@ l)%0A %0A + try:%0A if e @@ -3137,32 +3137,36 @@ 4':%0A + + download_urls(%5Br @@ -3228,24 +3228,28 @@ ge)%0A + else:%0A @@ -3246,32 +3246,36 @@ se:%0A + + download_url_chu @@ -3334,24 +3334,139 @@ ge = merge)%0A + except Exception as err:%0A print('Skipping %25s: %25s%5Cn' %25 (resource_url, err))%0A continue%0A %0A ret
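Decoded, the patch wraps the per-resource download in a try/except so one bad URL no longer aborts the whole course download; a reconstruction of the patched loop body (indentation inferred from the hunks):

        try:
            if ext == 'mp4':
                download_urls([resource_url], title, ext, size, output_dir, merge = merge)
            else:
                download_url_chunked(resource_url, title, ext, size, output_dir, merge = merge)
        except Exception as err:
            print('Skipping %s: %s\n' % (resource_url, err))
            continue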
6683afcbe771f6dd72faaf4070a4e7c8e19d2919
Handle incorrect data with sismos API
modules/info/sismos.py
modules/info/sismos.py
from datetime import datetime

from discord import Embed

from bot import Command, categories
from bot.utils import pat_channel, format_date
from bot.libs.configuration import ServerConfig


class Sismos(Command):
    __version__ = '1.0.1'
    __author__ = 'makzk'
    cfg_channel_name = 'sismos_channel'
    api_url = 'https://api.adderou.cl/sismo/'

    def __init__(self, bot):
        super().__init__(bot)
        self.name = 'sismos'
        self.help = '$[sismos-help]'
        self.format = '$[sismos-format]'
        self.last_events = None
        self.last_update = None
        self.category = categories.INFORMATION
        self.schedule = (self.update_info, 20)

    async def handle(self, cmd):
        if self.last_events is None:
            await cmd.answer('$[sismos-not-loaded]')
            return

        if len(self.last_events) == 0:
            await cmd.answer('$[sismos-no-last]')
            return

        if cmd.argc > 0:
            if cmd.args[0] in cmd.lang.get_list('sismos-last-cmds'):
                await cmd.answer('$[sismos-last]', embed=Sismos.make_embed(self.last_events[0]))
                return
            elif cmd.owner and cmd.args[0] in cmd.lang.get_list('sismos-channel-cmds'):
                if cmd.argc < 2 or not pat_channel.match(cmd.args[1]):
                    await cmd.answer('$[format]: $[sismos-channel-format]')
                    return
                else:
                    cmd.config.set(Sismos.cfg_channel_name, cmd.args[1][2:-1])
                    await cmd.answer('$[sismos-channel-set]', locales={'channel': cmd.args[1]})
                    return

        sismos_list = ['- [{:.1f}º] [{}]({}) ({} km)'.format(
            f['magnitudes'][0]['magnitud'], f['geoReferencia'], f['enlace'], f['profundidad']
        ) for f in self.last_events[:5]]

        embed = Embed(title='$[sismos-last-earthquakes]', description='\n'.join(sismos_list))
        embed.set_footer(text='$[sismos-last-update]')
        await cmd.answer(embed, locales={'update': format_date(self.last_update)})

    async def update_info(self):
        await self.bot.wait_until_ready()

        first = self.last_events is None
        if first:
            self.log.debug('Loading earthquakes information...')
            self.log.debug('Loading %s ...', Sismos.api_url)

        async with self.http.get(Sismos.api_url) as r:
            data = await r.json()

            if first:
                self.log.debug('Earthquakes information loaded. {} entries loaded.'.format(len(data)))

            if self.last_events is None or len(self.last_events) == 0 or data[0]['id'] != self.last_events[0]['id']:

                self.last_events = data
                self.last_update = datetime.now()

                if not first and len(self.last_events) > 0 and len(self.last_events[0]['magnitudes']) > 0 \
                        and self.last_events[0]['magnitudes'][0]['magnitud'] >= 5:
                    query = ServerConfig.select().where(
                        ServerConfig.name == Sismos.cfg_channel_name, ServerConfig.value != ''
                    )

                    for server_config in query:
                        sv = self.bot.get_server(server_config.serverid)
                        if sv is None:
                            ServerConfig.delete_instance(server_config)
                            continue

                        chan = sv.get_channel(server_config.value)
                        if chan is None:
                            continue

                        await self.bot.send_message(
                            destination=chan, content='$[sismos-alert-title]',
                            embed=Sismos.make_embed(self.last_events[0])
                        )

    @staticmethod
    def make_embed(data):
        mag = data['magnitudes'][0]
        embed = Embed(title='$[sismos-grade] {} {}'.format(mag['magnitud'], mag['medida']))
        embed.description = data['geoReferencia'] + '\n\n'
        embed.description += '$[sismos-date]: {}\n'.format(data['fechaLocal'])
        embed.description += '$[sismos-location]: lat {latitud}º, long {longitud}º\n'.format(**data)
        embed.description += '$[sismos-depth]: {} km'.format(data['profundidad'])
        embed.url = data['enlace']

        if data['preliminar']:
            embed.title += ' $[sismos-preliminary]'

        return embed
Python
0.000003
@@ -2396,24 +2396,161 @@ t r.json()%0A%0A + if not isinstance(data, list) or len(data) == 0:%0A self.log.debug('No data retrieved')%0A return%0A%0A @@ -2780,17 +2780,16 @@ %5B'id'%5D:%0A -%0A
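The first hunk decodes to a guard inserted right after the JSON fetch, so a non-list or empty API response is logged and ignored instead of raising on data[0]; the added lines are:

        if not isinstance(data, list) or len(data) == 0:
            self.log.debug('No data retrieved')
            return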
26c4effd8741d2511bb0b3bd46cca12d37b0e01b
Add file magic
examples/python/scheme_timer.py
examples/python/scheme_timer.py
""" Checks the execution time of repeated calls to the Scheme API from Python Runs an empty Scheme command NUMBER_OF_ITERATIONS times and displays the total execution time """ __author__ = 'Cosmo Harrigan' NUMBER_OF_ITERATIONS = 100 from opencog.atomspace import AtomSpace, TruthValue, types, get_type_name from opencog.scheme_wrapper import load_scm, scheme_eval, scheme_eval_h, __init__ atomspace = AtomSpace() __init__(atomspace) data = ["opencog/atomspace/core_types.scm", "opencog/scm/utilities.scm"] for item in data: load_scm(atomspace, item) def test_operation(): for i in range(NUMBER_OF_ITERATIONS): scheme_eval_h(atomspace, '()') import timeit elapsed = timeit.timeit("test_operation()", setup="from __main__ import test_operation", number=1) print "{0} seconds elapsed performing {1} repeated calls = {2} calls / sec".\ format(elapsed, NUMBER_OF_ITERATIONS, NUMBER_OF_ITERATIONS / elapsed)
Python
0.000001
@@ -1,12 +1,36 @@ +#! /usr/bin/env python%0A%0A %22%22%22%0AChecks t
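The entire change is the file magic prepended at offset 1; decoded, the new file now begins:

#! /usr/bin/env python

"""
Checks the execution time of repeated calls to the Scheme API from Python
...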
075e7ea4e6be57cb618fcc26484456bf24db99c9
add button for pyjd to load slides
examples/slideshow/Slideshow.py
examples/slideshow/Slideshow.py
from pyjamas.ui.Button import Button
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas import Window
from SinkList import SinkList
from pyjamas import History
import Slide
from pyjamas.HTTPRequest import HTTPRequest
from SlideLoader import SlideListLoader

class Slideshow:
    def onHistoryChanged(self, token):
        info = self.sink_list.find(token)
        if info:
            self.show(info, False)
        else:
            self.showInfo()

    def onModuleLoad(self):
        self.curInfo=''
        self.curSink=None
        self.description=HTML()
        self.sink_list=SinkList()
        self.panel=DockPanel()

        self.loadSinks()
        self.sinkContainer = DockPanel()
        self.sinkContainer.setStyleName("ks-Sink")

        height = Window.getClientHeight()
        self.sp = ScrollPanel(self.sinkContainer)
        self.sp.setWidth("100%")
        self.sp.setHeight("%dpx" % (height-110))
        vp=VerticalPanel()
        vp.setWidth("100%")
        vp.setHeight("100%")
        vp.add(self.description)
        vp.add(self.sp)

        self.description.setStyleName("ks-Intro")

        self.panel.add(self.sink_list, DockPanel.WEST)
        self.panel.add(vp, DockPanel.CENTER)

        self.panel.setCellVerticalAlignment(self.sink_list, HasAlignment.ALIGN_TOP)
        self.panel.setCellWidth(vp, "100%")
        self.panel.setCellHeight(vp, "100%")

        Window.addWindowResizeListener(self)
        History.addHistoryListener(self)
        RootPanel().add(self.panel)

    def onWindowResized(self, width, height):
        self.sink_list.resize(width, height)
        self.sp.setHeight("%dpx" % (height-110))

    def show(self, info, affectHistory):
        if info == self.curInfo:
            return
        self.curInfo = info

        #Logger.write("showing " + info.getName())
        if self.curSink <> None:
            self.curSink.onHide()
            #Logger.write("removing " + self.curSink)
            self.sinkContainer.remove(self.curSink)

        self.curSink = info.getInstance()
        self.sink_list.setSinkSelection(info.getName())
        self.description.setHTML(info.getDescription())

        if (affectHistory):
            History.newItem(info.getName())

        self.sinkContainer.add(self.curSink, DockPanel.CENTER)
        self.sinkContainer.setCellWidth(self.curSink, "100%")
        self.sinkContainer.setCellHeight(self.curSink, "100%")
        self.sinkContainer.setCellVerticalAlignment(self.curSink, HasAlignment.ALIGN_TOP)
        self.curSink.onShow()

    def loadSinks(self):
        HTTPRequest().asyncPost("slides.txt", "", SlideListLoader(self))

    def setSlides(self, slides):
        for l in slides:
            name = l[0]
            desc = l[1]
            self.sink_list.addSink(Slide.init(name, desc))

        #Show the initial screen.
        initToken = History.getToken()
        if len(initToken):
            self.onHistoryChanged(initToken)
        else:
            self.showInfo()

    def showInfo(self):
        self.show(self.sink_list.sinks[0], False)

if __name__ == '__main__':
    app = Slideshow()
    app.onModuleLoad()
Python
0
@@ -1,24 +1,37 @@ +import pyjd%0A%0A from pyjamas.ui.Button i @@ -520,16 +520,85 @@ stLoader +%0Afrom pyjamas.Timer import Timer%0Afrom pyjamas.ui.Button import Button %0A%0Aclass @@ -972,33 +972,44 @@ -%0A self.loadSinks() +self.b=Button(%22load%22, self)%0A %0A @@ -1864,16 +1864,417 @@ f.panel) +%0A RootPanel().add(self.b)%0A%0A # kludgy way to detect %22real%22 pyjd / pyjs difference.%0A # there's a bug in XULRunner nsIXMLHttpRequest which%0A # stops it from working (open %22NS_ERROR_NOT_INITIALISED%22)%0A if not hasattr(pyjd, %22Browser%22):%0A Timer(1, self)%0A%0A def onClick(self, sender):%0A self.loadSinks()%0A%0A def onTimer(self, tid):%0A self.loadSinks() %0A%0A de @@ -3848,16 +3848,92 @@ ain__':%0A + pyjd.setup(%22http://127.0.0.1/examples/slideshow/public/Slideshow.html%22)%0A app @@ -3969,8 +3969,23 @@ eLoad()%0A + pyjd.run()%0A
9f1a4977e34dc01a0489655df826b63b84f7d3be
Use SunPy sample data for Solar Cycle example.
examples/solar_cycle_example.py
examples/solar_cycle_example.py
""" =============== The Solar Cycle =============== This example shows the current and possible next solar cycle. """ import datetime import matplotlib.pyplot as plt import sunpy.lightcurve as lc ############################################################################### # Let's download the latest data from NOAA. noaa = lc.NOAAIndicesLightCurve.create() noaa_predict = lc.NOAAPredictIndicesLightCurve.create() ############################################################################### # Next lets grab the data again to create a new data structure that we will # shift by 12 years to simulate the next solar cycle. We will truncate the # data to only plot what is necessary. noaa2 = lc.NOAAIndicesLightCurve.create() noaa2.data = noaa2.data.shift(2, freq=datetime.timedelta(days = 365*12)) noaa2 = noaa2.truncate('2021/04/01', '2030/01/01') ############################################################################### # Finally lets plot everything together with some arbitrary range for the strength # of the next solar cycle. plt.plot(noaa.data.index, noaa.data['sunspot RI'], label='Sunspot Number') plt.plot(noaa_predict.data.index,noaa_predict.data['sunspot'],color='grey', label='Near-term Prediction') plt.fill_between(noaa_predict.data.index, noaa_predict.data['sunspot low'], noaa_predict.data['sunspot high'], alpha = 0.3, color='grey') plt.fill_between(noaa2.data.index, noaa2.data['sunspot RI smooth']*0.4, noaa2.data['sunspot RI smooth']*1.3, alpha = 0.3, color='grey', label='Next Cycle Predict') plt.ylim(0) plt.text('2011-01-01', 120,'Cycle 24',fontsize=16) plt.text('2024-01-01', 120,'Cycle 25',fontsize=16) plt.ylabel('Sunspot Number') plt.xlabel('Year') plt.legend(loc=2, framealpha=0.5) plt.show()
Python
0
@@ -190,16 +190,93 @@ ve as lc +%0Afrom sunpy.data.sample import NOAAINDICES_LIGHTCURVE, NOAAPREDICT_LIGHTCURVE %0A%0A###### @@ -355,49 +355,191 @@ #%0A# -Let's download the latest data from NOAA. +For this example we will use the SunPy sample data, if you want the current%0A# data, delete the argument to the %60%60create%60%60 function. i.e.%0A# %60%60noaa = lc.NOAAIndicesLightCurve.create()%60%60 %0Anoa @@ -570,24 +570,46 @@ urve.create( +NOAAINDICES_LIGHTCURVE )%0Anoaa_predi @@ -652,16 +652,38 @@ .create( +NOAAPREDICT_LIGHTCURVE )%0A%0A##### @@ -987,16 +987,38 @@ .create( +NOAAINDICES_LIGHTCURVE )%0Anoaa2. @@ -1072,19 +1072,17 @@ lta(days - = += 365*12)) @@ -1287,16 +1287,18 @@ for the +%0A# strengt @@ -1298,18 +1298,16 @@ strength -%0A# of the @@ -1432,16 +1432,17 @@ a.index, + noaa_pre @@ -1462,16 +1462,26 @@ nspot'%5D, +%0A color='g @@ -1591,16 +1591,33 @@ t low'%5D, +%0A noaa_pr @@ -1647,34 +1647,15 @@ h'%5D, -%0A alpha - = += 0.3, @@ -1741,16 +1741,33 @@ h'%5D*0.4, +%0A noaa2.d @@ -1799,34 +1799,15 @@ 1.3, -%0A alpha - = += 0.3, @@ -1812,32 +1812,49 @@ 3, color='grey', +%0A label='Next Cyc @@ -1897,32 +1897,33 @@ 011-01-01', 120, + 'Cycle 24',fonts @@ -1917,16 +1917,17 @@ cle 24', + fontsize @@ -1958,16 +1958,17 @@ 1', 120, + 'Cycle 2 @@ -1970,16 +1970,17 @@ cle 25', + fontsize
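Reconstructed from the diff, the example now imports the bundled SunPy sample files and feeds them to the create() calls, so nothing is downloaded unless the argument is deleted (the new comment in the patch says exactly that):

from sunpy.data.sample import NOAAINDICES_LIGHTCURVE, NOAAPREDICT_LIGHTCURVE

noaa = lc.NOAAIndicesLightCurve.create(NOAAINDICES_LIGHTCURVE)
noaa_predict = lc.NOAAPredictIndicesLightCurve.create(NOAAPREDICT_LIGHTCURVE)
...
noaa2 = lc.NOAAIndicesLightCurve.create(NOAAINDICES_LIGHTCURVE)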
0ac3750c2b8d0fc978c076604db3bfee1a47708f
allow name param to name tab widgets
examples/tabpanelwidget/Tabs.py
examples/tabpanelwidget/Tabs.py
import pyjd # dummy in pyjs

from pyjamas.ui.TabBar import TabBar
from pyjamas.ui.TabPanel import TabPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Image import Image
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Composite import Composite
#from pyjamas.ui import DecoratorPanel
from pyjamas.ui import MouseListener
from pyjamas.ui import Event
from pyjamas import Window
from pyjamas.ui.DecoratorPanel import DecoratedTabPanel, DecoratorPanel
from pyjamas.ui.DecoratorPanel import DecoratorTitledPanel

#class PrettyTab(DecoratorPanel):
class PrettyTab(Composite):

    def __init__(self, text, imageUrl):
        DecoratorPanel.__init__(self, DecoratorPanel.DECORATE_ALL)
        p = HorizontalPanel()
        p.setSpacing(3)
        self.img = Image(imageUrl)
        self.txt = HTML(text)
        p.add(self.img)
        p.add(self.txt)
        self.add(p)

    def addClickListener(self, listener):
        self.img.addClickListener(listener)
        self.txt.addClickListener(listener)

class Tabs:
    def onModuleLoad(self):
        #red = PrettyTab("1638", "images/user_red.png")
        #red.setStyleName('gwt-TabBarItem')
        #green = PrettyTab("1640", "images/user_green.png")
        #red.setStyleName('gwt-TabBarItem')
        red = "1638"
        green = "1640"

        self.fTabs = DecoratedTabPanel(Size=("600px", "100%"))
        self.fTabs.add(self.createImage("rembrandt/JohannesElison.jpg"), red, True)
        self.fTabs.add(self.createImage("rembrandt/SelfPortrait1640.jpg"), green, True)
        self.fTabs.add(self.createImage("rembrandt/LaMarcheNocturne.jpg"), "1642")
        self.fTabs.add(self.createImage("rembrandt/TheReturnOfTheProdigalSon.jpg"), "1662")
        self.fTabs.add(HTML("shouldn't be here!"), None)
        self.fTabs.add(HTML("This is a Test.<br />Tab should be on right"), "Test")
        self.fTabs.selectTab(0)

        dp = DecoratorTitledPanel("Tabs", "bluetitle", "bluetitleicon",
                                  ["bluetop", "bluetop2", "bluemiddle", "bluebottom"])
        dp.add(self.fTabs)
        RootPanel().add(dp)

    def createImage(self, imageUrl):
        image = Image(imageUrl)
        image.setStyleName("ks-images-Image")

        p = VerticalPanel()
        p.setHorizontalAlignment(HasAlignment.ALIGN_CENTER)
        p.setVerticalAlignment(HasAlignment.ALIGN_MIDDLE)
        p.add(image)
        return p

if __name__ == '__main__':
    pyjd.setup("./public/Tabs.html")
    app = Tabs()
    app.onModuleLoad()
    pyjd.run()
Python
0
@@ -1584,18 +1584,59 @@ g%22), - red, True +%0A red, True, name=%22johannes%22 )%0A @@ -1711,20 +1711,57 @@ g%22), - green, True +%0A green, True, name=%22self%22 )%0A @@ -1836,14 +1836,55 @@ g%22), - %221642 +%0A %221642%22, name=%22lamarche %22)%0A @@ -1914,32 +1914,57 @@ elf.createImage( +%0A %22rembrandt/TheRe @@ -1994,15 +1994,50 @@ g%22), - %221662%22 +,%0A %22prodigal%22 )%0A @@ -2090,16 +2090,39 @@ ), None) + # None means separator %0A @@ -2219,16 +2219,24 @@ %22Test%22 +, %22test%22 )%0A @@ -2461,24 +2461,65 @@ ().add(dp)%0A%0A + self.fTabs.addTabListener(self)%0A%0A def crea @@ -2798,16 +2798,16 @@ image)%0A%0A - @@ -2816,16 +2816,492 @@ turn p%0A%0A + def onTabSelected(self, sender, tabIndex):%0A pass%0A%0A def onBeforeTabSelected(self, sender, tabIndex):%0A # 6 because one of them is the separator.%0A if self.fTabs.getWidgetCount() == 6:%0A self.fTabs.add(HTML(%222nd Test.%3Cbr /%3ETab should be on right%22),%0A %222nd Test%22, name=%22test2%22)%0A return True%0A self.fTabs.remove(%22test2%22)%0A return tabIndex != 6 # don't allow change to tab 6 - we're removing it!%0A%0A if __nam
858e9f91e042a0fc1143e300e4e883b43d17a346
FIX : get_config raises redirect after try-except block
bidder_gateway.py
bidder_gateway.py
import sys
import shutil
import os
import logging
import pickle
import subprocess

from bottle import Bottle, run, urljoin, HTTPResponse, request

AGENT_CONFIG_SERVER = 'http://127.0.0.1:9985'

# agent pickle file path
pickle_path = '.bidders'

# agent base path
exec_base_path = '/home/nemi/workspace/test/daemon'

# set up logging
logging.basicConfig(filename='bidder_gateway.log',
                    format='%(asctime)-15s %(levelname)s %(message)s',
                    level=logging.DEBUG)
logger = logging.getLogger('bidder_gateway')

# create the bottle app so we don't use the global one
app = Bottle()

# initialize bidder map
bidders = {}

@app.get('/test_redirect')
def do_redirection():
    location = urljoin(AGENT_CONFIG_SERVER, '/v1/accounts/nemi')
    raise HTTPResponse("", status=302, Location=location)

@app.get('/v1/agents')
def get_agents():
    return '%s' % bidders.keys()

@app.get('/v1/agents/<name>/config')
def get_config(name):
    try :
        # try to map the name to the internal config name
        location = urljoin(
            AGENT_CONFIG_SERVER,
            '/v1/agents/%s/config' % bidders[name]['agent_conf_name'])
        raise HTTPResponse("", status=302, Location=location)
    except :
        return { 'resultCode' : 1,
                 'resultDescription' : 'unable to map %s' % name }

@app.post('/v1/agents/<name>/start')
def start_bidder(name):
    """
    Starts up a bidder using as the instance parameters
    the arguments passed in the query string
    """
    global _process_id
    result = { 'resultCode' : 0, 'resultDescription' : 'ok' }
    if name in bidders :
        result['resultCode'] = 1
        result['resultDescription'] = 'bidder already started'
        return result
    else :
        bidders[name] = {}

    # save the executable name and external name
    bidders[name]['bidder_name'] = name
    bidders[name]['executable'] = request.query['executable']

    # save the params
    bidders[name]['params'] = {
        k:v for k,v in request.query.iteritems()
            if k not in ('bidder_name', 'executable')
    }

    logger.info('bringing up bidder %s=%s' % (name, bidders[name]))

    # set the args a list (popen expects them that way)
    arguments = []
    for k,v in bidders[name]['params'].iteritems() :
        arguments.append('-%s' % k)
        arguments.append(v)

    exe = [ './%s' % bidders[name]['executable']]
    exe.extend(arguments)

    # bring the process up
    proc = subprocess.Popen(
        exe,
        cwd=exec_base_path,
        shell=False,
        close_fds=True,
        stdout=subprocess.PIPE)

    # wait for the forker process to finish
    proc.wait()
    pid = int(proc.stdout.readline())
    rc = proc.returncode
    if rc :
        del bidders[name]
        result['resultCode'] = 3
        result['resultDescription'] = 'return code is %d' % rc
        return result

    # save the pid for the new bidder
    bidders[name]['pid'] = pid

    # the key stored by the agent configuration service
    # is a concatenation of the bidder name passed and the
    # pid for for process
    bidders[name]['agent_conf_name'] = \
        '%s_%s' % (bidders[name]['executable'], bidders[name]['pid'])

    logger.info('bidder %s got pid %d' % (name, bidders[name]['pid']))

    # great, let's pickle the data
    try :
        f = open(os.path.join(pickle_path, str(bidders[name]['pid'])), 'wb')
        pickle.dump(bidders[name], f)
        f.close()
    except :
        result['resultCode'] = 2
        result['resultDescription'] = 'unable to pickle configuration'

    return result

@app.post('/v1/agents/<name>/stop')
def stop_bidder(name):
    """
    Stops a running bidder
    """
    result = { 'resultCode' : 0, 'resultDescription' : 'ok' }
    if name not in bidders :
        result['resultCode'] = 1
        result['resultDescription'] = 'bidder not running'
        return result

    logger.info('stopping bidder %s=%s' % (name, bidders[name]))

    pid = bidders[name]['pid']
    try :
        signal = 9
        if 'signal' in request.query :
            signal = int(request.query['signal'])
        os.kill(pid, signal)
        logger.info('signal %d sent to process with pid %d' % (signal, pid))
    except :
        result['resultCode'] = 2
        result['resultDescription'] = 'unable to kill process %s' % pid
        return result

    logger.info('bidder %s with pid %d stopped' % (name, pid))

    # clean up
    del bidders[name]
    try :
        os.remove(os.path.join(pickle_path, str(pid)))
    except :
        result = { 'resultCode' : 4,
                   'resultDescription' : 'unable to delete pickled data' }

    return result

if __name__ == '__main__' :
    logger.warning('starting up server')

    # check if the pickle_path exists
    if not os.path.exists(pickle_path):
        os.mkdir(pickle_path)

    # for each pickled process reload the configuration
    for config in os.listdir(pickle_path):
        f = open(os.path.join(pickle_path, config), 'rb')
        c = pickle.load(f)
        bidders[c['bidder_name']] = c
        f.close()
        logger.warning('loaded bidder %s=%s' % (c['bidder_name'], c))

    run(app, host='localhost', port=8080, reloader=True)
    sys.exit(0)
Python
0
@@ -1137,70 +1137,8 @@ '%5D)%0A - raise HTTPResponse(%22%22, status=302, Location=location)%0A @@ -1296,20 +1296,70 @@ %7D%0A - +raise HTTPResponse(%22%22, status=302, Location=location)%0A %0A@app.po
78ef7be3f97a8189a845ace2380ce8dfc8a4c531
improve TypeError messages with invalid slices
bidict/_common.py
bidict/_common.py
from .compat import PY2, iteritems, viewitems
from .util import pairs
from collections import Mapping


class BidirectionalMapping(Mapping):
    """
    Mutable and immutable bidict types extend this class,
    which implements all the shared logic.
    Users typically won't need to touch this.
    """
    def __init__(self, *args, **kw):
        self._fwd = {}
        self._bwd = {}
        for (k, v) in pairs(*args, **kw):
            self._put(k, v)
        inv = object.__new__(self.__class__)
        inv._fwd = self._bwd
        inv._bwd = self._fwd
        inv._inv = self
        self._inv = inv
        self._hash = None

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._fwd)

    __str__ = __repr__

    def __eq__(self, other):
        try:
            return viewitems(self) == viewitems(other)
        except:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __invert__(self):
        """
        Called when the unary inverse operator (~) is applied.
        """
        return self._inv

    inv = property(__invert__, doc='Property providing access to the inverse '
                                   'bidict. Can be chained as in: ``B.inv.inv is B``')

    def __inverted__(self):
        return iteritems(self._bwd)

    @staticmethod
    def _fwd_slice(slice):
        """
        Raises :class:`TypeError` if the given slice does not have
        either only its start or only its stop set to a non-None value.

        Returns True if only its start is not None
        and False if only its stop is not None.
        """
        start_missing = slice.start is None
        start_found = not start_missing
        stop_missing = slice.stop is None
        step_found = slice.step is not None

        if step_found or start_missing == stop_missing:
            raise TypeError('Slice must specify only either start or stop')

        return start_found

    def __getitem__(self, keyorslice):
        """
        Provides a __getitem__ implementation
        which accepts a slice (e.g. ``b[:val]``)
        to allow referencing an inverse mapping.
        A non-slice value (e.g. ``b[key]``) is considered
        a reference to a forward mapping.
        """
        if isinstance(keyorslice, slice):
            # forward lookup (by key): b[key:]
            if self._fwd_slice(keyorslice):
                return self._fwd[keyorslice.start]
            else:  # inverse lookup (by val): b[:val]
                return self._bwd[keyorslice.stop]
        else:  # keyorslice is a key: b[key]
            return self._fwd[keyorslice]

    def _put(self, key, val):
        try:
            oldval = self._fwd[key]
        except KeyError:
            oldval = _sentinel
        try:
            oldkey = self._bwd[val]
        except KeyError:
            oldkey = _sentinel

        if oldval is not _sentinel and oldkey is not _sentinel:
            if key == oldkey and val == oldval:
                return
            raise CollapseException((key, oldval), (oldkey, val))
        elif oldval is not _sentinel:
            del self._bwd[oldval]
        elif oldkey is not _sentinel:
            del self._fwd[oldkey]

        self._fwd[key] = val
        self._bwd[val] = key

    get = lambda self, k, *args: self._fwd.get(k, *args)
    copy = lambda self: self.__class__(self._fwd)
    get.__doc__ = dict.get.__doc__
    copy.__doc__ = dict.copy.__doc__

    __len__ = lambda self: len(self._fwd)
    __iter__ = lambda self: iter(self._fwd)
    __contains__ = lambda self, x: x in self._fwd
    __len__.__doc__ = dict.__len__.__doc__
    __iter__.__doc__ = dict.__iter__.__doc__
    __contains__.__doc__ = dict.__contains__.__doc__

    keys = lambda self: self._fwd.keys()
    items = lambda self: self._fwd.items()
    keys.__doc__ = dict.keys.__doc__
    items.__doc__ = dict.items.__doc__

    values = lambda self: self._bwd.keys()
    values.__doc__ = \
        "D.values() -> a set-like object providing a view on D's values. " \
        'Note that because values of a BidirectionalMapping are also keys, ' \
        'this returns a ``dict_keys`` object rather than a ``dict_values`` ' \
        'object.'

    if PY2:
        iterkeys = lambda self: self._fwd.iterkeys()
        viewkeys = lambda self: self._fwd.viewkeys()
        iteritems = lambda self: self._fwd.iteritems()
        viewitems = lambda self: self._fwd.viewitems()
        itervalues = lambda self: self._bwd.iterkeys()
        viewvalues = lambda self: self._bwd.viewkeys()
        iterkeys.__doc__ = dict.iterkeys.__doc__
        viewkeys.__doc__ = dict.viewkeys.__doc__
        iteritems.__doc__ = dict.iteritems.__doc__
        viewitems.__doc__ = dict.viewitems.__doc__
        itervalues.__doc__ = dict.itervalues.__doc__
        viewvalues.__doc__ = values.__doc__.replace('values()', 'viewvalues()')
        values.__doc__ = dict.values.__doc__


class CollapseException(Exception):
    """
    Raised when an attempt is made to insert a new mapping into
    a bidict that would collapse two existing mappings.
    """


_sentinel = object()
Python
0
@@ -1618,31 +1618,18 @@ -start_missing = +if slice.s @@ -1633,28 +1633,33 @@ e.st -art +ep is +not None -%0A +:%0A star @@ -1658,60 +1658,75 @@ -start_found = not start_missing%0A stop_missing + raise TypeError('Slice may not specify step')%0A none_start = s @@ -1732,18 +1732,19 @@ slice.st -op +art is None @@ -1752,26 +1752,25 @@ -step_found +none_stop = slice @@ -1776,24 +1776,19 @@ e.st -e +o p is -not None%0A -%0A @@ -1798,126 +1798,166 @@ if -step_found or start_missing == stop_missing:%0A raise TypeError('Slice must specify only either start or stop +none_start == none_stop:%0A raise TypeError('Exactly one of slice start or stop must be None '%0A 'and the other must not be ')%0A @@ -1974,19 +1974,22 @@ urn -start_found +not none_start %0A%0A
ac477a9d73a60551678f31e34b4f4527fc330a04
Update affected tests (sensor_wrapper already registers the options).
st2tests/st2tests/config.py
st2tests/st2tests/config.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg

from st2common import log as logging
import st2common.config as common_config

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def parse_args():
    _setup_config_opts()
    CONF(args=[])


def _setup_config_opts():
    cfg.CONF.reset()
    _register_config_opts()
    _override_config_opts()


def _override_config_opts():
    _override_db_opts()


def _register_config_opts():
    _register_common_opts()
    _register_api_opts()
    _register_auth_opts()
    _register_action_sensor_opts()
    _register_workflow_opts()


def _override_db_opts():
    CONF.set_override(name='db_name', override='st2-test', group='database')


def _register_common_opts():
    try:
        common_config.register_opts(ignore_errors=True)
    except:
        LOG.exception('Common config registration failed.')


def _register_api_opts():
    api_opts = [
        cfg.StrOpt('host', default='0.0.0.0', help='action API server host'),
        cfg.IntOpt('port', default=9101, help='action API server port'),
        cfg.ListOpt('allow_origin', default=['http://localhost:3000', 'http://dev'],
                    help='List of origins allowed'),
        cfg.IntOpt('heartbeat', default=25,
                   help='Send empty message every N seconds to keep connection open')
    ]
    _register_opts(api_opts, group='api')

    # XXX: note : template_path value only works if started from the top-level of the codebase.
    # Brittle!
    pecan_opts = [
        cfg.StrOpt('root', default='st2api.controllers.root.RootController',
                   help='Pecan root controller'),
        cfg.StrOpt('template_path', default='%(confdir)s/st2api/st2api/templates'),
        cfg.ListOpt('modules', default=['st2api']),
        cfg.BoolOpt('debug', default=True),
        cfg.BoolOpt('auth_enable', default=True),
        cfg.DictOpt('errors', default={404: '/error/404', '__force_dict__': True})
    ]
    _register_opts(pecan_opts, group='api_pecan')

    messaging_opts = [
        cfg.StrOpt('url', default='amqp://guest:guest@localhost:5672//',
                   help='URL of the messaging server.')
    ]
    _register_opts(messaging_opts, group='messaging')

    ssh_runner_opts = [
        cfg.StrOpt('remote_dir', default='/tmp',
                   help='Location of the script on the remote filesystem.'),
        cfg.BoolOpt('allow_partial_failure', default=False,
                    help='How partial success of actions run on multiple nodes should be treated.')
    ]
    _register_opts(ssh_runner_opts, group='ssh_runner')


def _register_auth_opts():
    auth_opts = [
        cfg.StrOpt('host', default='0.0.0.0'),
        cfg.IntOpt('port', default=9100),
        cfg.BoolOpt('use_ssl', default=False),
        cfg.StrOpt('mode', default='proxy'),
        cfg.StrOpt('logging', default='conf/logging.conf'),
        cfg.IntOpt('token_ttl', default=86400, help='Access token ttl in seconds.'),
        cfg.BoolOpt('debug', default=True)
    ]
    _register_opts(auth_opts, group='auth')


def _register_action_sensor_opts():
    action_sensor_opts = [
        cfg.BoolOpt('enable', default=True,
                    help='Whether to enable or disable the ability ' +
                         'to post a trigger on action.'),
        cfg.StrOpt('triggers_base_url', default='http://localhost:9101/v1/triggertypes/',
                   help='URL for action sensor to post TriggerType.'),
        cfg.StrOpt('webhook_sensor_base_url', default='http://localhost:9101/v1/webhooks/st2/',
                   help='URL for action sensor to post TriggerInstances.'),
        cfg.IntOpt('request_timeout', default=1,
                   help='Timeout value of all httprequests made by action sensor.'),
        cfg.IntOpt('max_attempts', default=10, help='No. of times to retry registration.'),
        cfg.IntOpt('retry_wait', default=1,
                   help='Amount of time to wait prior to retrying a request.')
    ]
    _register_opts(action_sensor_opts, group='action_sensor')


def _register_workflow_opts():
    workflow_opts = [
        cfg.StrOpt('url', default='http://localhost:8989',
                   help='Mistral API server root endpoint.')
    ]
    _register_opts(workflow_opts, group='workflow')


def _register_opts(opts, group=None):
    CONF.register_opts(opts, group)
Python
0
@@ -1043,16 +1043,30 @@ reset()%0A +%0A try:%0A _reg @@ -1081,24 +1081,194 @@ nfig_opts()%0A + except Exception:%0A # Some scripts register the options themselves which means registering them again will%0A # cause a non-fatal exception%0A return%0A _overrid
23cc84147f52cd4036398200916e68bd0f078050
Fix print statement
stationspinner/evecentral/tasks.py
stationspinner/evecentral/tasks.py
from stationspinner.celery import app
from celery import chord
from stationspinner.evecentral.models import Market, MarketItem
from stationspinner.libs.pragma import get_location_name
from stationspinner.sde.models import InvType
from stationspinner.settings import STATIC_ROOT
from evelink.thirdparty.eve_central import EVECentral
from urllib2 import urlopen
from datetime import datetime
from pytz import UTC
from django.db.models import Q
from celery.utils.log import get_task_logger
from traceback import format_exc
from os.path import join
import csv

log = get_task_logger(__name__)


def _market_items():
    market_items = InvType.objects.filter(published=True,
                                          marketGroupID__lt=35000)
    typeIDs = [i.pk for i in market_items.order_by('id')]
    for i in xrange(0, len(typeIDs), 100):
        yield typeIDs[i:i+100]


@app.task(name='evecentral.write_static_prices')
def write_static_prices():
    for market in Market.objects.all():
        market_items = MarketItem.objects.filter(locationID=market.locationID).order_by('typeName')
        with open(join(STATIC_ROOT, '{0}.csv'.format(market.locationID)), 'wb') as output:
            csvprices = csv.writer(output, delimiter=';')
            for item in market_items:
                try:
                    csvprices.writerow((item.typeID,
                                        item.typeName,
                                        item.buy_max,
                                        item.buy_min,
                                        item.buy_percentile,
                                        item.buy_volume,
                                        item.sell_max,
                                        item.sell_min,
                                        item.sell_percentile,
                                        item.sell_volume))
                except:
                    print item


@app.task(name='evecentral.update_all_markets')
def update_all_markets():
    market_updates = []
    for market in Market.objects.filter(
            Q(cached_until__lte=datetime.now(tz=UTC)) | Q(cached_until=None)):
        market_updates.extend(update_market(market.locationID))
        market.updated()
        log.info('Updating "{0}" market'.format(get_location_name(market.locationID)))
    chord(market_updates, write_static_prices.s()).apply_async()


def update_market(locationID):
    tasks = []
    for typeIDs in _market_items():
        tasks.append(parse_market_data.s(typeIDs, locationID))
    return tasks


@app.task(name='evecentral.parse_market_data')
def parse_market_data(typeIDs, locationID):
    ec = EVECentral(url_fetch_func=lambda url: urlopen(url).read())
    try:
        if locationID > 30000000:
            data = ec.market_stats(type_ids=typeIDs, system=locationID)
        else:
            data = ec.market_stats(type_ids=typeIDs, regions=locationID)
    except Exception, ex:
        log.error('Could not update locationID {0}: {1}'.format(locationID,
                                                                format_exc(ex)))
        return
    for typeID, price_data in data.items():
        prices = {}
        for price_type in ('buy', 'sell'):
            type_data = price_data[price_type]
            for statistic, value in type_data.items():
                prices['{0}_{1}'.format(price_type, statistic)] = value
        MarketItem.objects.update_or_create(typeID=typeID,
                                            locationID=locationID,
                                            typeName=InvType.objects.get(pk=typeID).typeName,
                                            defaults=prices)
Python
0.99991
@@ -1892,18 +1892,93 @@ -print item +log.debug('Failed to render csv row for %7B0%7D at %7B1%7D.'.format(item, market.locationID)) %0A%0A%0A%0A
df69d04b468bfa74419fc6715bdef30e38374b1b
Reword message when trying to stage on an unnamed buffer
core/commands/inline_stage_hunk.py
core/commands/inline_stage_hunk.py
from collections import namedtuple
from itertools import chain
import re

import sublime
from sublime_plugin import TextCommand

from ..fns import accumulate, filter_, unique
from ..git_command import GitCommand
from ..parse_diff import SplittedDiff


__all__ = (
    "gs_inline_stage_hunk",
)


MYPY = False
if MYPY:
    from typing import Iterator, List, NamedTuple, Optional, Tuple
    from ..parse_diff import Hunk as HunkText


if MYPY:
    Hunk = NamedTuple("Hunk", [
        ("a_start", int),
        ("a_length", int),
        ("b_start", int),
        ("b_length", int),
        ("content", str)
    ])
else:
    Hunk = namedtuple("Hunk", "a_start a_length b_start b_length content")


class UnsupportedCombinedDiff(RuntimeError):
    pass


def flash(view, message):
    # type: (sublime.View, str) -> None
    window = view.window()
    if window:
        window.status_message(message)


class gs_inline_stage_hunk(TextCommand, GitCommand):
    def run(self, edit):
        view = self.view
        fpath = view.file_name()
        if not fpath:
            flash(view, "Cannot stage unnnamed files.")
            return

        if view.is_dirty():
            flash(view, "Cannot stage on unsaved files.")
            return

        raw_diff = self.git("diff", "-U0", fpath)
        if not raw_diff:
            not_tracked_file = self.git("ls-files", fpath).strip() == ""
            if not_tracked_file:
                self.git("add", fpath)
                flash(view, "Staged whole file.")
            else:
                flash(view, "The file is clean.")
            return

        diff = SplittedDiff.from_string(raw_diff)
        assert len(diff.headers) == 1

        try:
            hunks = hunks_touching_selection(diff, view)
        except UnsupportedCombinedDiff:
            flash(view, "Files with merge conflicts are not supported.")
            return

        if not hunks:
            flash(view, "Not on a hunk.")
            return

        patch = format_patch(diff.headers[0].text, hunks)
        self.git("apply", "--cached", "--unidiff-zero", "-", stdin=patch)

        hunk_count = len(hunks)
        flash(view, "Staged {} {}.".format(hunk_count, pluralize("hunk", hunk_count)))


def hunks_touching_selection(diff, view):
    # type: (SplittedDiff, sublime.View) -> List[Hunk]
    rows = unique(
        view.rowcol(line.begin())[0] + 1
        for region in view.sel()
        for line in view.lines(region)
    )
    hunks = list(map(parse_hunk, diff.hunks))
    return list(unique(filter_(hunk_containing_row(hunks, row) for row in rows)))


def parse_hunk(hunk):
    # type: (HunkText) -> Hunk
    return Hunk(*parse_metadata(hunk.header().text), content=hunk.content().text)


def hunk_containing_row(hunks, row):
    # type: (List[Hunk], int) -> Optional[Hunk]
    # Assumes `hunks` are sorted
    for hunk in hunks:
        if row < hunk.b_start:
            break

        # Assume a length of "1" for removal only hunks so the
        # user can actually grab them exactly on the line above the
        # removal gutter mark.
        b_end = hunk.b_start + max(hunk.b_length, 1)
        if hunk_with_no_newline_marker(hunk):
            # Make the hit area one line longer so that the user
            # can stage being on the last line of the view (if the
            # newline gets *added* in this hunk).  This is technially
            # wrong if the newline gets *removed* but doesn't do any
            # harm because there can't be any line after that anyway.
            b_end += 1

        if hunk.b_start <= row < b_end:
            return hunk

    return None


def hunk_with_no_newline_marker(hunk):
    # type: (Hunk) -> bool
    # Avoid looking for "No newline..." which depends on the locale setting
    return "\n\\ " in hunk.content


def format_patch(header, hunks):
    # type: (str, List[Hunk]) -> str
    return ''.join(chain(
        [header],
        map(format_hunk, rewrite_hunks(hunks))
    ))


def format_hunk(hunk):
    # type: (Hunk) -> str
    return "@@ -{},{} +{},{} @@\n{}".format(*hunk)


def rewrite_hunks(hunks):
    # type: (List[Hunk]) -> Iterator[Hunk]
    # Assumes `hunks` are sorted, and from the same file
    deltas = (hunk.b_length - hunk.a_length for hunk in hunks)
    offsets = accumulate(deltas, initial=0)
    for hunk, offset in zip(hunks, offsets):
        new_b = hunk.a_start + offset
        if hunk_of_additions_only(hunk):
            new_b += 1
        elif hunk_of_removals_only(hunk):
            new_b -= 1
        yield hunk._replace(b_start=new_b)


def hunk_of_additions_only(hunk):
    # type: (Hunk) -> bool
    # Note that this can only ever be true for zero context diffs
    return hunk.a_length == 0 and hunk.b_length > 0


def hunk_of_removals_only(hunk):
    # type: (Hunk) -> bool
    # Note that this can only ever be true for zero context diffs
    return hunk.b_length == 0 and hunk.a_length > 0


def rewrite_hunks_for_reset(hunks):
    # type: (List[Hunk]) -> Iterator[Hunk]
    # Assumes `hunks` are sorted, and from the same file
    deltas = (hunk.b_length - hunk.a_length for hunk in hunks)
    offsets = accumulate(deltas, initial=0)
    for hunk, offset in zip(hunks, offsets):
        new_a, new_b = hunk.b_start - offset, hunk.a_start
        if hunk_of_additions_only(hunk):
            new_a -= 1
            new_b += 1
        elif hunk_of_removals_only(hunk):
            new_a += 1
            new_b -= 1
        yield hunk._replace(a_start=new_a, b_start=new_b)


LINE_METADATA = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")


def parse_metadata(line):
    # type: (str) -> Tuple[int, int, int, int]
    match = LINE_METADATA.match(line)
    if match is None:
        raise UnsupportedCombinedDiff(line)

    a_start, a_length, b_start, b_length = match.groups()
    return int(a_start), int(a_length or "1"), int(b_start), int(b_length or "1")


def pluralize(word, count):
    # type: (str, int) -> str
    return word if count == 1 else word + "s"
Python
0
@@ -1089,16 +1089,19 @@ t stage +on unnnamed @@ -1101,20 +1101,22 @@ nnnamed -file +buffer s.%22)%0A
ca57e29c15ad02dee3cdad0d2159cbe33c15d6e0
fix expire cache
corehq/apps/app_manager/signals.py
corehq/apps/app_manager/signals.py
from __future__ import absolute_import
from __future__ import unicode_literals

from django.dispatch.dispatcher import Signal

from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from corehq.apps.domain.models import Domain
from dimagi.utils.logging import notify_exception


def create_app_structure_repeat_records(sender, application, **kwargs):
    from corehq.motech.repeaters.models import AppStructureRepeater
    domain = application.domain
    if domain:
        repeaters = AppStructureRepeater.by_domain(domain)
        for repeater in repeaters:
            repeater.register(application)


def update_callcenter_config(sender, application, **kwargs):
    if not application.copy_of:
        return

    try:
        domain = Domain.get_by_name(application.domain)
        cc_config = domain.call_center_config
        if not cc_config or not (cc_config.fixtures_are_active() and cc_config.config_is_valid()):
            return

        app_config = get_call_center_config_from_app(application)
        save = cc_config.update_from_app_config(app_config)
        if save:
            cc_config.save()
    except Exception:
        notify_exception(None, "Error updating CallCenter config for app build")


def expire_latest_enabled_build_profiles(sender, application, **kwargs):
    from corehq.apps.app_manager.util import get_latest_enabled_build_for_profile
    from corehq.apps.app_manager.util import get_enabled_build_profiles_for_version
    if application.copy_of:
        for build_profile_id in application.build_profiles:
            get_latest_enabled_build_for_profile.clear(application.domain, build_profile_id)
        get_enabled_build_profiles_for_version(application.get_id, application.version)


app_post_save = Signal(providing_args=['application'])

app_post_save.connect(create_app_structure_repeat_records)
app_post_save.connect(update_callcenter_config)
app_post_save.connect(expire_latest_enabled_build_profiles)

app_post_release = Signal(providing_args=['application'])
Python
0.000001
@@ -1702,16 +1702,22 @@ _version +.clear (applica
55c0d8912750ad8ddc702213c340c02d10638640
Test function
corehq/apps/sms/tests/test_util.py
corehq/apps/sms/tests/test_util.py
#!/usr/bin/env python
from django.test import TestCase

from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import apply_leniency
from corehq.apps.sms.util import (
    ContactNotFoundException,
    clean_phone_number,
    get_contact,
    is_contact_active,
)
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.util.test_utils import create_test_case


class UtilTestCase(TestCase):

    def setUp(self):
        self.domain = 'test-domain'
        self.user = CommCareUser.create(self.domain, 'test-user', '123')

    def tearDown(self):
        self.user.delete()

    def testCleanPhoneNumber(self):
        phone_number = " 324 23-23421241"
        cleaned = clean_phone_number(phone_number)
        self.assertEqual(cleaned, "+3242323421241")

    @run_with_all_backends
    def test_get_contact_for_case(self):
        with create_test_case(self.domain, 'contact', 'test-case') as case:
            contact = get_contact(self.domain, case.case_id)
            self.assertEqual(contact.case_id, case.case_id)
            self.assertTrue(is_commcarecase(contact))

            with self.assertRaises(ContactNotFoundException):
                get_contact(self.domain + 'x', case.case_id)

    def test_get_contact_for_user(self):
        contact = get_contact(self.domain, self.user.get_id)
        self.assertEqual(contact.get_id, self.user.get_id)
        self.assertTrue(isinstance(contact, CommCareUser))

        with self.assertRaises(ContactNotFoundException):
            get_contact(self.domain + 'x', self.user.get_id)

    def test_contact_not_found(self):
        with self.assertRaises(ContactNotFoundException):
            get_contact(self.domain, 'this-id-should-not-be-found')

    @run_with_all_backends
    def test_is_contact_active_for_case(self):
        with create_test_case(self.domain, 'contact', 'test-case') as case:
            self.assertTrue(is_contact_active(self.domain, 'CommCareCase', case.case_id))
            update_case(self.domain, case.case_id, close=True)
            self.assertFalse(is_contact_active(self.domain, 'CommCareCase', case.case_id))

    def test_is_contact_active_for_user(self):
        self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
        self.user.is_active = False
        self.user.save()
        self.assertFalse(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))
        self.user.is_active = True
        self.user.save()
        self.assertTrue(is_contact_active(self.domain, 'CommCareUser', self.user.get_id))

    def test_apply_leniency(self):
        self.assertEqual('16175551234', apply_leniency(' 1 (617) 555-1234 '))
        self.assertEqual('16175551234', apply_leniency(' 1.617.555.1234 '))
        self.assertEqual('16175551234', apply_leniency(' +1 617 555 1234 '))
Python
0.000006
@@ -49,16 +49,66 @@ stCase%0A%0A +from nose.tools import assert_false, assert_true%0A%0A from cor @@ -326,16 +326,48 @@ active,%0A + is_superuser_or_contractor,%0A )%0Afrom c @@ -409,16 +409,27 @@ CareUser +, CouchUser %0Afrom co @@ -596,16 +596,30 @@ est_case +, flag_enabled %0A%0A%0Aclass @@ -3059,16 +3059,422 @@ 17 555 1234 '))%0A +%0A%0Adef test_contractor():%0A user = CouchUser(username=%22eric%22)%0A with flag_enabled('IS_CONTRACTOR'):%0A assert_true(is_superuser_or_contractor(user))%0A%0A%0Adef test_superuser():%0A user = CouchUser(username=%22john%22, is_superuser=True)%0A assert_true(is_superuser_or_contractor(user))%0A%0A%0Adef test_normal_user():%0A user = CouchUser(username=%22michael%22)%0A assert_false(is_superuser_or_contractor(user))%0A
4f0d43f3c451a4059a2931ec771a8d796396250e
fasta2imgt converts to upper
bin/fasta2imgt.py
bin/fasta2imgt.py
#! /usr/bin/env python

import sys
import optparse

from Bio import SeqIO
from Bio.Alphabet import generic_dna

import vdj

parser = optparse.OptionParser()
(options, args) = parser.parse_args()

if len(args) == 2:
    inhandle = open(args[0], 'r')
    outhandle = open(args[1], 'w')
elif len(args) == 1:
    inhandle = open(args[0], 'r')
    outhandle = sys.stdout
elif len(args) == 0:
    inhandle = sys.stdin
    outhandle = sys.stdout
else:
    raise Exception, "Wrong number of arguments."

for record in SeqIO.parse(inhandle, 'fasta', generic_dna):
    chain = vdj.ImmuneChain(record)
    print >>outhandle, chain
Python
0.999999
@@ -577,16 +577,24 @@ n(record +.upper() )%0A pr
9ebf03ddcba26054824547f6d1094ba9fb37a030
Restructure the create_permission signal handler to perform fewer SQL queries; this speeds up the test suite dramatically.
django/contrib/auth/management/__init__.py
django/contrib/auth/management/__init__.py
""" Creates permissions for all installed apps that need permissions. """ from django.db.models import get_models, signals from django.contrib.auth import models as auth_app def _get_permission_codename(action, opts): return u'%s_%s' % (action, opts.object_name.lower()) def _get_all_permissions(opts): "Returns (codename, name) for all permissions in the given opts." perms = [] for action in ('add', 'change', 'delete'): perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw))) return perms + list(opts.permissions) def create_permissions(app, created_models, verbosity, **kwargs): from django.contrib.contenttypes.models import ContentType app_models = get_models(app) for klass in app_models: ctype = ContentType.objects.get_for_model(klass) for codename, name in _get_all_permissions(klass._meta): p, created = auth_app.Permission.objects.get_or_create( codename=codename, content_type__pk=ctype.id, defaults={ 'name': name, 'content_type': ctype } ) if created and verbosity >= 2: print "Adding permission '%s'" % p def create_superuser(app, created_models, verbosity, **kwargs): from django.core.management import call_command if auth_app.User in created_models and kwargs.get('interactive', True): msg = ("\nYou just installed Django's auth system, which means you " "don't have any superusers defined.\nWould you like to create one " "now? (yes/no): ") confirm = raw_input(msg) while 1: if confirm not in ('yes', 'no'): confirm = raw_input('Please enter either "yes" or "no": ') continue if confirm == 'yes': call_command("createsuperuser", interactive=True) break signals.post_syncdb.connect(create_permissions, dispatch_uid = "django.contrib.auth.management.create_permissions") signals.post_syncdb.connect(create_superuser, sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
Python
0.000002
@@ -84,25 +84,28 @@ ngo. -db.models +contrib.auth import get_ @@ -104,27 +104,26 @@ ort -get_ models -, signals + as auth_app %0Afro @@ -131,36 +131,33 @@ django. -contrib.auth +db.models import models a @@ -148,34 +148,36 @@ import +get_ models - as auth_app +, signals%0A %0A%0Adef _g @@ -752,16 +752,232 @@ els(app) +%0A%0A # This will hold the permissions we're looking for as%0A # (content_type, (codename, name))%0A searched_perms = set()%0A # The codenames and ctypes that should exist.%0A ctypes = set()%0A codenames = set() %0A for @@ -1067,26 +1067,42 @@ -for codename, name +ctypes.add(ctype)%0A for perm in @@ -1152,107 +1152,412 @@ -p, created = auth_app.Permission.objects.get_or_create(%0A codename=codename,%0A +codenames.add(perm%5B0%5D)%0A searched_perms.add((ctype, perm))%0A%0A # Find all the Permissions that a) have a content_type for a model we're%0A # looking for, and b) have a codename we're looking for. It doesn't need to%0A # have both, we have a list of exactly what we want, and it's faster to%0A # write the query with fewer conditions.%0A all_perms = set(auth_app.Permission.objects.filter(%0A @@ -1578,19 +1578,17 @@ pe__ -pk +in =ctype -.id +s ,%0A @@ -1597,120 +1597,335 @@ - defaults=%7B%0A ' +codename__in=codenames%0A ).values_list(%0A %22content_type%22, %22codename%22%0A ))%0A%0A for ctype, (code name -': +, name -,%0A 'content_type': ctype%0A %7D +) in searched_perms:%0A # If the permissions exists, move on.%0A if (ctype.pk, codename) in all_perms:%0A continue%0A p = auth_app.Permission.objects.create(%0A codename=codename, %0A @@ -1933,17 +1933,26 @@ -) +name=name, %0A @@ -1960,22 +1960,47 @@ -if created and +content_type=ctype%0A )%0A if ver @@ -2012,20 +2012,16 @@ y %3E= 2:%0A - @@ -2060,16 +2060,17 @@ '%22 %25 p%0A%0A +%0A def crea
8431458f7f18ec0dde86d46ec18dbdb61412f8ef
bump version
blaze/__init__.py
blaze/__init__.py
from __future__ import absolute_import, division, print_function

import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)

inf = float('inf')
nan = float('nan')

__version__ = '0.4.2-dev'

# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
#     catalog.register_ipy_magic()
# del sys

def print_versions():
    """Print all the versions of software that Blaze relies on."""
    import sys, platform
    import numpy as np
    import dynd
    import datashape
    import blz
    print("-=" * 38)
    print("Blaze version: %s" % __version__)
    print("Datashape version: %s" % datashape.__version__)
    print("NumPy version: %s" % np.__version__)
    print("DyND version: %s / LibDyND %s" %
          (dynd.__version__, dynd.__libdynd_version__))
    print("BLZ version: %s" % blz.__version__)
    print("Blosc version: %s (%s)" % blz.blosc_version())
    print("Python version: %s" % sys.version)
    (sysname, nodename, release, version, machine, processor) = \
        platform.uname()
    print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
    if sysname == "Linux":
        print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
    if not processor:
        processor = "not recognized"
    print("Processor: %s" % processor)
    print("Byte-ordering: %s" % sys.byteorder)
    print("Detected cores: %s" % blz.detect_number_of_cores())
    print("-=" * 38)


def test(verbosity=1, xunitfile=None, exit=False):
    """
    Runs the full Blaze test suite, outputting
    the results of the tests to sys.stdout.

    This uses nose tests to discover which tests to
    run, and runs tests in any 'tests' subdirectory
    within the Blaze module.

    Parameters
    ----------
    verbosity : int, optional
        Value 0 prints very little, 1 prints a little bit,
        and 2 prints the test names while testing.
    xunitfile : string, optional
        If provided, writes the test results to an xunit
        style xml file. This is useful for running the tests
        in a CI server such as Jenkins.
    exit : bool, optional
        If True, the function will call sys.exit with an
        error code after the tests are finished.
    """
    import nose
    import os
    import sys
    argv = ['nosetests', '--verbosity=%d' % verbosity]
    # Output an xunit file if requested
    if xunitfile:
        argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
    # Set the logging level to warn
    argv.extend(['--logging-level=WARN'])
    # Add all 'tests' subdirectories to the options
    rootdir = os.path.dirname(__file__)
    for root, dirs, files in os.walk(rootdir):
        if 'tests' in dirs:
            testsdir = os.path.join(root, 'tests')
            argv.append(testsdir)
            print('Test dir: %s' % testsdir[len(rootdir)+1:])
    # print versions (handy when reporting problems)
    print_versions()
    sys.stdout.flush()
    # Ask nose to do its thing
    return nose.main(argv=argv, exit=exit)
Python
0
@@ -229,11 +229,11 @@ '0. -4.2 +6.0 -dev
dc9071a9574d435cead5b54355d237c7cfe679d1
handle datetime in encoder
bokeh/protocol.py
bokeh/protocol.py
import uuid
import json
import logging
import time

from six.moves import cPickle as pickle
import numpy as np
import pandas as pd

log = logging.getLogger(__name__)

"""
serialization functions for rpc server, we serialize json messages,
as well as python data, which are lists of numpy arrays.
msg serialization one object -> one string
data serialization list of arrays -> list of buffers/strings

we have 3 protocol levels here
1. zeromq, functions exist to separate the envelope from the payload,
and pack those up as well.

2. arrayserver protocol, arrayserver messages are the payloads of zeromq
messages, and are packaged into clientid, reqid, msgobj (json),
dataobjects - list data which can be serialized and deserialized

3. rpc protocol, a layer around the msgobject and a data object
"""

millifactor = 10 ** 6.

class NumpyJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, pd.Series):
            return self.transform_list(obj.tolist())
        elif isinstance(obj, np.ndarray):
            if obj.dtype.kind == 'M':
                obj = obj.astype('datetime64[ms]').astype('int64')
            return self.transform_list(obj.tolist())
        elif isinstance(obj, np.number):
            if isinstance(obj, np.integer):
                return int(obj)
            else:
                return float(obj)
        elif isinstance(obj, pd.tslib.Timestamp):
            return obj.value / millifactor
        else:
            return super(NumpyJSONEncoder, self).default(obj)

    def transform_list(self, l):
        try:
            for k, v in enumerate(l):
                if isinstance(v, list):
                    v = self.transform_list(v)
                elif np.isnan(v):
                    l[k] = "NaN"
                elif np.isposinf(v):
                    l[k] = "Infinity"
                elif np.isneginf(v):
                    l[k] = "-Infinity"
        # If we get a type error, then there are non-numeric types
        # in the list, just bail...
        except TypeError:
            pass
        return l

def serialize_json(obj, encoder=NumpyJSONEncoder, **kwargs):
    return json.dumps(obj, cls=encoder, **kwargs)

deserialize_json = json.loads

def default_serialize_data(data):
    """
    Parmeters
    ---------
    data : list of python objects (mostly numpy arrays)

    Returns
    ---------
    output : list of length 2n, where n is the number of objects.
        first item is pickled metadata, second is the data itself.
        for numpy arrays
        metadata : {'datatype' : 'numpy', 'dtype' : 'dtype', 'shape' : [2,2]}
        data : the array itself
        for arbitrary python objects
        metadata : {'datatype' : 'pickle'}
        data : pickled object
    """
    output = []

    def add_numpy(d):
        metadata = {'dtype' : d.dtype,
                    'shape' : d.shape,
                    'datatype' : 'numpy'}
        metadata = pickle.dumps(metadata)
        output.append(metadata)
        output.append(d)

    def add_pickle(d):
        output.append(pickle.dumps({'datatype' : 'pickle'}))
        output.append(pickle.dumps(d, protocol=-1))

    for d in data:
        if isinstance(d, np.ndarray):
            d = np.ascontiguousarray(d)
            try:
                temp = np.frombuffer(d, dtype=d.dtype)
            except (ValueError, TypeError):
                add_pickle(d)
                continue
            add_numpy(d)
        else:
            add_pickle(d)
    return output

def default_deserialize_data(input):
    """
    Parmeters
    ---------
    input : list of strings from default_serialize_data

    Returns
    ---------
    output : list of python objects, mostly numpy arrays
    """
    output = []
    curr_index = 0
    while curr_index < len(input):
        meta = pickle.loads(input[curr_index])
        if meta['datatype'] == 'numpy':
            array = np.frombuffer(input[curr_index + 1], dtype=meta['dtype'])
            array = array.reshape(meta['shape'])
            output.append(array)
        elif meta['datatype'] == 'pickle':
            obj = pickle.loads(input[curr_index + 1])
            output.append(obj)
        curr_index += 2
    return output

serialize_web = serialize_json

deserialize_web = deserialize_json

def status_obj(status):
    return {'msgtype' : 'status',
            'status' : status}

def error_obj(error_msg):
    return {
        'msgtype' : 'error',
        'error_msg' : error_msg}
Python
0.000002
@@ -1443,16 +1443,126 @@ ifactor%0A + elif isinstance(obj, (dt.datetime, dt.date)):%0A return time.mktime(obj.timetuple()) * 1000.%0A
f794817bf62c8f92a6d7d9e55e13866dc63df7ba
Fix issue #7
botbot/checker.py
botbot/checker.py
import stat, os

from . import problems

class Checker:
    """
    Holds a set of checks that can be run on a file to make sure
    that it's suitable for the shared directory. Runs checks
    recursively on a given path.
    """
    # checks is a set of all the checking functions this checker knows of. All
    # checkers return a number signifying a specific problem with the
    # file specified in the path.

    def __init__(self):
        self.checks = set()
        self.all_problems = list()

    def register(self, fn):
        """Add a new checking function to the set, or a list/tuple of functions."""
        if isinstance(fn, list) or isinstance(fn, tuple):
            for f in fn:
                self.checks.add(f)
        else:
            self.checks.add(fn)

    def check_tree(self, path):
        """
        Run all the checks on every file in the specified path,
        recursively. Returns a list of tuples. Each tuple contains 2
        elements: the first is the path of the file, and the second
        is a list of issues with the file at that path.
        """
        mode = os.stat(path).st_mode
        for f in os.listdir(path):
            newpath = os.path.join(path, f)
            np_mode = os.stat(newpath).st_mode
            if stat.S_ISDIR(np_mode):
                self.check_tree(newpath)
            else:
                current_problems = set()
                for check in self.checks:
                    p = check(newpath)
                    current_problems.add(p)
                self.all_problems.append((newpath, current_problems))

        # Note: this section removes the residual dummy errors
        # from files that have other errors. It adds another O(n)
        # loop where we could have done it in that previous loop,
        # so we should probably optimize it at some point.
        for prob in self.all_problems:
            prob_set = prob[1]
            n = len(prob_set)
            if problems.PROB_NO_PROBLEM in prob[1] and n > 1:
                prob[1].remove(problems.PROB_NO_PROBLEM)

    def pretty_print_issues(self, verbose):
        """Print a list of issues with their fixes."""
        for p in self.all_problems:
            for m in p[1]:
                if (verbose):
                    print(p[0] + ": " + m.message + " " + m.fix)
                else:
                    if m != problems.PROB_NO_PROBLEM:
                        print(p[0] + ": " + m.message + " " + m.fix)

def has_permission_issues(path):
    """Check whether a given path has bad permissons."""
    mode = os.stat(path).st_mode
    if stat.S_ISDIR(mode) and not stat.S_IXGRP(mode):
        return problems.PROB_DIR_NOT_EXEC
    else:
        if not bool(stat.S_IRGRP & mode):
            return problems.PROB_FILE_NOT_GRPRD
        else:
            return problems.PROB_NO_PROBLEM

def is_fastq(path):
    """Check whether a given file is a fastq file."""
    if os.path.splitext(path)[1] == ".fastq":
        return problems.PROB_FILE_IS_FASTQ
    else:
        return problems.PROB_NO_PROBLEM
Python
0
@@ -2963,16 +2963,57 @@ fastq%22:%0A + if not os.path.islink(path):%0A @@ -3047,30 +3047,17 @@ S_FASTQ%0A - else:%0A +%0A retu
a7b9c9a120aebe270ea200f3be0b2d3468f911cf
Bump version
modelqueryform/__init__.py
modelqueryform/__init__.py
__version__ = "2.1"
Python
0
@@ -14,7 +14,7 @@ %222. -1 +2 %22%0A
ed360f5d896593f2646037c1b2028d8a5552a2d2
fix test import data
tests/case_manager/test_case_data_manager.py
tests/case_manager/test_case_data_manager.py
# @Time    : 2016/9/1 21:04
# @Author  : lixintong
import datetime
import os
import unittest

from uitester.case_manager.case_data_manager import CaseDataManager


class TestCaseDataManager(unittest.TestCase):
    def setUp(self):
        self.case_data_manager = CaseDataManager()
        self.package_name = ''

    def test_export_and_import_data(self):
        # notice: export and import must share a common CaseDataManager
        # export test
        print(" export start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        case_list = self.case_data_manager.db_helper.query_case_all()
        cases_id_list = []
        for case in case_list:
            cases_id_list.append(str(case.id))  # convert the id to str
        path = os.path.join(os.getcwd(), 'data.dpk')
        # self.package_name = self.case_data_manager.export_data(path, cases_id_list)
        self.case_data_manager.export_data(path, cases_id_list)  # export the data
        print(" export finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        # import test
        print(" import start :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        conflict_datas = self.case_data_manager.import_data(path)  # with conflicts
        conflict_datas = self.case_data_manager.import_data(path)  # without conflicts
        if conflict_datas:
            updata_tag_message_list = []
            for key in conflict_datas:
                data = conflict_datas[key]
                updata_tag_message_list.append(data)
            self.case_data_manager.merge_conflict_data(updata_tag_message_list)
        # result validation unfinished
        print(self.case_data_manager.case_file_data["name"][0])
        case = self.case_data_manager.db_helper.query_case_by_name(
            True, self.case_data_manager.case_file_data["name"][0])
        self.assertTrue(case is not None)
        print("import finish :", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
Python
0
@@ -1936,24 +1936,28 @@ %5D)%0A%0A + self.assertT
4d40e9db4bd6b58787557e8d5547f69eb67c9b96
Add additional coverage to author build list
tests/changes/api/test_author_build_index.py
tests/changes/api/test_author_build_index.py
from uuid import uuid4

from changes.config import db
from changes.models import Author
from changes.testutils import APITestCase


class AuthorBuildListTest(APITestCase):
    def test_simple(self):
        fake_author_id = uuid4()

        self.create_build(self.project)

        path = '/api/0/authors/{0}/builds/'.format(fake_author_id.hex)

        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 0

        author = Author(email='foo@example.com', name='Foo Bar')
        db.session.add(author)

        build = self.create_build(self.project, author=author)

        path = '/api/0/authors/{0}/builds/'.format(author.id.hex)

        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 1
        assert data[0]['id'] == build.id.hex
Python
0
@@ -518,25 +518,31 @@ ail= -'foo@example.com' +self.default_user.email , na @@ -650,16 +650,16 @@ uthor)%0A%0A - @@ -906,8 +906,281 @@ .id.hex%0A +%0A self.login(self.default_user)%0A%0A path = '/api/0/authors/me/builds/'%0A%0A resp = self.client.get(path)%0A assert resp.status_code == 200%0A data = self.unserialize(resp)%0A assert len(data) == 1%0A assert data%5B0%5D%5B'id'%5D == build.id.hex%0A
03aebd7eff51be1847866d9920b8520cee72348f
fix failure in test_global_pinger_memo
tests/python/pants_test/cache/test_pinger.py
tests/python/pants_test/cache/test_pinger.py
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)

import threading
import time

from six.moves import SimpleHTTPServer, socketserver

from pants.cache.pinger import Pinger
from pants_test.base_test import BaseTest


def get_delayed_handler(delay):
  class DelayResponseHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_HEAD(self):
      time.sleep(delay)
      self.send_response(200)
      self.end_headers()

  return DelayResponseHandler


class TestPinger(BaseTest):
  timeout_seconds = .6
  slow_seconds = .05
  fast_seconds = 0

  def setup_delayed_server(self, delay):
    server = socketserver.TCPServer(("", 0), get_delayed_handler(delay))
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server

  def setUp(self):
    timeout = self.setup_delayed_server(self.timeout_seconds)
    slow = self.setup_delayed_server(self.slow_seconds)
    fast = self.setup_delayed_server(self.fast_seconds)
    self.servers = [timeout, slow, fast]
    self.fast_netloc = 'localhost:{}'.format(fast.socket.getsockname()[1])
    self.slow_netloc = 'localhost:{}'.format(slow.socket.getsockname()[1])
    self.timeout_netloc = 'localhost:{}'.format(timeout.socket.getsockname()[1])

  def test_pinger_times_correct(self):
    test = Pinger(timeout=.5, tries=2)
    netlocs = [self.fast_netloc, self.slow_netloc, self.timeout_netloc]
    ping_results = dict(test.pings(netlocs))
    self.assertLess(ping_results[self.fast_netloc], ping_results[self.slow_netloc])
    self.assertEqual(ping_results[self.timeout_netloc], Pinger.UNREACHABLE)

  def test_pinger_timeout_config(self):
    test = Pinger(timeout=self.slow_seconds - .01, tries=2)
    netlocs = [self.fast_netloc, self.slow_netloc]
    ping_results = dict(test.pings(netlocs))
    self.assertLess(ping_results[self.fast_netloc], 1)
    self.assertEqual(ping_results[self.slow_netloc], Pinger.UNREACHABLE)

  def test_global_pinger_memo(self):
    fast_pinger = Pinger(timeout=self.slow_seconds, tries=2)
    slow_pinger = Pinger(timeout=self.timeout_seconds, tries=2)
    self.assertEqual(fast_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
    self.assertNotEqual(slow_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)

  def tearDown(self):
    for server in self.servers:
      server.shutdown()
Python
0.000017
@@ -2253,32 +2253,38 @@ elf.slow_seconds + - .01 , tries=2)%0A s
932fccc77fb10ece61c3feeb47a28225216c7c0d
add two more authors for gemeinfrei_2021.py
service/ws_re/scanner/tasks/gemeinfrei_2021.py
service/ws_re/scanner/tasks/gemeinfrei_2021.py
import pywikibot

from service.ws_re.register.authors import Authors
from service.ws_re.scanner.tasks.base_task import ReScannerTask
from service.ws_re.template.article import Article
from tools.bots.pi import WikiLogger


class GF21Task(ReScannerTask):
    def __init__(self, wiki: pywikibot.Site, logger: WikiLogger, debug: bool = True):
        super().__init__(wiki, logger, debug)
        self.authors = Authors()

    def task(self):
        for re_article in self.re_page:
            if isinstance(re_article, Article):
                authors = self.authors.get_author_by_mapping(re_article.author[0],
                                                             re_article["BAND"].value)
                for author in authors:
                    author_string = f"{author.first_name} {author.last_name}"
                    if author_string in ("Arthur Stein", "Hugo Willrich", "Edward Capps",
                                         "Kurt Witte", "August Hug", "Max Radin",
                                         "Werner Schur", "Percy Neville Ure",
                                         "Herbert Bannert"):
                        if re_article["KEINE_SCHÖPFUNGSHÖHE"].value:
                            re_article["TODESJAHR"].value = ""
                            re_article["KEINE_SCHÖPFUNGSHÖHE"].value = False
Python
0
@@ -1014,16 +1014,51 @@ Bannert%22 +, %22Adolf Wilhelm%22, %22Wilhelm Schmid%22 ):%0A
6a7d7393d90c1a10071b392d24431af1111a0824
clean up
streamteam/dynamics/plot.py
streamteam/dynamics/plot.py
# coding: utf-8

""" ...explain... """

from __future__ import division, print_function

__author__ = "adrn <adrn@astro.columbia.edu>"

# Standard library
import os, sys

# Third-party
import matplotlib.pyplot as plt
import numpy as np

__all__ = ['plot_orbits']

def plot_orbits(w, ix=None, axes=None, triangle=False, **kwargs):
    """
    TODO:
    """

    if triangle and axes is None:
        fig,axes = plt.subplots(2,2,figsize=(12,12),sharex='col',sharey='row')
        axes[0,1].set_visible(False)
        axes = axes.flat
        axes = [axes[0],axes[2],axes[3]]

    elif triangle and axes is not None:
        try:
            axes = axes.flat
        except:
            pass

        if len(axes) == 4:
            axes = [axes[0],axes[2],axes[3]]

    elif not triangle and axes is None:
        fig,axes = plt.subplots(1,3,figsize=(12,5),sharex=True,sharey=True)

    if ix is not None:
        ixs = [ix]
    else:
        ixs = range(w.shape[1])

    for ii in ixs:
        axes[0].plot(w[:,ii,0], w[:,ii,1], **kwargs)
        axes[1].plot(w[:,ii,0], w[:,ii,2], **kwargs)
        axes[2].plot(w[:,ii,1], w[:,ii,2], **kwargs)

    if triangle:
        # HACK: until matplotlib 1.4 comes out, need this
        axes[0].set_ylim(axes[0].get_xlim())
        axes[2].set_xlim(axes[0].get_ylim())

        axes[0].set_ylabel("Y")
        axes[1].set_xlabel("X")
        axes[1].set_ylabel("Z")
        axes[2].set_xlabel("Y")

    else:
        axes[0].set_xlabel("X")
        axes[0].set_ylabel("Y")

        axes[1].set_xlabel("X")
        axes[1].set_ylabel("Z")

        axes[2].set_xlabel("Y")
        axes[2].set_ylabel("Z")

    if not triangle:
        axes[0].figure.tight_layout()

    return axes[0].figure
Python
0.000001
@@ -273,17 +273,17 @@ _orbits( -w +x , ix=Non @@ -339,13 +339,1022 @@ -TODO: +Given time series of positions, %60x%60, make nice plots of the orbit in%0A cartesian projections.%0A%0A Parameters%0A ----------%0A x : array_like%0A Array of positions. The last axis (%60axis=-1%60) is assumed%0A to be the dimensionality, e.g., %60x.shape%5B-1%5D%60. The first axis%0A (%60axis=0%60) is assumed to be the time axis.%0A ix : int, array_like (optional)%0A Index or array of indices of orbits to plot. For example, if %60x%60 is an%0A array of shape (1024,32,6) -- 1024 timesteps for 32 orbits in 6D%0A phase-space -- %60ix%60 would specify which of the 32 orbits to plot.%0A axes : array_like (optional)%0A Array of matplotlib Axes objects.%0A triangle : bool (optional)%0A Make a triangle plot instead of plotting all projections in a single row.%0A%0A Other Parameters%0A ----------------%0A kwargs%0A All other keyword arguments are passed to the matplotlib %60plot()%60 call.%0A You can pass in any of the usual style kwargs like %60color=...%60,%0A %60marker=...%60, etc. %0A @@ -1923,12 +1923,25 @@ s = -%5Bix%5D +np.atleast_1d(ix) %0A @@ -1967,17 +1967,17 @@ = range( -w +x .shape%5B1 @@ -2016,25 +2016,25 @@ xes%5B0%5D.plot( -w +x %5B:,ii,0%5D, w%5B @@ -2027,25 +2027,25 @@ (x%5B:,ii,0%5D, -w +x %5B:,ii,1%5D, ** @@ -2069,25 +2069,25 @@ xes%5B1%5D.plot( -w +x %5B:,ii,0%5D, w%5B @@ -2084,17 +2084,17 @@ ,ii,0%5D, -w +x %5B:,ii,2%5D @@ -2126,17 +2126,17 @@ 2%5D.plot( -w +x %5B:,ii,1%5D @@ -2137,17 +2137,17 @@ ,ii,1%5D, -w +x %5B:,ii,2%5D
d56382a87068e7d43b3333b6ea3dc2fd0a80d929
Use dict instead of list
10-disambiguate.py
10-disambiguate.py
#!/usr/bin/env python

from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)

import csv
import gc
import sys

from collections import defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count

wsi = defaultdict(lambda: dict())

v = DictVectorizer()
D = []

with open('03-cw-wsi.txt') as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)

    for row in reader:
        word, sid, _, words = row

        try:
            words = {k: float(v) for record in words.split(' ')
                     for k, v in (record.rsplit(':', 1),)}
        except ValueError:
            print('Skipping misformatted string: %s.' % words, file=sys.stderr)
            continue

        wsi[word][int(sid)] = words
        D.append(words)

X = v.fit_transform(D)

def emit(word):
    sneighbours = {}

    for sid, words in wsi[word].items():
        sense = '%s#%d' % (word, sid)

        features = words.copy()
        features.update({word: 1.})

        vector = v.transform(features)

        sneighbours[sense] = {}

        for neighbour, weight in words.items():
            neighbours = wsi[neighbour]

            candidates = [(nsid, sim(vector, v.transform(neighbours[nsid])).item(0))
                          for nsid in neighbours]

            nsid, cosine = max(candidates, key=itemgetter(1))

            if cosine > 0:
                nsense = '%s#%d' % (neighbour, nsid)
                sneighbours[sense][nsense] = weight

    return sneighbours

with Pool(cpu_count()) as pool:
    for sneighbours in pool.imap_unordered(emit, wsi):
        for sense, neighbours in sneighbours.items():
            for nsense, weight in neighbours.items():
                print('%s\t%s\t%f' % (sense, nsense, weight))
Python
0.000001
@@ -1131,16 +1131,17 @@ atures)%0A +%0A @@ -1236,18 +1236,16 @@ ghbours - = wsi%5Bne @@ -1280,19 +1280,16 @@ tes - = %5B( += %7B nsid -, +: sim @@ -1335,17 +1335,16 @@ .item(0) -) for nsi @@ -1358,17 +1358,17 @@ ighbours -%5D +%7D %0A @@ -1372,20 +1372,17 @@ -nsid +_ , cosine @@ -1382,16 +1382,17 @@ cosine + = max(ca @@ -1399,16 +1399,24 @@ ndidates +.items() , key=it @@ -1428,16 +1428,17 @@ ter(1))%0A +%0A
c7af9fc8a512e9dfffb456fbb7af7d099b36436d
add logging conf
memopol2/settings.py
memopol2/settings.py
# Django settings for memopol2 project.
import os

PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# those emails are used as the contact form recipient
ADMINS = (
    ('memopol', 'contact@lqdn.fr'),
)

MANAGERS = ADMINS

DEFAULT_FROM_EMAIL='memopol@lqdn.fr'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/tmp/%s-memopol2.sqlite' % os.getenv('USER'),
    },
}

APPS_DEBUG = False
if os.getenv('VIRTUAL_ENV'):
    DATABASES['default']['NAME'] = '%s/memopol2.sqlite' % os.getenv('VIRTUAL_ENV')
    APPS_DEBUG = True
elif not os.path.isfile('bin/django-manage'):
    APPS_DEBUG = True

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'

MEDIA_DIRECTORY = os.path.join(PROJECT_PATH, MEDIA_URL.lstrip('/'))

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'pw93$2vi7^b_8q#-j@z2#2rc-x7e(vcqmi)ekf9%8h57)#caoy'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.csrf.middleware.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

if APPS_DEBUG:
    MIDDLEWARE_CLASSES += (
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    )

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.media',
)

ROOT_URLCONF = 'memopol2.urls'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, "templates"),
)

INSTALLED_APPS = (
    # django
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.markup',
    'django.contrib.comments',
    # 3rd party
    'south',
    'flatblocks',
    'contact_form',
    # memopol
    'reps',
    'meps',
    'votes',
    'mps',
    'queries',
    'trends',
    'trophies',
)

if APPS_DEBUG:
    INSTALLED_APPS += (
        'debug_toolbar',
    )

INTERNAL_IPS = ('127.0.0.1',)

DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.template.TemplateDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
)

LANGUAGES = (
    ('fr', 'French'),
    ('en', 'English'),
)

FIXTURE_DIRS = (
    'fixtures',
)

PARLTRACK_URL = "http://parltrack.memopol2.lqdn.fr"
ROOT_URL = "http://memopol2.lqdn.org"

try:
    from settings_local import *
except ImportError:
    pass
Python
0
@@ -4220,16 +4220,626 @@ n.org%22%0A%0A +# See http://docs.djangoproject.com/en/dev/topics/logging for%0A# more details on how to customize your logging configuration.%0ALOGGING = %7B%0A 'version': 1,%0A 'disable_existing_loggers': True,%0A 'handlers': %7B%0A 'console': %7B%0A 'level': 'WARN',%0A 'class': 'logging.StreamHandler',%0A %7D,%0A %7D,%0A 'loggers': %7B%0A 'django': %7B%0A 'handlers': %5B'console'%5D,%0A 'level': 'ERROR',%0A 'propagate': True,%0A %7D,%0A 'memopol2': %7B%0A 'handlers': %5B'console'%5D,%0A 'level': 'WARN',%0A 'propagate': True,%0A %7D,%0A %7D%0A%7D%0A%0A try:%0A
f129d3ae3b81a59ea04103cc214a6f1893d4420a
Also return _id
app/mapproxy_webconf/storage.py
app/mapproxy_webconf/storage.py
import errno import inspect import os import sqlite3 import yaml import json from bottle import PluginError from mapproxy_webconf import utils class YAMLStorePlugin(object): name = 'yamlstore' def __init__(self, storage_dir, keyword='storage'): self.storage = YAMLStore(storage_dir) self.keyword = keyword def setup(self, app): ''' Make sure that other installed plugins don't affect the same keyword argument. ''' for other in app.plugins: if not isinstance(other, YAMLStorePlugin): continue if other.keyword == self.keyword: raise PluginError("Found another YAMLStore plugin with conflicting settings (non-unique keyword).") def apply(self, callback, context): conf = context['config'].get('yamlstorage') or {} keyword = conf.get('keyword', self.keyword) # Test if the original callback accepts a 'db' keyword. # Ignore it if it does not need a database handle. args = inspect.getargspec(context['callback'])[0] if keyword not in args: return callback def wrapper(*args, **kwargs): # Add the storage as a keyword argument. kwargs[keyword] = self.storage return callback(*args, **kwargs) # Replace the route callback with the wrapped one. return wrapper DEFAULT_VALUE = object() class YAMLStore(object): def __init__(self, storage_dir): self.storage_dir = storage_dir def _filename(self, name): return os.path.join(self.storage_dir, name + '.yaml') def get(self, name, project, default=DEFAULT_VALUE): try: with open(self._filename(name), 'rb') as f: data = yaml.load(f) project_data = data.get(project, default) if project_data is DEFAULT_VALUE: return {} else: return project_data except IOError, ex: if ex.errno == errno.ENOENT: if default is DEFAULT_VALUE: return {} return default def put(self, name, project, data): content = yaml.dump({project: data}) utils.save_atomic(self._filename(name), content) class SQLiteStorePlugin(object): name = 'sqlitestore' def __init__(self, dbfile, keyword='storage'): self.storage = SQLiteStore(dbfile) self.keyword = keyword def setup(self, app): ''' Make sure that other installed plugins don't affect the same keyword argument. ''' for other in app.plugins: if not isinstance(other, SQLiteStorePlugin): continue if other.keyword == self.keyword: raise PluginError("Found another SQLiteStore plugin with conflicting settings (non-unique keyword).") def apply(self, callback, context): conf = context['config'].get('sqlitestore') or {} keyword = conf.get('keyword', self.keyword) # Test if the original callback accepts a 'storage' keyword. # Ignore it if it does not need a database handle. args = inspect.getargspec(context['callback'])[0] if keyword not in args: return callback def wrapper(*args, **kwargs): # Add the storage as a keyword argument. kwargs[keyword] = self.storage return callback(*args, **kwargs) # Replace the route callback with the wrapped one. 
        return wrapper


class SQLiteStore(object):
    def __init__(self, filename):
        self.filename = filename
        self.db = sqlite3.connect(filename)
        self.db.row_factory = sqlite3.Row
        self._init_db()

    def _init_db(self):
        self.db.execute("""
            PRAGMA foreign_keys = ON;
        """)
        self.db.execute("""
            CREATE TABLE IF NOT EXISTS store (
                id INTEGER PRIMARY KEY,
                section TEXT NOT NULL,
                project TEXT,
                data TEXT NOT NULL,
                rank INTEGER,
                parent INTEGER,
                FOREIGN KEY(parent) REFERENCES store(id)
            )
        """)

    def get_all(self, section, project, default=DEFAULT_VALUE, with_rank=False):
        if default is DEFAULT_VALUE:
            default = {}
        result = default
        if hasattr(result, "append"):
            append_data = True
        else:
            append_data = False
        cur = self.db.cursor()
        cur.execute("SELECT id, data, parent, rank FROM store WHERE section = ? AND project = ?", (section, project))
        for row in cur.fetchall():
            data = json.loads(row['data'])
            if with_rank:
                data['_parent'] = row['parent']
                data['_rank'] = row['rank']
            if append_data:
                result.append(data)
            else:
                result[row['id']] = data
        return result

    def get(self, id, section, project):
        cur = self.db.cursor()
        cur.execute("SELECT data, parent, rank FROM store WHERE id = ? AND section = ? AND project = ?", (id, section, project))
        row = cur.fetchone()
        if row:
            data = json.loads(row[0])
            if row[1] is not None:
                data['_parent'] = row[1]
            if row[2] is not None:
                data['_rank'] = row[2]
            return data

    def add(self, section, project, data):
        rank = data.pop('_rank', None)
        parent = data.pop('_parent', None)
        data = json.dumps(data)
        cur = self.db.cursor()
        cur.execute("INSERT INTO store (section, project, data, parent, rank) VALUES (?, ?, ?, ?, ?)",
            (section, project, data, parent, rank))
        self.db.commit()
        return cur.lastrowid

    def update(self, id, section, project, data):
        rank = data.pop('_rank', None)
        parent = data.pop('_parent', None)
        data = json.dumps(data)
        cur = self.db.cursor()
        cur.execute("UPDATE store SET data = ?, parent = ?, rank = ? WHERE id = ? AND SECTION = ? AND project = ?",
            (data, parent, rank, id, section, project))
        self.db.commit()

    def delete(self, id, section, project):
        cur = self.db.cursor()
        cur.execute("DELETE FROM store WHERE id = ? AND SECTION = ? AND project = ?", (id, section, project))
        self.db.commit()
        return cur.rowcount == 1
Python
0.001708
@@ -4767,16 +4767,52 @@ data'%5D)%0A + data%5B'_id'%5D = row%5B'id'%5D%0A
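Decoded, the hunk above inserts a single assignment into SQLiteStore.get_all, immediately after each row's JSON payload is parsed, so that every returned record also carries its database primary key under '_id' — matching the commit subject "Return also _id". A sketch of the patched loop, with indentation inferred from the method shown in the record:

for row in cur.fetchall():
    data = json.loads(row['data'])
    data['_id'] = row['id']  # the added line: expose the row's primary key
    if with_rank:
        data['_parent'] = row['parent']
        data['_rank'] = row['rank']
    if append_data:
        result.append(data)
    else:
        result[row['id']] = data
return result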
70197850e6ab2df07ccb2c3463b1912c087255eb
Update forward compatibility horizon to 2020-11-13
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.

See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
import os

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export

# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 11, 12)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None


def _date_to_date_number(year, month, day):
  return (year << 9) | (month << 5) | day


def _update_forward_compatibility_date_number(date_to_override=None):
  """Update the base date to compare in forward_compatible function."""

  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))

  if date < _FORWARD_COMPATIBILITY_HORIZON:
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." % (date))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)


_update_forward_compatibility_date_number()


@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward-compatibility refers to scenarios where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a version of the
  TensorFlow library newer than what the consumer was compiled against. The
  "producer" is typically a Python program that constructs and trains a model
  while the "consumer" is typically another program that loads and serves the
  model.

  TensorFlow has been supporting a 3 week forward-compatibility window for
  programs compiled from source at HEAD.

  For example, consider the case where a new operation `MyNewAwesomeAdd` is
  created with the intent of replacing the implementation of an existing Python
  wrapper - `tf.add`.
  The Python wrapper implementation should change from
  something like:

  ```python
  def add(inputs, name=None):
    return gen_math_ops.add(inputs, name)
  ```

  to:

  ```python
  from tensorflow.python.compat import compat

  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  Where `year`, `month`, and `day` specify the date beyond which binaries
  that consume a model are expected to have been updated to include the
  new operations. This date is typically at least 3 weeks beyond the date
  the code that adds the new operation is committed.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
      year, month, day)


@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  To ensure forward compatibility of generated graphs (see
  `forward_compatible`) with older binaries, new features can be gated with:

  ```python
  if compat.forward_compatible(year=2018, month=8, day=1):
    generate_graph_with_new_features()
  else:
    generate_graph_so_older_binaries_can_consume_it()
  ```

  However, when adding new features, one may want to unittest it before
  the forward compatibility window expires. This context manager enables
  such tests. For example:

  ```python
  from tensorflow.python.compat import compat

  def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
      # Test that generate_graph_with_new_features() has an effect
  ```

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  try:
    _update_forward_compatibility_date_number(datetime.date(year, month, day))
    yield
  finally:
    _update_forward_compatibility_date_number()
Python
0
@@ -1446,17 +1446,17 @@ 0, 11, 1 -2 +3 )%0A_FORWA
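The hunk above is a routine one-day bump of the horizon constant, and the reason such a bump is safe to automate is the integer packing used by the module's private `_date_to_date_number` helper: the day occupies bits 0-4, the month bits 5-8, and the year the remaining high bits, so calendar order and integer order coincide. A worked check using a standalone copy of the helper (renamed here because the original is module-private):

def date_to_date_number(year, month, day):
    # Standalone copy of the module's private helper: day in bits 0-4
    # (<= 31 fits in 5 bits), month in bits 5-8 (<= 12 fits in 4 bits),
    # year in the high bits.
    return (year << 9) | (month << 5) | day

old_horizon = date_to_date_number(2020, 11, 12)  # 1034604
new_horizon = date_to_date_number(2020, 11, 13)  # 1034605
assert new_horizon == old_horizon + 1

# forward_compatible(2020, 11, 12) is False before the bump and True after,
# because the check is a strict ">" against the packed horizon.
assert not (old_horizon > date_to_date_number(2020, 11, 12))
assert new_horizon > date_to_date_number(2020, 11, 12)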
82dd23f64b9617db7b333c51f20a314777c6e22c
Update forward compatibility horizon to 2022-08-05
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.

See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""

import datetime
import os

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export

# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 8, 4)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None


def _date_to_date_number(year, month, day):
  return (year << 9) | (month << 5) | day


def _update_forward_compatibility_date_number(date_to_override=None):
  """Update the base date to compare in forward_compatible function."""

  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))

  if date < _FORWARD_COMPATIBILITY_HORIZON:
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." % (date))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)


_update_forward_compatibility_date_number()


@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward-compatibility refers to scenarios where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a version of the
  TensorFlow library newer than what the consumer was compiled against. The
  "producer" is typically a Python program that constructs and trains a model
  while the "consumer" is typically another program that loads and serves the
  model.

  TensorFlow has been supporting a 3 week forward-compatibility window for
  programs compiled from source at HEAD.

  For example, consider the case where a new operation `MyNewAwesomeAdd` is
  created with the intent of replacing the implementation of an existing Python
  wrapper - `tf.add`.

  The Python wrapper implementation should change from
  something like:

  ```python
  def add(inputs, name=None):
    return gen_math_ops.add(inputs, name)
  ```

  to:

  ```python
  from tensorflow.python.compat import compat

  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  Where `year`, `month`, and `day` specify the date beyond which binaries
  that consume a model are expected to have been updated to include the
  new operations. This date is typically at least 3 weeks beyond the date
  the code that adds the new operation is committed.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
      year, month, day)


@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility).

  To ensure forward compatibility of generated graphs (see
  `forward_compatible`) with older binaries, new features can be gated with:

  ```python
  if compat.forward_compatible(year=2018, month=8, day=1):
    generate_graph_with_new_features()
  else:
    generate_graph_so_older_binaries_can_consume_it()
  ```

  However, when adding new features, one may want to unittest it before
  the forward compatibility window expires. This context manager enables
  such tests. For example:

  ```python
  from tensorflow.python.compat import compat

  def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
      # Test that generate_graph_with_new_features() has an effect
  ```

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  try:
    _update_forward_compatibility_date_number(datetime.date(year, month, day))
    yield
  finally:
    _update_forward_compatibility_date_number()
Python
0
@@ -1338,9 +1338,9 @@ 8, -4 +5 )%0A_F
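The same daily one-character bump, two years on. Besides editing the constant, the module shown in the record lets deployments shift the horizon without a source change via the TF_FORWARD_COMPATIBILITY_DELTA_DAYS environment variable. A hedged sketch of that path in isolation — the max() call stands in for the real module's branch that logs a warning and ignores past dates:

import datetime
import os

HORIZON = datetime.date(2022, 8, 5)  # the constant after the hunk above

def effective_horizon():
    # Mirrors _update_forward_compatibility_date_number: a positive delta
    # pushes the horizon into the future; anything before the compiled-in
    # horizon is ignored.
    date = HORIZON
    delta_days = os.getenv("TF_FORWARD_COMPATIBILITY_DELTA_DAYS")
    if delta_days:
        date += datetime.timedelta(days=int(delta_days))
    return max(date, HORIZON)

os.environ["TF_FORWARD_COMPATIBILITY_DELTA_DAYS"] = "7"
assert effective_horizon() == datetime.date(2022, 8, 12)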