commit: stringlengths (40 to 40)
subject: stringlengths (1 to 3.25k)
old_file: stringlengths (4 to 311)
new_file: stringlengths (4 to 311)
old_contents: stringlengths (0 to 26.3k)
lang: stringclasses (3 values)
proba: float64 (0 to 1)
diff: stringlengths (0 to 7.82k)
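The rows below follow this schema, one value per line, and the diff column is stored with percent-escapes (%22 for double quotes, %0A for newlines, %7B/%7D for braces). A minimal sketch of how one might read such records, assuming they are stored as JSON lines with these field names; the file name commits.jsonl is a hypothetical stand-in, not part of the dataset:

import json
from urllib.parse import unquote

# Iterate records matching the schema above and decode the
# percent-escaped diff column into readable text.
with open("commits.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # Fields per the schema: commit, subject, old_file, new_file,
        # old_contents, lang, proba, diff
        diff_text = unquote(record["diff"])  # %22 -> ", %0A -> newline
        print(record["commit"][:8], record["lang"], record["subject"])
        print(diff_text)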
d80878788ddcc1443c54c11b923da23bc295b496
Fix wget -q flag
charmtest/network.py
charmtest/network.py
import io import argparse class Wget(object): name = "wget" def __init__(self, network): self._network = network def __call__(self, proc_args): parser = argparse.ArgumentParser() parser.add_argument("url") parser.add_argument("-O", dest="output") parser.add_argument("-q", dest="quite") args = parser.parse_args(proc_args["args"][1:]) content = self._network[args.url] result = {} if args.output == "-": result["stdout"] = io.BytesIO(content) else: with open(args.output, "wb") as fd: fd.write(content) return result
Python
0.000002
@@ -332,17 +332,38 @@ est=%22qui -t +et%22, action=%22store_tru e%22)%0A
730e765822932b5b0b00832c41140f39a9ae8d11
Bump version
datetimerange/__version__.py
datetimerange/__version__.py
# encoding: utf-8 from datetime import datetime __author__ = "Tsuyoshi Hombashi" __copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__) __license__ = "MIT License" __version__ = "0.3.5" __maintainer__ = __author__ __email__ = "tsuyoshi.hombashi@gmail.com"
Python
0
@@ -208,9 +208,9 @@ 0.3. -5 +6 %22%0A__
e15a1bd925a7fb163ef45e9a8519110d147e623f
Add TODOs to Event RSS
controllers/event_controller.py
controllers/event_controller.py
import datetime import os import logging import PyRSS2Gen from google.appengine.api import memcache from google.appengine.ext import webapp from google.appengine.ext.webapp import template, util from django.utils import simplejson from models import Event, Match from helpers.match_helper import MatchHelper from helpers.team_helper import TeamHelper class EventList(webapp.RequestHandler): """ List all Events. """ def get(self, year=None): if year: year = int(year) explicit_year = True else: year = datetime.datetime.now().year explicit_year = False memcache_key = "event_list_%s" % year html = memcache.get(memcache_key) if html is None: events = Event.all().filter("year =", int(year)).order('start_date').fetch(1000) template_values = { "explicit_year": explicit_year, "year": year, "events": events, } path = os.path.join(os.path.dirname(__file__), '../templates/events/list.html') html = template.render(path, template_values) memcache.set(memcache_key, html, 3600) self.response.out.write(html) class EventDetail(webapp.RequestHandler): """ Show an Event. event_code like "2010ct" """ def get(self, event_key): memcache_key = "event_detail_%s" % event_key html = memcache.get(memcache_key) if html is None: event = Event.get_by_key_name(event_key) matches = MatchHelper.organizeMatches(event.match_set) teams = TeamHelper.sortTeams([a.team for a in event.teams]) template_values = { "event": event, "matches": matches, "teams": teams, } path = os.path.join(os.path.dirname(__file__), '../templates/events/details.html') html = template.render(path, template_values) memcache.set(memcache_key, html, 300) self.response.out.write(html) class EventRss(webapp.RequestHandler): """ Generates a RSS feed for the matches in a event Created by: @brandondean, github.com/brandondean """ def get(self, event_key): memcache_key = "event_rss_%s" % event_key html = memcache.get(memcache_key) if html is None: event = Event.get_by_key_name(event_key) matches = MatchHelper.organizeMatches(event.match_set) rss_items = [] # Loop through and generate RSS items for each match for match in matches['f'] + matches['sf'] + matches['qf'] + matches['ef'] + matches['qm']: match.unpack_json() new_item = PyRSS2Gen.RSSItem( title = str(match.verbose_name()), link = 'http://www.thebluealliance.com/match/' + match.get_key_name() + '', # List the red and blue alliance teams and their score # TODO: Make this generic in case there's ever not just red/blue -gregmarra 12 Mar 2011 description = "Red Alliance: " + ' '.join(match.alliances["red"]["teams"]) + " " + "Score: " + str(match.alliances["red"]["score"]) + " " + "Blue Alliance: " + ' '.join(match.alliances["blue"]["teams"]) + " " + "Score: " + str(match.alliances["blue"]["score"]) ) rss_items.append(new_item) # Create final rss document rss = PyRSS2Gen.RSS2( title = event.name + "-- " + str(event.year), link = 'http://www.thebluealliance.com/event/' + str(event.get_key_name()) + '', description = "RSS feed for the " + event.name + " provided by The Blue Alliance." , lastBuildDate = datetime.datetime.now(), items = rss_items ) html = rss.to_xml() memcache.set(memcache_key, html, 300) self.response.out.write(html)
Python
0
@@ -3249,16 +3249,231 @@ ar 2011%0A + # TODO: Make this output format something either very machine or very human readable.%0A # Probably opt for human, since machines should be using the API. -gregmarra 12 Mar 2011%0A
0b9a97a4d6d47bd8f442c3fe3783b1d2cd85ac74
Add tests for widgets
jarbas/dashboard/tests/test_dashboard_admin.py
jarbas/dashboard/tests/test_dashboard_admin.py
from collections import namedtuple from unittest.mock import MagicMock from django.test import TestCase from jarbas.core.models import Reimbursement from jarbas.dashboard.admin import ReimbursementModelAdmin, SubuotaListfilter Request = namedtuple('Request', ('method',)) ReimbursementMock = namedtuple('Reimbursement', ('cnpj_cpf')) class TestDashboardSite(TestCase): def setUp(self): self.requests = map(Request, ('GET', 'POST', 'PUT', 'PATCH', 'DELETE')) self.ma = ReimbursementModelAdmin(Reimbursement, 'dashboard') def test_has_add_permission(self): permissions = map(self.ma.has_add_permission, self.requests) self.assertNotIn(True, tuple(permissions)) def test_has_change_permission(self): permissions = map(self.ma.has_change_permission, self.requests) expected = (True, False, False, False, False) self.assertEqual(expected, tuple(permissions)) def test_has_delete_permission(self): permissions = map(self.ma.has_delete_permission, self.requests) self.assertNotIn(True, tuple(permissions)) def test_format_document(self): obj1 = ReimbursementMock('12345678901234') obj2 = ReimbursementMock('12345678901') obj3 = ReimbursementMock('2345678') self.assertEqual('12.345.678/9012-34', self.ma._format_document(obj1)) self.assertEqual('123.456.789-01', self.ma._format_document(obj2)) self.assertEqual('2345678', self.ma._format_document(obj3)) class TestSubuotaListfilter(TestCase): def setUp(self): self.qs = MagicMock() self.list_filter = MagicMock() def test_queryset_without_subquota(self): self.list_filter.value.return_value = None SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs) self.qs.filter.assert_not_called() def test_queryset_with_subquota(self): self.list_filter.value.return_value = 42 SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs) self.qs.filter.assert_called_once_with(subquota_id=42)
Python
0.000001
@@ -183,50 +183,127 @@ ort -ReimbursementModelAdmin, SubuotaListfilter +(%0A ReceiptUrlWidget,%0A ReimbursementModelAdmin,%0A SubquotaWidget,%0A SubuotaListfilter,%0A SuspiciousWidget,%0A) %0A%0A%0AR @@ -2077,32 +2077,32 @@ ock(), self.qs)%0A - self.qs. @@ -2148,8 +2148,1945 @@ _id=42)%0A +%0A%0Aclass TestCustomWidgets(TestCase):%0A%0A def test_subquota_widget(self):%0A widget = SubquotaWidget()%0A rendered = widget.render('Name', 'Flight ticket issue')%0A self.assertIn('Emiss%C3%A3o bilhete a%C3%A9reo', rendered)%0A%0A def test_suspicious_widget_with_one_suspicion(self):%0A widget = SuspiciousWidget()%0A json_value = '%7B%22invalid_cnpj_cpf%22: true%7D'%0A rendered = widget.render('Name', json_value)%0A self.assertIn('CPF ou CNPJ inv%C3%A1lidos', rendered)%0A self.assertNotIn('%3Cbr%3E', rendered)%0A%0A def test_suspicious_widget_with_two_suspicions(self):%0A widget = SuspiciousWidget()%0A json_value = '%7B%22invalid_cnpj_cpf%22: true, %22election_expenses%22: true%7D'%0A rendered = widget.render('Name', json_value)%0A self.assertIn('CPF ou CNPJ inv%C3%A1lidos', rendered)%0A self.assertIn('%3Cbr%3E', rendered)%0A self.assertIn('Gasto com campanha eleitoral', rendered)%0A%0A def test_suspicious_widget_with_new_suspicion(self):%0A widget = SuspiciousWidget()%0A json_value = '%7B%22whatever%22: true, %22invalid_cnpj_cpf%22: true%7D'%0A rendered = widget.render('Name', json_value)%0A self.assertIn('CPF ou CNPJ inv%C3%A1lidos', rendered)%0A self.assertIn('%3Cbr%3E', rendered)%0A self.assertIn('whatever', rendered)%0A%0A def test_suspicious_widget_without_suspicion(self):%0A widget = SuspiciousWidget()%0A json_value = 'null'%0A rendered = widget.render('Name', json_value)%0A self.assertEqual('', rendered)%0A%0A def test_receipt_url_widget(self):%0A widget = ReceiptUrlWidget()%0A url = 'https://jarbas.serenatadeamor.org'%0A rendered = widget.render('Name', url)%0A self.assertIn('href=%22%7B%7D%22'.format(url), rendered)%0A self.assertIn('%3E%7B%7D%3C/a%3E'.format(url), rendered)%0A%0A def test_receipt_url_widget_without_url(self):%0A widget = ReceiptUrlWidget()%0A rendered = widget.render('Name', '')%0A self.assertEqual('', rendered)%0A
6c131821bd91d2353d99daecfe27981d1b09a525
Fix help description
Cogs/Responses.py
Cogs/Responses.py
import discord, re from discord.ext import commands from Cogs import Settings, DisplayName, Utils, Nullify, PickList def setup(bot): # Add the bot and deps settings = bot.get_cog("Settings") bot.add_cog(Responses(bot, settings)) class Responses(commands.Cog): # Init with the bot reference, and a reference to the settings var def __init__(self, bot, settings): self.bot = bot self.settings = settings global Utils, DisplayName Utils = self.bot.get_cog("Utils") DisplayName = self.bot.get_cog("DisplayName") # Regex values self.regexUserName = re.compile(r"\[\[[user]+\]\]", re.IGNORECASE) self.regexUserPing = re.compile(r"\[\[[atuser]+\]\]", re.IGNORECASE) self.regexServer = re.compile(r"\[\[[server]+\]\]", re.IGNORECASE) self.regexHere = re.compile(r"\[\[[here]+\]\]", re.IGNORECASE) self.regexEveryone = re.compile(r"\[\[[everyone]+\]\]", re.IGNORECASE) @commands.Cog.listener() async def on_message(self, message): if message.author.bot: return if not message.guild: return message_responses = self.settings.getServerStat(message.guild, "MessageResponses", {}) if not message_responses: return # We have something to check ctx = await self.bot.get_context(message) if ctx.command: return # Don't check if we're running a command # Check for matching response triggers here content = message.content.replace("\n"," ") # Remove newlines for better matching for trigger in message_responses: match = re.fullmatch(trigger, content) if not match: continue # Got a full match - build the message, send it and bail m = message_responses[trigger] m = re.sub(self.regexUserName, "{}".format(DisplayName.name(message.author)), m) m = re.sub(self.regexUserPing, "{}".format(message.author.mention), m) m = re.sub(self.regexServer, "{}".format(Nullify.escape_all(ctx.guild.name)), m) m = re.sub(self.regexHere, "@here", m) m = re.sub(self.regexEveryone, "@everyone", m) return await ctx.send(m) @commands.command() async def addresponse(self, ctx, regex_trigger = None, *, response = None): """Adds a new response for the regex trigger. If the trigger has spaces, it must be wrapped in quotes (bot-admin only). Available Options: [[user]] = user name [[atuser]] = user mention [[server]] = server name [[game]] = the game name [[url]] = stream url [[here]] = @​here ping [[everyone]] = @​everyone ping Example: $addresponse "(?i)(hello there|\\btest\\b).*" [[atuser]], this is a test! This would look for a message starting with the whole word "test" or "hello there" (case-insensitive) and respond by pinging the user and saying "this is a test!" """ if not await Utils.is_bot_admin_reply(ctx): return if not regex_trigger or not response: return await ctx.send("Usage: `{}addresponse regex_trigger response`".format(ctx.prefix)) # Ensure the regex is valid try: re.compile(regex_trigger) except Exception as e: return await ctx.send(Nullify.escape_all(e)) # Save the trigger and response message_responses = self.settings.getServerStat(ctx.guild, "MessageResponses", {}) context = "Updated" if regex_trigger in message_responses else "Added new" message_responses[regex_trigger] = response self.settings.setServerStat(ctx.guild, "MessageResponses", message_responses) return await ctx.send("{} response trigger!".format(context)) @commands.command() async def responses(self, ctx): """Lists the response triggers and their responses (bot-admin only).""" if not await Utils.is_bot_admin_reply(ctx): return message_responses = self.settings.getServerStat(ctx.guild, "MessageResponses", {}) if not message_responses: return await ctx.send("No responses setup! You can use the `{}addresponse` command to add some.".format(ctx.prefix)) entries = [{"name":"{}. ".format(i)+Nullify.escape_all(x),"value":Nullify.escape_all(message_responses[x])} for i,x in enumerate(message_responses,start=1)] return await PickList.PagePicker(title="Current Responses",list=entries,ctx=ctx).pick() @commands.command() async def remresponse(self, ctx, *, regex_trigger_number = None): """Removes the passed response trigger (bot-admin only).""" if not await Utils.is_bot_admin_reply(ctx): return if not regex_trigger_number: return await ctx.send("Usage: `{}remresponse regex_trigger_number`\nYou can get a numbered list with `{}responses`".format(ctx.prefix,ctx.prefix)) message_responses = self.settings.getServerStat(ctx.guild, "MessageResponses", {}) if not message_responses: return await ctx.send("No responses setup! You can use the `{}addresponse` command to add some.".format(ctx.prefix)) # Make sure we got a number, and it's within our list range try: regex_trigger_number = int(regex_trigger_number) assert 0 < regex_trigger_number <= len(message_responses) except: return await ctx.send("You need to pass a valid number from 1 to {:,}.\nYou can get a numbered list with `{}responses`".format(len(message_responses),ctx.prefix)) # Remove it, save, and report message_responses.pop(list(message_responses)[regex_trigger_number-1],None) self.settings.setServerStat(ctx.guild, "MessageResponses", message_responses) return await ctx.send("Response trigger removed!") @commands.command() async def clearresponses(self, ctx): """Removes all response triggers (bot-admin only).""" if not await Utils.is_bot_admin_reply(ctx): return self.settings.setServerStat(ctx.guild, "MessageResponses", {}) return await ctx.send("All response triggers removed!")
Python
0.996521
@@ -2382,69 +2382,8 @@ me%0D%0A -%09%09%5B%5Bgame%5D%5D = the game name%0D%0A%09%09%5B%5Burl%5D%5D = stream url%0D%0A %09%09%5B%5B
2e23ad9decd01ece3d924e57dabc923cec8f34f7
tidy up some obsolete junk
Commands/Alias.py
Commands/Alias.py
# -*- coding: utf-8 -*- """ Created on May 21, 2014 @author: HubbeKing, Tyranic-Moron """ import re from CommandInterface import CommandInterface from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType import GlobalVars class Alias(CommandInterface): triggers = ['alias', 'unalias', 'aliases'] runInThread = True def help(self, message): """ @type message: IRCMessage """ helpDict = { u"alias": u"alias <alias> <command/alias> <params> - aliases <alias> to the specified command/alias and parameters\n" \ u"you can specify where parameters given to the alias should be inserted with $1, $2, $n. " \ u"The whole parameter string is $0. $sender and $channel can also be used.", u"unalias": u"unalias <alias> - deletes the alias <alias>", u"aliases": u"aliases [<alias>] - lists all defined aliases, or the contents of the specified alias" } return helpDict[message.ParameterList[0]] def onLoad(self): if 'Alias' not in self.bot.dataStore: self.bot.dataStore['Alias'] = {} self.aliases = self.bot.dataStore['Alias'] for alias in self.aliases: self.bot.moduleHandler.mappedTriggers[alias] = self def onUnload(self): for alias in self.aliases: del self.bot.moduleHandler.mappedTriggers[alias] def shouldExecute(self, message): if message.Command.lower() in self.bot.moduleHandler.mappedTriggers: return True return False def execute(self, message): """ @type message: IRCMessage """ if message.Command.lower() not in self.triggers and message.Command.lower() in self.aliases: newMessage = self._aliasedMessage(message) newCommand = newMessage.Command.lower() if newCommand in self.bot.moduleHandler.mappedTriggers: # aliased command is a valid trigger return self.bot.moduleHandler.mappedTriggers[newCommand].execute(newMessage) elif newCommand in self.aliases: # command is an alias of another alias newMessage = self._aliasedMessage(message) return self.execute(newMessage) if message.Command.lower() == 'alias': return self._alias(message) elif message.Command.lower() == 'unalias': return self._unalias(message) elif message.Command.lower() == 'aliases': return self._aliases(message) def _alias(self, message): if message.User.Name not in GlobalVars.admins: return IRCResponse(ResponseType.Say, 'Only my admins may create new aliases!', message.ReplyTo) if len(message.ParameterList) <= 1: return IRCResponse(ResponseType.Say, 'Alias what?', message.ReplyTo) if message.ParameterList[0].lower() in self.bot.moduleHandler.mappedTriggers: return IRCResponse(ResponseType.Say, u"'{}' is already a command!".format(message.ParameterList[0].lower()), message.ReplyTo) if message.ParameterList[0].lower() in self.aliases: return IRCResponse(ResponseType.Say, u"'{}' is already an alias!".format(message.ParameterList[0].lower()), message.ReplyTo) if message.ParameterList[1].lower() not in self.bot.moduleHandler.mappedTriggers \ and message.ParameterList[1].lower() not in self.aliases: return IRCResponse(ResponseType.Say, u"'{}' is not a valid command or alias!".format(message.ParameterList[1].lower()), message.ReplyTo) newAlias = message.ParameterList[1:] newAlias[0] = newAlias[0].lower() self._newAlias(message.ParameterList[0].lower(), newAlias) return IRCResponse(ResponseType.Say, u"Created a new alias '{}' for '{}'.".format(message.ParameterList[0].lower(), u' '.join(message.ParameterList[1:])), message.ReplyTo) def _unalias(self, message): if message.User.Name not in GlobalVars.admins: return IRCResponse(ResponseType.Say, 'Only my admins may delete aliases!', message.ReplyTo) if len(message.ParameterList) == 0: return IRCResponse(ResponseType.Say, 'Unalias what?', message.ReplyTo) if message.ParameterList[0].lower() not in self.aliases: return IRCResponse(ResponseType.Say, u"I don't have an alias called '{}'".format(message.ParameterList[0].lower()), message.ReplyTo) self._delAlias(message.ParameterList[0].lower()) return IRCResponse(ResponseType.Say, u"Deleted alias '{}'".format(message.ParameterList[0].lower()), message.ReplyTo) def _aliases(self, message): if len(message.ParameterList) == 0: return IRCResponse(ResponseType.Say, u"Current aliases: {}".format(u', '.join(sorted(self.aliases.keys()))), message.ReplyTo) elif message.ParameterList[0].lower() in self.aliases: return IRCResponse(ResponseType.Say, u"'{}' is aliased to: {}".format(message.ParameterList[0].lower(), u' '.join(self.aliases[message.ParameterList[0].lower()])), message.ReplyTo) else: return IRCResponse(ResponseType.Say, u"'{}' is not a recognized alias".format(message.ParameterList[0].lower()), message.ReplyTo) def _newAlias(self, alias, command): self.aliases[alias] = command self.bot.moduleHandler.mappedTriggers[alias] = self self._syncAliases() def _delAlias(self, alias): del self.aliases[alias] del self.bot.moduleHandler.mappedTriggers[alias] self._syncAliases() def _syncAliases(self): self.bot.dataStore['Alias'] = self.aliases self.bot.dataStore.sync() def _aliasedMessage(self, message): if message.Command.lower() not in self.aliases: return alias = self.aliases[message.Command.lower()] newMsg = u'{0}{1}'.format(self.bot.commandChar, ' '.join(alias)) newMsg = newMsg.replace('$sender', message.User.Name) if message.Channel is not None: newMsg = newMsg.replace('$channel', message.Channel.Name) else: newMsg = newMsg.replace('$channel', message.User.Name) if re.search(r'\$[0-9]+', newMsg): # if the alias contains numbered param replacement points, replace them newMsg = newMsg.replace('$0', u' '.join(message.ParameterList)) for i, param in enumerate(message.ParameterList): if newMsg.find(u"${}+".format(i+1)) != -1: newMsg = newMsg.replace(u"${}+".format(i+1), u" ".join(message.ParameterList[i:])) else: newMsg = newMsg.replace(u"${}".format(i+1), param) else: # if there are no numbered replacement points, append the full parameter list instead newMsg += u' {}'.format(u' '.join(message.ParameterList)) return IRCMessage(message.Type, message.User.String, message.Channel, newMsg, self.bot)
Python
0.000018
@@ -2121,200 +2121,8 @@ age) -%0A elif newCommand in self.aliases: # command is an alias of another alias%0A newMessage = self._aliasedMessage(message)%0A return self.execute(newMessage) %0A%0A @@ -2911,262 +2911,11 @@ mand -!%22.format(message.ParameterList%5B0%5D.lower()),%0A message.ReplyTo)%0A if message.ParameterList%5B0%5D.lower() in self.aliases:%0A return IRCResponse(ResponseType.Say,%0A u%22'%7B%7D' is already an + or ali @@ -3102,83 +3102,8 @@ gers - %5C%0A and message.ParameterList%5B1%5D.lower() not in self.aliases :%0A
35ff017b483bb46b1f942045bd2c9e20ace39483
fix line splitting
Commands/Urban.py
Commands/Urban.py
# -*- coding: utf-8 -*- """ Created on Jan 24, 2014 @author: Tyranic-Moron """ import urllib import json from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType from CommandInterface import CommandInterface from Utils import WebUtils from twisted.words.protocols.irc import assembleFormattedText, attributes as A class Urban(CommandInterface): triggers = ['urban', 'ud'] help = "urban <search term> - returns the definition of the given search term from UrbanDictionary.com" def execute(self, message): """ @type message: IRCMessage """ if len(message.ParameterList) == 0: return IRCResponse(ResponseType.Say, "You didn't give a word! Usage: {0}".format(self.help), message.ReplyTo) search = urllib.quote(message.Parameters) url = 'http://api.urbandictionary.com/v0/define?term={0}'.format(search) webPage = WebUtils.fetchURL(url) response = json.loads(webPage.body) if len(response['list']) == 0: return IRCResponse(ResponseType.Say, "No entry found for '{0}'".format(message.Parameters), message.ReplyTo) graySplitter = assembleFormattedText(A.normal[' ', A.fg.gray['|'], ' ']) defn = response['list'][0] word = defn['word'] definition = defn['definition'] definition = graySplitter.join([s.strip() for s in definition.strip().split('\r\n')]) example = defn['example'] example = graySplitter.join([s.strip() for s in example.strip().split('\r\n')]) author = defn['author'] up = defn['thumbs_up'] down = defn['thumbs_down'] more = 'http://{}.urbanup.com/'.format(word.replace(' ', '-')) if word.lower() != message.Parameters.lower(): word = "{0} (Contains '{1}')".format(word, message.Parameters) defFormatString = unicode(assembleFormattedText(A.normal[A.bold["{0}:"], " {1}"])) exampleFormatString = unicode(assembleFormattedText(A.normal[A.bold["Example(s):"], " {0}"])) byFormatString = unicode(assembleFormattedText(A.normal["{0}", graySplitter, A.fg.lightGreen["+{1}"], A.fg.gray["/"], A.fg.lightRed["-{2}"], graySplitter, "More defs: {3}"])) responses = [IRCResponse(ResponseType.Say, defFormatString.format(word, definition), message.ReplyTo), IRCResponse(ResponseType.Say, exampleFormatString.format(example), message.ReplyTo), IRCResponse(ResponseType.Say, byFormatString.format(author, up, down, more), message.ReplyTo)] return responses
Python
0.000001
@@ -1564,39 +1564,38 @@ on.strip().split -('%5Cr%5Cn' +lines( )%5D)%0A%0A exa @@ -1698,15 +1698,14 @@ plit -('%5Cr%5Cn' +lines( )%5D)%0A
74e1a3617457a2d690000fd18825e864a70a2d85
print not-installed when in quiet mode
checkdependencies.py
checkdependencies.py
#!/usr/bin/env python2 import imp, os, sys, subprocess from distutils import spawn try: import pip except: pip = None dirname = os.path.dirname(__file__) from lib import wireutils wireutils.cprintconf.name = "Perdyshot" wireutils.cprintconf.color= wireutils.bcolors.DARKCYAN def readBool(text): reply = wireutils.cinput(text + ' (y/n): ') if reply == 'y': return True elif reply == 'n': return False else: print wireutils.cprint("Invalid option. Please answer y or n for yes or no.\n", color = wireutils.bcolors.RED) return readBool(text) def hasModule(name): try: imp.find_module(name) return True except ImportError: return False def checkModule(name): installed = hasModule(name) if installed: if not args.get("quiet") and args.get("clean"): print wireutils.format("m {name}: y", name = name) elif not args.get("quiet"): wireutils.cprint("Module {name} installed.", name = name, color = wireutils.bcolors.GREEN) else: if not args.get("quiet") and args.get("clean"): print wireutils.format("m {name}: n", name = name) elif not args.get("quiet"): wireutils.cprint("Module {name} not installed.", name = name, color = wireutils.bcolors.RED) return installed def installModule(name): if pip and not args.get("dry") and not args.get("clean"): pip.main(["install", "-U", name]) def moduleNeedsInstalling(name): installed = checkModule(name) if not args.get("dry"): if installed or args.get("clean"): return False elif ROOT: return readBool("Do you wish to install it now?") def manualInstallNotify(name, tutorial): if not args.get("dry") and not args.get("clean"): if not readBool("%s can't be automatically installled.\nPlease refer to {blue}{line}%s{endc} to install it manually.\nDo you wish to continue?" % (name, tutorial)): sys.exit() def checkApplication(name, friendlyName, tutorial): installed = spawn.find_executable(name) != None if installed: if not args.get("quiet") and args.get("clean"): print wireutils.format("a {name}: y", name = name) elif not args.get("quiet"): wireutils.cprint("Executable {name} ({readable}) found.", name = name, readable = friendlyName, color=wireutils.bcolors.GREEN) else: if not args.get("quiet") and args.get("clean"): print wireutils.format("a {name}: n", name = name) elif not args.get("quiet"): wireutils.cprint("Executable {name} ({readable}) not found.", name = name, readable = friendlyName, color=wireutils.bcolors.RED) if not installed and not args.get("dry"): manualInstallNotify(friendlyName, tutorial) if __name__ == "__main__": try: try: import argparse parser = argparse.ArgumentParser(description = 'Checks the Perdyshot dependencies.', usage="%(prog)s [options]") parser.add_argument('-o', '--omit', help="Omit an update step", default="", choices=["module", "app", "m", "a"], dest="omit") parser.add_argument('--dry-run', help="Don't actually do anything", action = 'store_true', dest="dry") parser.add_argument('-q', '--quiet', help="Supress most output", action = 'store_true', dest="quiet") parser.add_argument('--porcelain', help="Machine-readable output (implies --dry-run)", action = 'store_true', dest="clean") args = vars(parser.parse_args()) except Exception: wireutils.cprint("Argparse library missing. Will not be able to parse cli arguments.\n", color=wireutils.bcolors.DARKRED) args = {} if not args.get("quiet") and not args.get("clean"): wireutils.cprint("""Perdyshot Dependency Checker {bold}============================{endc} """, strip = True) ROOT = os.geteuid() == 0 if not args.get("dry") and not args.get("clean"): if not ROOT: if not readBool("You aren't root.\nInstalling missing packages will not be supported.\nDo you wish to continue?"): sys.exit() print if not pip: if not readBool("{bold}pip{endc} isn't installed.\nInstalling missing packages will not be supported.\nDo you wish to continue?"): sys.exit() print if args.get("omit") not in ["module", "m"]: if not args.get("quiet") and not args.get("clean"): wireutils.cprint("Checking module dependencies for Perdyshot ...\n{bold}----------------------------------------------{endc}\n") if moduleNeedsInstalling("argparse"): installModule("argparse") if moduleNeedsInstalling("configobj"): installModule("configobj") if not checkModule("gi"): manualInstallNotify("gi", "http://python-gtk-3-tutorial.readthedocs.org/en/latest/install.html") if moduleNeedsInstalling("gtk"): installModule("PyGTK") if moduleNeedsInstalling("PIL"): installModule("Pillow") if not checkModule("PyQt4"): manualInstallNotify("PyQt4", "http://pyqt.sourceforge.net/Docs/PyQt4/installation.html") if moduleNeedsInstalling("validate"): installModule("validate") if moduleNeedsInstalling("enum"): installModule("enum34") if moduleNeedsInstalling("datetime"): installModule("DateTime") if not args.get("omit") and not args.get("quiet") and not args.get("clean"): print if args.get("omit") not in ["app", "a"]: if not args.get("quiet") and not args.get("clean"): wireutils.cprint("Checking application dependencies for Perdyshot ...\n{bold}---------------------------------------------------{endc}\n") checkApplication("convert", "ImageMagick", "http://www.imagemagick.org/script/binary-releases.php") checkApplication("xclip", "xclip", "https://github.com/milki/xclip/blob/master/INSTALL") except (KeyboardInterrupt, EOFError): print
Python
0
@@ -1083,34 +1083,8 @@ if -not args.get(%22quiet%22) and args @@ -1175,32 +1175,10 @@ el -if not args.get(%22quiet%22) +se :%0A @@ -2411,34 +2411,8 @@ if -not args.get(%22quiet%22) and args @@ -2503,32 +2503,10 @@ el -if not args.get(%22quiet%22) +se :%0A
55f3e0e222246bfbc9c1a19f68b06941bac6cd70
Add an option to include spaces on random string generator
base/utils.py
base/utils.py
""" Small methods for generic use """ # standard library import itertools import random import re import string import unicodedata # django from django.utils import timezone def today(): """ This method obtains today's date in local time """ return timezone.localtime(timezone.now()).date() # BROKEN def grouper(iterable, n): args = [iter(iterable)] * n return ([e for e in t if e is not None] for t in itertools.izip_longest( *args )) def format_rut(rut): if not rut: return '' rut = rut.replace(' ', '').replace('.', '').replace('-', '') rut = rut[:9] if not rut: return '' verifier = rut[-1] code = rut[0:-1][::-1] code = re.sub("(.{3})", "\\1.", code, 0, re.DOTALL) code = code[::-1] return '%s-%s' % (code, verifier) def camel_to_underscore(string): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() def underscore_to_camel(word): return ''.join(x.capitalize() or '_' for x in word.split('_')) def strip_accents(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) # BROKEN def tz_datetime(s, *args, **kwargs): """ Creates a datetime.datetime object but with the current timezone """ tz = timezone.get_current_timezone() naive_dt = timezone.datetime(*args, **kwargs) return timezone.make_aware(naive_dt, tz) def random_string(length=6, chars=None): if chars is None: chars = string.ascii_uppercase + string.digits return ''.join(random.choice(chars) for x in range(length))
Python
0.000004
@@ -1520,16 +1520,37 @@ ars=None +, include_spaces=True ):%0A i @@ -1622,16 +1622,61 @@ digits%0A%0A + if include_spaces:%0A chars += ' '%0A%0A retu
5496bd29c4262c252367d7b305d2a78fd1ad2fa7
move debug call
bcdata/wcs.py
bcdata/wcs.py
import logging import requests import bcdata log = logging.getLogger(__name__) def get_dem( bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25, interpolation=None ): """Get TRIM DEM for provided bounds, write to GeoTIFF. """ bbox = ",".join([str(b) for b in bounds]) # do not upsample if resolution < 25: raise ValueError("Resolution requested must be 25m or greater") # if specifying interpolation method, there has to actually be a # resampling requested - resolution can't be the native 25m if interpolation and resolution == 25: raise ValueError("Requested coverage at native resolution, no resampling required, interpolation {} invalid") # if downsampling, default to bilinear (the server defaults to nearest) if resolution > 25 and not interpolation: log.info("Interpolation not specified, defaulting to bilinear") interpolation = "bilinear" # make sure interpolation is valid if interpolation: valid_interpolations = ["nearest", "bilinear", "bicubic"] if interpolation not in valid_interpolations: raise ValueError("Interpolation {} invalid. Valid keys are: {}".format(interpolation, ",".join(valid_interpolations))) # build request payload = { "service": "WCS", "version": "1.0.0", "request": "GetCoverage", "coverage": "pub:bc_elevation_25m_bcalb", "Format": "GeoTIFF", "bbox": bbox, "CRS": src_crs, "RESPONSE_CRS": dst_crs, "resx": str(resolution), "resy": str(resolution), } if interpolation: payload["INTERPOLATION"] = interpolation # request data from WCS r = requests.get(bcdata.WCS_URL, params=payload) log.debug(r.url) # save to tiff if r.status_code == 200: with open(out_file, "wb") as file: file.write(r.content) return out_file else: raise RuntimeError( "WCS request failed with status code {}".format(str(r.status_code)) )
Python
0.000002
@@ -1782,29 +1782,8 @@ oad) -%0A log.debug(r.url) %0A%0A @@ -1991,16 +1991,19 @@ request + %7B%7D failed @@ -2030,16 +2030,23 @@ .format( +r.url, str(r.st
c4ad9519c117edfdc59f229380fa0797bc6bfffa
Update BitshareComFolder.py
module/plugins/crypter/BitshareComFolder.py
module/plugins/crypter/BitshareComFolder.py
# -*- coding: utf-8 -*- from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo class BitshareComFolder(SimpleCrypter): __name__ = "BitshareComFolder" __type__ = "crypter" __version__ = "0.03" __pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+' __config__ = [("use_premium" , "bool", "Use premium account if available" , True), ("use_subfolder" , "bool", "Save package to subfolder" , True), ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)] __description__ = """Bitshare.com folder decrypter plugin""" __license__ = "GPLv3" __authors__ = [("stickell", "l.stickell@yahoo.it")] LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>' NAME_PATTERN = r'View public folder "(?P<N>.+)"</h1>' getInfo = create_getInfo(BitshareComFolder)
Python
0
@@ -228,17 +228,17 @@ _ = %220.0 -3 +4 %22%0A%0A _ @@ -863,16 +863,17 @@ (?P%3CN%3E.+ +? )%22%3C/h1%3E'
326f0b881d36ed19d0a37495ae34fc24fc1eb707
Load the spotify header file from an absolute path
connect_ffi.py
connect_ffi.py
from cffi import FFI ffi = FFI() print "Loading Spotify library..." #TODO: Use absolute paths for open() and stuff #Header generated with cpp spotify.h > spotify.processed.h && sed -i 's/__extension__//g' spotify.processed.h with open("spotify.processed.h") as file: header = file.read() ffi.cdef(header) ffi.cdef(""" void *malloc(size_t size); void exit(int status); """) C = ffi.dlopen(None) lib = ffi.verify(""" #include "spotify.h" """, include_dirs=['./'], library_dirs=['./'], libraries=[str('spotify_embedded_shared')])
Python
0
@@ -229,16 +229,42 @@ th open( +os.path.join(sys.path%5B0%5D, %22spotify @@ -277,16 +277,17 @@ ssed.h%22) +) as file
278dcb8b2fb3e1f69434ec9c41e566501cdc50bd
Remove unused functionality
organizations/backends/forms.py
organizations/backends/forms.py
# -*- coding: utf-8 -*- # Copyright (c) 2012-2019, Ben Lopatin and contributors # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other materials provided with # the distribution # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from django import forms from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ class UserRegistrationForm(forms.ModelForm): """ Form class for completing a user's registration and activating the User. The class operates on a user model which is assumed to have the required fields of a BaseUserModel """ # TODO decouple first/last names from this form first_name = forms.CharField(max_length=30) last_name = forms.CharField(max_length=30) password = forms.CharField(max_length=30, widget=forms.PasswordInput) password_confirm = forms.CharField(max_length=30, widget=forms.PasswordInput) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.initial["username"] = "" def clean(self): password = self.cleaned_data.get("password") password_confirm = self.cleaned_data.get("password_confirm") if password != password_confirm or not password: raise forms.ValidationError(_("Your password entries must match")) return super().clean() class Meta: model = get_user_model() exclude = ( "is_staff", "is_superuser", "is_active", "last_login", "date_joined", "groups", "user_permissions", ) def org_registration_form(org_model): """ Generates a registration ModelForm for the given organization model class """ class OrganizationRegistrationForm(forms.ModelForm): """Form class for creating new organizations owned by new users.""" email = forms.EmailField() class Meta: model = org_model exclude = ("is_active", "users") def save(self, *args, **kwargs): self.instance.is_active = False super().save(*args, **kwargs) return OrganizationRegistrationForm
Python
0
@@ -1492,16 +1492,71 @@ zy as _%0A +from django.contrib.auth.forms import UserCreationForm%0A %0A%0Aclass @@ -1572,35 +1572,36 @@ trationForm( -forms.Model +UserCreation Form):%0A %22 @@ -1796,25 +1796,24 @@ del%0A %22%22%22%0A -%0A # TODO d @@ -1814,997 +1814,69 @@ TODO - decouple first/last names from this form%0A first_name = forms.CharField(max_length=30)%0A last_name = forms.CharField(max_length=30)%0A password = forms.CharField(max_length=30, widget=forms.PasswordInput)%0A password_confirm = forms.CharField(max_length=30, widget=forms.PasswordInput)%0A%0A def __init__(self, *args, **kwargs):%0A super().__init__(*args, **kwargs)%0A self.initial%5B%22username%22%5D = %22%22%0A%0A def clean(self):%0A password = self.cleaned_data.get(%22password%22)%0A password_confirm = self.cleaned_data.get(%22password_confirm%22)%0A if password != password_confirm or not password:%0A raise forms.ValidationError(_(%22Your password entries must match%22))%0A return super().clean()%0A%0A class Meta:%0A model = get_user_model()%0A exclude = (%0A %22is_staff%22,%0A %22is_superuser%22,%0A %22is_active%22,%0A %22last_login%22,%0A %22date_joined%22,%0A %22groups%22,%0A %22user_permissions%22,%0A ) +(bennylope): Remove this entirely and replace with base class %0A%0A%0Ad @@ -2280,16 +2280,18 @@ %0A + # def sav @@ -2324,20 +2324,22 @@ +# + self.ins @@ -2365,24 +2365,26 @@ alse%0A + # super()
9d6e5a9077add7ddbc3e03799d3a9916350e0baf
Add pre-save and post-delete handlers to custom image model
core/models.py
core/models.py
from django.db import models from wagtail.core.fields import RichTextField from wagtail.core.models import Page, Orderable from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel from wagtail.search import index from wagtail.snippets.models import register_snippet from wagtail.images.edit_handlers import ImageChooserPanel from wagtail.images.models import Image, AbstractImage, AbstractRendition from modelcluster.fields import ParentalKey class StaticPage(Page): body = RichTextField() content_panels = Page.content_panels + [FieldPanel("body")] class StaffPage(Page): first_name = models.CharField(max_length=100) last_name = models.CharField(max_length=100) biography = RichTextField(null=True, blank=True) photo = models.ForeignKey( "wagtailimages.Image", null=True, blank=True, on_delete=models.PROTECT ) email_address = models.EmailField(null=True, blank=True) content_panels = [ MultiFieldPanel( [FieldPanel("first_name"), FieldPanel("last_name")], heading="Name" ), FieldPanel("email_address"), FieldPanel("biography"), ImageChooserPanel("photo"), InlinePanel("terms", label="Term", heading="Terms"), ] search_fields = [index.SearchField("first_name"), index.SearchField("last_name")] parent_page_types = ["StaffIndexPage"] subpage_types = [] def clean(self): super().clean() self.title = f"{self.first_name} {self.last_name}" def get_articles(self): return [r.article for r in self.articles.select_related("article").all()] def get_current_positions(self): return [ t.position for t in self.terms.filter(date_ended__isnull=True).select_related( "position" ) ] def get_active_positions(self): return [term.position for term in self.terms.all() if term.date_ended is None] class StaffIndexPage(Page): subpage_types = ["StaffPage"] def get_active_staff(self): return ( StaffPage.objects.live() .descendant_of(self) .filter(terms__date_ended__isnull=True) .select_related("photo") .prefetch_related("terms__position") .distinct() ) @register_snippet class Position(models.Model): title = models.CharField(max_length=100) def __str__(self): return self.title @register_snippet class Term(Orderable, models.Model): position = models.ForeignKey( Position, on_delete=models.PROTECT, related_name="terms" ) person = ParentalKey(StaffPage, on_delete=models.PROTECT, related_name="terms") date_started = models.DateField() date_ended = models.DateField(blank=True, null=True) def __str__(self): return f'{self.position.title} ({self.date_started}—{self.date_ended or "now"})' # https://docs.wagtail.io/en/v2.2.2/advanced_topics/images/custom_image_model.html class CustomImage(AbstractImage): photographer = models.ForeignKey( StaffPage, on_delete=models.CASCADE, related_name="images", blank=True, null=True, ) admin_form_fields = Image.admin_form_fields + ("photographer",) class CustomRendition(AbstractRendition): image = models.ForeignKey( CustomImage, on_delete=models.CASCADE, related_name="renditions" ) class Meta: unique_together = ("image", "filter_spec", "focal_point_key") @register_snippet class Photo(models.Model): image = models.ForeignKey( CustomImage, null=True, blank=True, on_delete=models.PROTECT ) caption = RichTextField(blank=True, null=True) panels = [ImageChooserPanel("image"), FieldPanel("caption")]
Python
0
@@ -21,16 +21,111 @@ t models +%0Afrom django.db.models.signals import pre_delete, pre_save%0Afrom django.dispatch import receiver %0A%0Afrom w @@ -3356,16 +3356,566 @@ er%22,)%0A%0A%0A +# Delete the source image file when an image is deleted%0A@receiver(pre_delete, sender=CustomImage)%0Adef image_delete(sender, instance, **kwargs):%0A instance.file.delete(False)%0A%0A%0A# Do feature detection when a user saves an image without a focal point%0A@receiver(pre_save, sender=CustomImage)%0Adef image_feature_detection(sender, instance, **kwargs):%0A # Make sure the image doesn't already have a focal point%0A if not instance.has_focal_point():%0A # Set the focal point%0A instance.set_focal_point(instance.get_suggested_focal_point())%0A%0A%0A class Cu @@ -4143,24 +4143,216 @@ int_key%22)%0A%0A%0A +# Delete the rendition image file when a rendition is deleted%0A@receiver(pre_delete, sender=CustomRendition)%0Adef rendition_delete(sender, instance, **kwargs):%0A instance.file.delete(False)%0A%0A%0A @register_sn
331fb50e6a4dcef99c8a6806d3efd7531859542f
add comments
achievements/templatetags/achievement_tags.py
achievements/templatetags/achievement_tags.py
from django import template from achievements.models import Category, Trophy from achievements import settings register = template.Library() @register.inclusion_tag('achievements/single_category.html') def render_category(category, user): return { 'category': category, 'percentage': category.get_complete_percentage(user), 'completed_achievements': category.count_all_complete_achievements(user) } @register.inclusion_tag('achievements/navigation.html') def render_navigation(current_category=None): return { 'categories': Category.objects.filter(parent_category__isnull=True), 'current_category': current_category, } @register.inclusion_tag('achievements/trophies.html') def render_trophies(user, takes_context=True): trophies = [None] * settings.TROPHY_COUNT for trophy in Trophy.objects.filter(user=user): trophies[trophy.position] = trophy return {'trophies': trophies} @register.simple_tag def render_subachievement(user, achievement): if hasattr(achievement, 'progressachievement'): return achievement.progressachievement.render(user) if hasattr(achievement, 'taskachievement'): return achievement.taskachievement.render(user) if hasattr(achievement, 'collectionachievement'): return achievement.collectionachievement.render(user)
Python
0
@@ -106,17 +106,16 @@ ttings%0A%0A -%0A register @@ -137,16 +137,70 @@ rary()%0A%0A +# call single_category.html with the given parameters%0A @registe @@ -474,32 +474,81 @@ ts(user)%0A %7D%0A%0A +# call navigation.html with the given parameters%0A @register.inclus @@ -776,16 +776,63 @@ %0A %7D%0A%0A +# call trophies.html with the given parameters%0A @registe @@ -970,16 +970,67 @@ Y_COUNT%0A + # put trophy on the given position in an array%0A for @@ -1151,16 +1151,87 @@ phies%7D%0A%0A +# check type of achievement and return the accordingly render function%0A @registe @@ -1551,32 +1551,32 @@ nachievement'):%0A - return a @@ -1600,28 +1600,56 @@ tionachievement.render(user) +%0A else:%0A return %22%22
66fc121dbe0dbb7a69a62bfdaf98838a4f7a0bf3
Update yeti.py
misp_modules/modules/expansion/yeti.py
misp_modules/modules/expansion/yeti.py
import json import json try: import pyeti except ImportError: print("pyeti module not installed.") misperrors = {'error': 'Error'} mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'], 'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url']} # possible module-types: 'expansion', 'hover' or both moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven', 'description': 'Query on yeti', 'module-type': ['expansion', 'hover']}
Python
0
@@ -18,16 +18,17 @@ rt json%0A +%0A try:%0A @@ -513,8 +513,482 @@ ver'%5D%7D%0A%0A +moduleconfig = %5B'apikey', 'url'%5D%0A%0A%0Aclass Yeti:%0A%0A def __init__(self, url, key):%0A self.api = pyeti.YetiApi(url, api_key=key)%0A self.dict = %7B'Ip': 'ip-src', 'Domain': 'domain', 'Hostname': 'hostname'%7D%0A%0A def search(self, value):%0A obs = self.api.observable_search(value=value)%0A if obs:%0A return obs%0A%0A def %0Adef handler(q=False):%0A if q is False:%0A return False%0A request = json.loads(q)%0A attribute = request%5B'attribute'%5D%0A
24124edccd9a822bb300815907c37d6453defed5
Add recursive handling of nested states.
py/statemachines/simple_state_machine_script_test.py
py/statemachines/simple_state_machine_script_test.py
#---------------------------------------------------------------------------------------- # BEGIN: READ_HEXAPOD_CURRENT_POSE # TEMPLATE: ReadTransformState # smach.StateMachine.add('READ_HEXAPOD_CURRENT_POSE', TFListenerState('ur10_1/base', 'hexapod_1/top', 'hexapod_current_pose'), transitions={'succeeded':'MOVE_ABOVE_HEXAPOD_1'}, remapping={'hexapod_current_pose':'hexapod_current_pose'}) # END: READ_HEXAPOD_CURRENT_POSE #---------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------- # BEGIN: MOVE_ABOVE_HEXAPOD_1 # TEMPLATE: CartTrapVelActionState # sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_position_offset = np.asarray([0.0, 0.0, -0.2]) sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_rotation_offset = np.asarray([0.0, 0.0, 0.0]) sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_desired_velocity = 0.1 smach.StateMachine.add('MOVE_ABOVE_HEXAPOD_1', smach_ros.SimpleActionState('/ur10_1/cart_trap_vel_action_server', robot_module.msg.CartTrapVelAction, goal_cb = cart_trap_vel_goal_cb, input_keys=['cart_trap_vel_pose_input', 'cart_trap_vel_position_offset_input', 'cart_trap_vel_rotation_offset_input', 'cart_trap_vel_desired_velocity_input']), transitions={'succeeded':'OPEN_TOOL_EXCHANGE_1'}, remapping={'cart_trap_vel_pose_input':'hexapod_current_pose', 'cart_trap_vel_position_offset_input':'MOVE_ABOVE_HEXAPOD_1_position_offset', 'cart_trap_vel_rotation_offset_input':'MOVE_ABOVE_HEXAPOD_1_rotation_offset', 'cart_trap_vel_desired_velocity_input':'MOVE_ABOVE_HEXAPOD_1_desired_velocity'}) # END: MOVE_ABOVE_HEXAPOD_1 #----------------------------------------------------------------------------------------
Python
0
@@ -2291,28 +2291,1946 @@ --------------------------%0A%0A +#----------------------------------------------------------------------------------------%0A# BEGIN: OPEN_TOOL_EXCHANGE_1%0A# TEMPLATE: SetOutput%0A#%0AOPEN_TOOL_EXCHANGE_1_request = DigitalOutputRequest(TOOL_EXCHANGE_GPIO, TOOL_EXCHANGE_OPEN)%0A%0Asmach.StateMachine.add('OPEN_TOOL_EXCHANGE_1',%0A smach_ros.ServiceState('/ur10_1/set_output',%0A DigitalOutput,%0A request = OPEN_TOOL_EXCHANGE_1_request),%0A transitions=%7B'succeeded':'COUPLE_WITH_HEXAPOD'%7D)%0A# END: OPEN_TOOL_EXCHANGE_1%0A#----------------------------------------------------------------------------------------%0A%0A#----------------------------------------------------------------------------------------%0A# BEGIN: SUB_STATE_1%0A# TEMPLATE: ReadTransformState%0A#%0Asmach.StateMachine.add('SUB_STATE_1', TFListenerState('ur10_2/base', 'hexapod_1/top', 'hexapod_current_pose'),%0A transitions=%7B'succeeded':'SUB_STATE_2'%7D,%0A remapping=%7B'hexapod_current_pose':'hexapod_current_pose'%7D)%0A# END: SUB_STATE_1%0A#----------------------------------------------------------------------------------------%0A%0A#----------------------------------------------------------------------------------------%0A# BEGIN: SUB_STATE_2%0A# TEMPLATE: ReadTransformState%0A#%0Asmach.StateMachine.add('SUB_STATE_2', TFListenerState('ur10_2/base', 'hexapod_1/top', 'hexapod_current_pose'),%0A transitions=%7B'succeeded':'MOVE_ABOVE_HEXAPOD_1'%7D,%0A remapping=%7B'hexapod_current_pose':'hexapod_current_pose'%7D)%0A# END: SUB_STATE_2%0A#----------------------------------------------------------------------------------------%0A%0A
c3dffef7869c0ce19801d78393a336b6b6ecbce7
stop littering /tmp with temporary resource files
pynodegl-utils/pynodegl_utils/tests/cmp_resources.py
pynodegl-utils/pynodegl_utils/tests/cmp_resources.py
#!/usr/bin/env python # # Copyright 2020 GoPro Inc. # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import os import csv import tempfile from .cmp import CompareSceneBase, get_test_decorator _COLS = ( 'Textures memory', 'Buffers count', 'Buffers total', 'Blocks count', 'Blocks total', 'Medias count', 'Medias total', 'Textures count', 'Textures total', 'Computes', 'GraphicCfgs', 'Renders', 'RTTs', ) class _CompareResources(CompareSceneBase): def __init__(self, scene_func, columns=_COLS, **kwargs): super().__init__(scene_func, width=320, height=240, **kwargs) # We can't use NamedTemporaryFile because we may not be able to open it # twice on some systems fd, self._csvfile = tempfile.mkstemp(suffix='.csv', prefix='ngl-test-resources-') os.close(fd) self._columns = columns self._hud = 1 self._hud_export_filename = self._csvfile def get_out_data(self, debug=False, debug_func=None): for frame in self.render_frames(): pass # filter columns with open(self._csvfile) as csvfile: reader = csv.DictReader(csvfile) data = [self._columns] for row in reader: data.append([row[k] for k in self._columns]) # rely on base string diff ret = '' for row in data: ret += ','.join(row) + '\n' os.remove(self._csvfile) return ret test_resources = get_test_decorator(_CompareResources)
Python
0.000002
@@ -2179,16 +2179,59 @@ + '%5Cn'%0A%0A + return ret%0A%0A def __del__(self):%0A @@ -2256,36 +2256,16 @@ vfile)%0A%0A - return ret%0A%0A %0Atest_re
deccf656db39ac949f93e562e4f41a32589feb9b
Use a more complex and extendable check for shortcuts in StructuredText
cybox/common/structured_text.py
cybox/common/structured_text.py
# Copyright (c) 2013, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import cybox import cybox.bindings.cybox_common as common_binding class StructuredText(cybox.Entity): _binding = common_binding _namespace = 'http://cybox.mitre.org/common-2' def __init__(self, value=None): self.value = value self.structuring_format = None def to_obj(self, structured_text_obj=None): if not structured_text_obj: text_obj = common_binding.StructuredTextType() else: text_obj = structured_text_obj text_obj.set_valueOf_(self.value) if self.structuring_format is not None: text_obj.set_structuring_format(self.structuring_format) return text_obj def to_dict(self): text_dict = {} text_dict['value'] = self.value text_dict['structuring_format'] = self.structuring_format return text_dict @classmethod def from_obj(cls, text_obj, text_class=None): if not text_obj: return None if not text_class: text = StructuredText() else: text = text_class text.value = text_obj.get_valueOf_() text.structuring_format = text_obj.get_structuring_format() return text @classmethod def from_dict(cls, text_dict, text_class=None): if text_dict is None: return None if not text_class: text = StructuredText() else: text = text_class if not isinstance(text_dict, dict): text.value = text_dict else: text.value = text_dict.get('value') text.structuring_format = text_dict.get('structuring_format') return text
Python
0
@@ -795,16 +795,132 @@ (self):%0A + # Shortcut if structuring_format is not defined.%0A if self.is_plain():%0A return self.value%0A%0A @@ -1067,16 +1067,353 @@ t_dict%0A%0A + def is_plain(self):%0A %22%22%22Whether this can be represented as a string rather than a dictionary%0A%0A Subclasses can override this to include their custom fields in this%0A check:%0A%0A return (super(..., self).is_plain() and self.other_field is None)%0A %22%22%22%0A return (self.structuring_format is None)%0A%0A @cla
67f6738121a5b8197297b69d11584af50ae15b4d
Fix #2702
module/plugins/hoster/OneFichierCom.py
module/plugins/hoster/OneFichierCom.py
# -*- coding: utf-8 -*- import re from module.network.RequestFactory import getURL as get_url from module.plugins.internal.SimpleHoster import SimpleHoster from module.plugins.internal.misc import format_exc class OneFichierCom(SimpleHoster): __name__ = "OneFichierCom" __type__ = "hoster" __version__ = "1.02" __status__ = "testing" __pattern__ = r'https?://(?:www\.)?(?:\w+\.)?(?P<HOST>1fichier\.com|alterupload\.com|cjoint\.net|d(?:es)?fichiers\.com|dl4free\.com|megadl\.fr|mesfichiers\.org|piecejointe\.net|pjointe\.com|tenvoi\.com)(?:/\?\w+)?' __config__ = [("activated" , "bool", "Activated" , True), ("use_premium" , "bool", "Use premium account if available" , True), ("fallback" , "bool", "Fallback to free download if premium fails" , True), ("chk_filesize", "bool", "Check file size" , True), ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )] __description__ = """1fichier.com hoster plugin""" __license__ = "GPLv3" __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"), ("the-razer", "daniel_ AT gmx DOT net"), ("zoidberg", "zoidberg@mujmail.cz"), ("imclem", None), ("stickell", "l.stickell@yahoo.it"), ("Elrick69", "elrick69[AT]rocketmail[DOT]com"), ("Walter Purcaro", "vuolter@gmail.com"), ("Ludovic Lehmann", "ludo.lehmann@gmail.com"), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")] DISPOSITION = False #@TODO: Remove disposition in 0.4.10 URL_REPLACEMENTS = [("http:", "https:")] COOKIES = [("1fichier.com", "LG", "en")] NAME_PATTERN = r'>File\s*Name :</td>\s*<td.*>(?P<N>.+?)<' SIZE_PATTERN = r'>Size :</td>\s*<td.*>(?P<S>[\d.,]+) (?P<U>[\w^_]+)' OFFLINE_PATTERN = r'(?:File not found !\s*<)|(?:>The requested file has been deleted following an abuse request\.<)' LINK_PATTERN = r'<a href="(.+?)".*>Click here to download the file</a>' WAIT_PATTERN = r'>You must wait \d+ minutes' def setup(self): self.multiDL = self.premium self.resume_download = True @classmethod def get_info(cls, url="", html=""): redirect = url for i in xrange(10): try: headers = dict((k.lower(), v) for k,v in re.findall(r'(?P<name>.+?): (?P<value>.+?)\r?\n', get_url(redirect, just_header=True))) if 'location' in headers and headers['location']: redirect = headers['location'] else: if 'content-type' in headers and headers['content-type'] == "application/octet-stream": if "filename=" in headers.get('content-disposition'): name = dict(_i.split("=") for _i in map(str.strip, headers['content-disposition'].split(";"))[1:])['filename'].strip("\"'") else: name = url info = {'name' : name, 'size' : long(headers.get('content-length')), 'status': 7, 'url' : url} else: info = super(OneFichierCom, cls).get_info(url, html) break except Exception, e: print format_exc() info = {'status' : 8, 'error' : e.message} break else: info = {'status' : 8, 'error' : _("Too many redirects")} return info def handle_free(self, pyfile): url, inputs = self.parse_html_form('action="https://1fichier.com/\?[\w^_]+') if not url: return if "pass" in inputs: inputs['pass'] = self.get_password() inputs['dl_no_ssl'] = "on" self.data = self.load(url, post=inputs) m = re.search(self.LINK_PATTERN, self.data) if m: self.link = m.group(1) def handle_premium(self, pyfile): self.download(pyfile.url, post={'did': 0, 'dl_no_ssl': "on"}, disposition=False) #@TODO: Remove disposition in 0.4.10
Python
0
@@ -323,17 +323,17 @@ _ = %221.0 -2 +3 %22%0A __ @@ -1930,24 +1930,28 @@ PATTERN + = r'%3EFile%5Cs* @@ -2000,24 +2000,28 @@ PATTERN + = r'%3ESize :%3C @@ -2085,16 +2085,20 @@ ATTERN + = r'(?:F @@ -2207,24 +2207,28 @@ PATTERN + = r'%3Ca href= @@ -2273,16 +2273,144 @@ ile%3C/a%3E' +%0A TEMP_OFFLINE_PATTERN = r'Warning ! Without subscription, you can only download one file at%7COur services are in maintenance' %0A%0A WA
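Besides the version bump and assignment alignment, the diff adds a TEMP_OFFLINE_PATTERN. A hedged sketch of how such a pattern classifies page text; `classify` is a hypothetical helper for illustration, not part of the SimpleHoster framework:

```python
import re

# Pattern text decoded from the diff above; SimpleHoster subclasses declare
# such patterns and the framework matches them against the downloaded page.
TEMP_OFFLINE_PATTERN = (r'Warning ! Without subscription, you can only '
                        r'download one file at|Our services are in maintenance')

def classify(html):
    # Hypothetical helper, not the plugin's own code.
    if re.search(TEMP_OFFLINE_PATTERN, html):
        return 'temp_offline'
    return 'unknown'

print(classify('<p>Our services are in maintenance</p>'))  # temp_offline
```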
a2179c0773ad3f09a5d1a6bdced985367ede0cb6
Add ability to upload parent data with case import
corehq/apps/importer/tasks.py
corehq/apps/importer/tasks.py
from celery.task import task from xml.etree import ElementTree from dimagi.utils.parsing import json_format_datetime from corehq.apps.hqcase.utils import submit_case_blocks from corehq.apps.importer.const import LookupErrors import corehq.apps.importer.util as importer_util from corehq.apps.users.models import CouchUser from soil import DownloadBase from casexml.apps.case.tests.util import CaseBlock, CaseBlockError from casexml.apps.case.xml import V2 from dimagi.utils.prime_views import prime_views import uuid POOL_SIZE = 10 PRIME_VIEW_FREQUENCY = 500 @task def bulk_import_async(import_id, config, domain, excel_id): task = bulk_import_async excel_ref = DownloadBase.get(excel_id) spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns) if not spreadsheet: return {'error': 'EXPIRED'} if spreadsheet.has_errors: return {'error': 'HAS_ERRORS'} row_count = spreadsheet.get_num_rows() columns = spreadsheet.get_header_columns() match_count = created_count = too_many_matches = errors = 0 blank_external_ids = [] invalid_dates = [] owner_id_errors = [] prime_offset = 1 # used to prevent back-to-back priming user = CouchUser.get_by_user_id(config.couch_user_id, domain) username = user.username user_id = user._id # keep a cache of id lookup successes to help performance id_cache = {} for i in range(row_count): DownloadBase.set_progress(task, i, row_count) # skip first row if it is a header field if i == 0 and config.named_columns: continue priming_progress = match_count + created_count + prime_offset if priming_progress % PRIME_VIEW_FREQUENCY == 0: prime_views(POOL_SIZE) # increment so we can't possibly prime on next iteration prime_offset += 1 row = spreadsheet.get_row(i) search_id = importer_util.parse_search_id(config, columns, row) if config.search_field == 'external_id' and not search_id: # do not allow blank external id since we save this blank_external_ids.append(i + 1) continue case, error = importer_util.lookup_case( config.search_field, search_id, domain, config.case_type ) try: fields_to_update = importer_util.populate_updated_fields( config, columns, row ) except importer_util.InvalidDateException: invalid_dates.append(i + 1) continue if case: pass elif error == LookupErrors.NotFound: if not config.create_new_cases: continue elif error == LookupErrors.MultipleResults: too_many_matches += 1 continue uploaded_owner_id = fields_to_update.pop('owner_id', None) if uploaded_owner_id: # If an owner_id mapping exists, verify it is a valid user # or case sharing group if importer_util.is_valid_id(uploaded_owner_id, domain, id_cache): owner_id = uploaded_owner_id id_cache[uploaded_owner_id] = True else: owner_id_errors.append(i + 1) id_cache[uploaded_owner_id] = False continue else: # if they didn't supply an owner_id mapping, default to current # user owner_id = user_id external_id = fields_to_update.pop('external_id', None) if not case: id = uuid.uuid4().hex extras = {} if config.search_field == 'external_id': extras['external_id'] = search_id try: caseblock = CaseBlock( create=True, case_id=id, version=V2, user_id=user_id, owner_id=owner_id, case_type=config.case_type, update=fields_to_update, **extras ) submit_case_block(caseblock, domain, username, user_id) created_count += 1 except CaseBlockError: errors += 1 elif case and case.type == config.case_type: extras = {} if external_id: extras['external_id'] = external_id try: caseblock = CaseBlock( create=False, case_id=case._id, owner_id=owner_id, version=V2, update=fields_to_update, **extras ) submit_case_block(caseblock, 
domain, username, user_id) match_count += 1 except CaseBlockError: errors += 1 return { 'created_count': created_count, 'match_count': match_count, 'too_many_matches': too_many_matches, 'blank_externals': blank_external_ids, 'invalid_dates': invalid_dates, 'owner_id_errors': owner_id_errors, 'errors': errors, } def submit_case_block(caseblock, domain, username, user_id): """ Convert a CaseBlock object to xml and submit for creation/update """ casexml = ElementTree.tostring(caseblock.as_xml(format_datetime=json_format_datetime)) submit_case_blocks(casexml, domain, username, user_id)
Python
0
@@ -3592,16 +3592,678 @@ ', None) +%0A parent_id = fields_to_update.pop('parent_id', None)%0A parent_external_id = fields_to_update.pop('parent_external_id', None)%0A%0A extras = %7B%7D%0A if parent_id:%0A extras%5B'index'%5D = %7B%0A 'parent': (config.case_type, parent_id)%0A %7D%0A elif parent_external_id:%0A parent_case, error = importer_util.lookup_case(%0A 'external_id',%0A parent_external_id,%0A domain,%0A config.case_type%0A )%0A if parent_case:%0A extras%5B'index'%5D = %7B%0A 'parent': (config.case_type, parent_case._id)%0A %7D %0A%0A @@ -4316,32 +4316,8 @@ ex%0A%0A - extras = %7B%7D%0A @@ -4952,24 +4952,24 @@ errors += 1%0A + elif @@ -5013,32 +5013,8 @@ pe:%0A - extras = %7B%7D%0A
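The diff resolves a parent case in two ways before building the CaseBlock index extras: a direct `parent_id`, or a `parent_external_id` looked up first. A standalone sketch of that resolution logic; `lookup_by_external_id` is a stand-in for `importer_util.lookup_case`:

```python
def build_parent_index(fields, case_type, lookup_by_external_id):
    """Sketch of the parent-index extras the diff constructs."""
    extras = {}
    parent_id = fields.pop('parent_id', None)
    parent_external_id = fields.pop('parent_external_id', None)
    if parent_id:
        # The spreadsheet row carries the parent's case id directly.
        extras['index'] = {'parent': (case_type, parent_id)}
    elif parent_external_id:
        # Otherwise resolve the external id to a real case first.
        parent_case = lookup_by_external_id(parent_external_id)
        if parent_case is not None:
            extras['index'] = {'parent': (case_type, parent_case['_id'])}
    return extras

row = {'name': 'child', 'parent_id': 'abc123'}
print(build_parent_index(row, 'household', lambda eid: None))
# {'index': {'parent': ('household', 'abc123')}}
```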
1eae87ee4435b4dda35d64295de13756394dbce9
Add GET to 'Allow-Methods' by default. Fixes #12
crossdomain.py
crossdomain.py
#!/usr/bin/env python from datetime import timedelta from flask import make_response, request, current_app from functools import update_wrapper def crossdomain(origin=None, methods=None, headers=None, max_age=21600, attach_to_all=True, automatic_options=True): if methods is not None: methods = ', '.join(sorted(x.upper() for x in methods)) if headers is not None and not isinstance(headers, basestring): headers = ', '.join(x.upper() for x in headers) if not isinstance(origin, basestring): origin = ', '.join(origin) if isinstance(max_age, timedelta): max_age = max_age.total_seconds() def get_methods(): if methods is not None: return methods options_resp = current_app.make_default_options_response() return options_resp.headers['allow'] def decorator(f): def wrapped_function(*args, **kwargs): if automatic_options and request.method == 'OPTIONS': resp = current_app.make_default_options_response() else: resp = make_response(f(*args, **kwargs)) if not attach_to_all and request.method != 'OPTIONS': return resp h = resp.headers h['Access-Control-Allow-Origin'] = origin h['Access-Control-Allow-Methods'] = get_methods() h['Access-Control-Max-Age'] = str(max_age) if headers is not None: h['Access-Control-Allow-Headers'] = headers return resp f.provide_automatic_options = False return update_wrapper(wrapped_function, f) return decorator
Python
0
@@ -177,20 +177,23 @@ methods= -None +%5B'GET'%5D , header
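The one-line diff changes the `methods` default from None to ['GET'], so the Allow-Methods header is derived from an explicit list instead of falling back to Flask's default OPTIONS response. Isolating just the header-building step:

```python
def allow_methods_header(methods=['GET']):
    # Same join/upper logic as the decorator; the list default mirrors the
    # patched signature.
    return ', '.join(sorted(x.upper() for x in methods))

print(allow_methods_header())                 # GET
print(allow_methods_header(['get', 'post']))  # GET, POST
```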
40f839b0189af282a387366127588dfc86eaae40
fix incorrect indentation
cosmic_ray/commands/format.py
cosmic_ray/commands/format.py
from cosmic_ray.testing.test_runner import TestOutcome from cosmic_ray.work_record import WorkRecord from cosmic_ray.worker import WorkerOutcome import docopt import json import sys import xml.etree.ElementTree def _print_item(work_record, full_report): data = work_record.data outcome = work_record.worker_outcome if outcome in [WorkerOutcome.NORMAL, WorkerOutcome.EXCEPTION]: outcome = work_record.test_outcome ret_val = [ 'job ID {}:{}:{}'.format( work_record.job_id, outcome, work_record.module), 'command: {}'.format( ' '.join(work_record.command_line) if work_record.command_line is not None else ''), ] if outcome == TestOutcome.KILLED and not full_report: ret_val = [] elif work_record.worker_outcome == WorkerOutcome.TIMEOUT: if full_report: ret_val.append("timeout: {:.3f} sec".format(data)) else: ret_val = [] elif work_record.worker_outcome in [WorkerOutcome.NORMAL, WorkerOutcome.EXCEPTION]: ret_val += data ret_val += work_record.diff # for presentation purposes only if ret_val: ret_val.append('') return ret_val def is_killed(record): if record.worker_outcome == WorkerOutcome.TIMEOUT: return True elif record.worker_outcome == WorkerOutcome.NORMAL: if record.test_outcome == TestOutcome.KILLED: return True return False def create_report(records, show_pending, full_report=False): total_jobs = 0 pending_jobs = 0 kills = 0 for item in records: total_jobs += 1 if item.worker_outcome is None: pending_jobs += 1 if is_killed(item): kills += 1 if (item.worker_outcome is not None) or show_pending: yield from _print_item(item, full_report) completed_jobs = total_jobs - pending_jobs yield 'total jobs: {}'.format(total_jobs) if completed_jobs > 0: yield 'complete: {} ({:.2f}%)'.format( completed_jobs, completed_jobs / total_jobs * 100) yield 'survival rate: {:.2f}%'.format( (1 - kills / completed_jobs) * 100) else: yield 'no jobs completed' def survival_rate(): """cr-rate Usage: cr-rate Read JSON work-records from stdin and print the survival rate. """ records = (WorkRecord(json.loads(line)) for line in sys.stdin) total_jobs = 0 pending_jobs = 0 kills = 0 for item in records: total_jobs += 1 if item.worker_outcome is None: pending_jobs += 1 if is_killed(item): kills += 1 completed_jobs = total_jobs - pending_jobs if not completed_jobs: rate = 0 else: rate = (1 - kills / completed_jobs) * 100 print('{:.2f}'.format(rate)) def report(): """cr-report Usage: cr-report [--full-report] [--show-pending] Print a nicely formatted report of test results and some basic statistics. 
options: --full-report Show test output and mutation diff for killed mutants --show-pending Display results for incomplete tasks """ arguments = docopt.docopt(report.__doc__, version='cr-format 0.1') full_report = arguments['--full-report'] show_pending = arguments['--show-pending'] records = (WorkRecord(json.loads(line)) for line in sys.stdin) for line in create_report(records, show_pending, full_report): print(line) def create_element_from_item(work_record): data = work_record.data sub_elem = xml.etree.ElementTree.Element('testcase') sub_elem.set('classname', work_record.job_id) sub_elem.set('line', str(work_record.line_number)) sub_elem.set('file', work_record.module) if work_record.command_line: sub_elem.set('name', str(work_record.command_line)) outcome = work_record.worker_outcome if outcome == WorkerOutcome.TIMEOUT: error_elem = xml.etree.ElementTree.SubElement(sub_elem, 'error') error_elem.set('message', "Timeout: {:.3f} sec".format(data)) elif outcome == WorkerOutcome.EXCEPTION: error_elem = xml.etree.ElementTree.SubElement(sub_elem, 'error') error_elem.set('message', "Worker has encountered exception") error_elem.text = str(data) + "\n".join(work_record.diff) elif (outcome == WorkerOutcome.NORMAL and work_record.test_outcome in [TestOutcome.SURVIVED, TestOutcome.INCOMPETENT]): failure_elem = xml.etree.ElementTree.SubElement(sub_elem, 'failure') failure_elem.set('message', "Mutant has survived your unit tests") failure_elem.text = str(data) + "\n".join(work_record.diff) return sub_elem def create_xml_report(records): total_jobs = 0 errors = 0 failed = 0 root_elem = xml.etree.ElementTree.Element('testsuite') for item in records: total_jobs += 1 if item.worker_outcome is None: errors += 1 if is_killed(item): failed += 1 if (item.worker_outcome is not None): subelement = create_element_from_item(item) root_elem.append(subelement) root_elem.set('errors', str(errors)) root_elem.set('failures', str(failed)) root_elem.set('skips', str(0)) root_elem.set('tests', str(total_jobs)) return xml.etree.ElementTree.ElementTree(root_elem) def report_xml(): """cr-xml Usage: cr-xml Print an XML formatted report of test results for continuos integration systems """ records = (WorkRecord(json.loads(line)) for line in sys.stdin) xml_elem = create_xml_report(records) xml_elem.write(sys.stdout.buffer, encoding='utf-8', xml_declaration=True)
Python
0.999995
@@ -4964,28 +4964,24 @@ = 1%0A - if is_killed @@ -4996,28 +4996,24 @@ - failed += 1%0A @@ -5012,24 +5012,16 @@ ed += 1%0A - @@ -5070,24 +5070,16 @@ - subeleme @@ -5114,24 +5114,16 @@ m(item)%0A -
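The diff only changes indentation: the kill check and XML-element emission had been nested under the `worker_outcome is None` branch, so they never ran for completed items. A minimal reproduction of the corrected control flow, with plain dicts standing in for WorkRecord:

```python
def tally(records, is_killed):
    total, errors, failed, emitted = 0, 0, 0, []
    for item in records:
        total += 1
        if item['outcome'] is None:
            errors += 1
        if is_killed(item):              # now checked for every record
            failed += 1
        if item['outcome'] is not None:  # now emitted for completed records
            emitted.append(item)
    return total, errors, failed, emitted

recs = [{'outcome': 'normal'}, {'outcome': None}]
print(tally(recs, lambda r: r['outcome'] == 'normal'))
# (2, 1, 1, [{'outcome': 'normal'}])
```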
33d1422e87d2b0ba97d05bee65e005ac8ae94029
Fix matching undef behavior property
lib/symbioticpy/symbiotic/property.py
lib/symbioticpy/symbiotic/property.py
#!/usr/bin/python from . exceptions import SymbioticException from os.path import abspath, join class Property: def __init__(self, prpfile = None): self._prpfile = prpfile # property as LTL formulae (if available) self._ltl = [] def memsafety(self): """ Check for memory safety violations """ return False def signedoverflow(self): """ Check for signed integer overflows """ return False def assertions(self): """ Check for assertion violations """ return False def undefinedness(self): """ Check for undefined behavior """ return False def ltl(self): """ Is the property described by a generic LTL formula(e)? """ return False def getPrpFile(self): return self._prpfile def getLTL(self): return self._ltl class PropertyMemSafety(Property): def __init__(self, prpfile = None): Property.__init__(self, prpfile) def memsafety(self): return True class PropertyNoOverflow(Property): def __init__(self, prpfile = None): Property.__init__(self, prpfile) def signedoverflow(self): return True class PropertyDefBehavior(Property): def __init__(self, prpfile = None): Property.__init__(self, prpfile) def undefinedness(self): return True class PropertyUnreachCall(Property): def __init__(self, prpfile = None): Property.__init__(self, prpfile) def assertions(self): return True supported_ltl_properties = { 'CHECK( init(main()), LTL(G ! call(__VERIFIER_error())) )' : 'REACHCALL', 'CHECK( init(main()), LTL(G valid-free) )' : 'MEMSAFETY', 'CHECK( init(main()), LTL(G valid-deref) )' : 'MEMSAFETY', 'CHECK( init(main()), LTL(G valid-memtrack) )' : 'MEMSAFETY', 'CHECK( init(main()), LTL(G ! overflow) )' : 'SIGNED-OVERFLOW', 'CHECK( init(main()), LTL(G def-behavior) )' : 'UNDEF-BEHAVIOR', } supported_properties = { 'valid-deref' : 'MEMSAFETY', 'valid-free' : 'MEMSAFETY', 'valid-memtrack' : 'MEMSAFETY', 'null-deref' : 'NULL-DEREF', 'undefined-behavior' : 'UNDEF-BEHAVIOR', 'undef-behavior' : 'UNDEF-BEHAVIOR', 'undefined' : 'UNDEF-BEHAVIOR', 'signed-overflow' : 'SIGNED-OVERFLOW', 'memsafety' : 'MEMSAFETY', } def _get_prp(prp): from os.path import expanduser, isfile # if property is given in file, read the file epath = abspath(expanduser(prp)) if isfile(epath): prp_list = [] f = open(epath, 'r') for line in f.readlines(): line = line.strip() # ignore empty lines if line: prp_list.append(line) f.close() return (prp_list, epath) # it is not a file, so it is given as a string # FIXME: this does not work for properties given # as LTL (there are spaces) return (prp.split(), None) def _map_property(prps): mapped_prps = [] ltl_prps = [] for prp in prps: prp_key = supported_properties.get(prp) if not prp_key: prp_key = supported_ltl_properties.get(prp) if prp_key: ltl_prps.append(prp) if prp_key: mapped_prps.append(prp_key) else: msg = 'Unknown or unsupported property: {0}\n'.format(prp) msg += 'Supported properties are:\n' for k in supported_ltl_properties.keys(): msg += ' {0}\n'.format(k) msg += "or use shortcuts:\n" for k in supported_properties.keys(): msg += ' {0}\n'.format(k) msg += '\nBy default, we are looking just for assertion violations.\n' raise SymbioticException(msg) return (mapped_prps, ltl_prps) def get_property(symbiotic_dir, prp): if prp is None: prop = PropertyUnreachCall() prop._prpfile = abspath(join(symbiotic_dir, 'specs/PropertyUnreachCall.prp')) return prop prps, prpfile = _get_prp(prp) prps, ltl_prps = _map_property(prps) prop = None if 'REACHCALL' in prps: prop = PropertyUnreachCall(prpfile) if prpfile is None: prop._prpfile = abspath(join(symbiotic_dir, 'specs/PropertyUnreachCall.prp')) elif 'MEMSAFETY' in prps: 
prop = PropertyMemSafety(prpfile) if prpfile is None: prop._prpfile = abspath(join(symbiotic_dir, 'specs/PropertyMemSafety.prp')) elif 'UNDEF-BEHAVIOR' is prps: prop = PropertyDefBehavior(prpfile) if prpfile is None: prop._prpfile = abspath(join(symbiotic_dir, 'specs/PropertyDefBehavior.prp')) elif 'SIGNED-OVERFLOW' in prps: prop = PropertyNoOverflow(prpfile) if prpfile is None: prop._prpfile = abspath(join(symbiotic_dir, 'specs/PropertyNoOverflow.prp')) if prop: prop._ltl = ltl_prps return prop
Python
0.000002
@@ -4965,17 +4965,17 @@ AVIOR' i -s +n prps:%0A
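The fix is a single character, `is` to `in`: identity comparison against a list is always False, so the undef-behavior branch could never fire. Demonstrating the difference:

```python
prps = ['UNDEF-BEHAVIOR']
print('UNDEF-BEHAVIOR' is prps)  # False -- identity test against a list
print('UNDEF-BEHAVIOR' in prps)  # True  -- membership test, as intended
# Newer CPython versions emit a SyntaxWarning for `is` with a literal,
# flagging exactly this class of bug.
```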
9ec25b6a5f8400b68c51ce9c5667c8c0c1648521
Remove unneeded catch
cucco/regex.py
cucco/regex.py
#-*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import re """ Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls """ URL_REGEX = re.compile( r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', re.IGNORECASE) """ Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py """ EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE) try: EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])') except re.error: EMOJI_REGEX = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
Python
0.000019
@@ -651,17 +651,8 @@ E)%0A%0A -try:%0A EMOJ @@ -708,17 +708,17 @@ (%5B%5CU0001 -f +F 300-%5CU00 @@ -719,17 +719,17 @@ 0-%5CU0001 -f +F 64F%5D)%7C(%5B @@ -734,17 +734,17 @@ (%5B%5CU0001 -f +F 680-%5CU00 @@ -749,156 +749,10 @@ 0001 -f6FF%5D)')%0Aexcept re.error:%0A EMOJI_REGEX = re.compile(u'(%5B%5Cu2600-%5Cu27BF%5D)%7C(%5B%5CuD83C%5D%5B%5CuDF00-%5CuDFFF%5D)%7C(%5B%5CuD83D%5D%5B%5CuDC00-%5CuDE4F%5D)%7C(%5B%5CuD83D%5D%5B%5CuDE80-%5CuDE +F6 FF%5D)
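The diff drops the narrow-build surrogate-pair fallback and keeps only the wide `\U` pattern (with uppercased hex digits). A quick check that the remaining pattern still catches an astral-plane emoji, assuming Python 3 or another wide build where the `\U` escapes compile:

```python
import re

EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001F300-\U0001F64F])'
                         u'|([\U0001F680-\U0001F6FF])')
print(bool(EMOJI_REGEX.search(u'launch \U0001F680')))  # True -- rocket emoji
```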
b226a346a5b2402e8bed5386c7a217e81db4a053
Bump to 9.5500.302.
counterpartylib/lib/config.py
counterpartylib/lib/config.py
"""Variables prefixed with `DEFAULT` should be able to be overridden by configuration file and command‐line arguments.""" UNIT = 100000000 # The same across assets. # Versions VERSION_MAJOR = 9 VERSION_MINOR = 5500 VERSION_REVISION = 301 VERSION_STRING = str(VERSION_MAJOR) + '.' + str(VERSION_MINOR) + '.' + str(VERSION_REVISION) # Counterparty protocol TXTYPE_FORMAT = '>I' SHORT_TXTYPE_FORMAT = 'B' TWO_WEEKS = 2 * 7 * 24 * 3600 MAX_EXPIRATION = 4 * 20160 # Two months MEMPOOL_BLOCK_HASH = 'mempool' MEMPOOL_BLOCK_INDEX = 9999999 # SQLite3 MAX_INT = 2**63 - 1 # Bitcoin Core OP_RETURN_MAX_SIZE = 80 # bytes # Currency agnosticism BTC = 'MONA' XCP = 'XMP' BTC_NAME = 'Monacoin' XCP_NAME = 'Monaparty' APP_NAME = XCP_NAME.lower() DEFAULT_RPC_PORT_TESTNET = 14000 DEFAULT_RPC_PORT = 4000 DEFAULT_BACKEND_PORT_TESTNET = 19402 DEFAULT_BACKEND_PORT = 9402 DEFAULT_BACKEND_PORT_TESTNET_BTCD = 18334 DEFAULT_BACKEND_PORT_BTCD = 8334 UNSPENDABLE_TESTNET = 'msVB7uMdzAwgQuph5pL8Zb7aiYgjYoFH1q' UNSPENDABLE_MAINNET = 'MMonapartyMMMMMMMMMMMMMMMMMMMUzGgh' ADDRESSVERSION_TESTNET = b'\x6f' P2SH_ADDRESSVERSION_TESTNET = b'\x75' PRIVATEKEY_VERSION_TESTNET = b'\xef' ADDRESSVERSION_MAINNET = b'\x32' P2SH_ADDRESSVERSION_MAINNET = b'\x37' PRIVATEKEY_VERSION_MAINNET = b'\xb2' MAGIC_BYTES_TESTNET = b'\xfd\xd2\xc8\xf1' # For bip-0010 MAGIC_BYTES_MAINNET = b'\xfb\xc0\xb6\xdb' # For bip-0010 BLOCK_FIRST_TESTNET_TESTCOIN = 79400 BURN_START_TESTNET_TESTCOIN = 79400 BURN_END_TESTNET_TESTCOIN = 1150000 # A long time. BLOCK_FIRST_TESTNET = 79400 BLOCK_FIRST_TESTNET_HASH = '000000001f605ec6ee8d2c0d21bf3d3ded0a31ca837acc98893876213828989d' BURN_START_TESTNET = 79400 BURN_END_TESTNET = 1150000 # A long time. BLOCK_FIRST_MAINNET_TESTCOIN = 1158585 BURN_START_MAINNET_TESTCOIN = 1166000 BURN_END_MAINNET_TESTCOIN = 11500000 # A long time. BLOCK_FIRST_MAINNET = 1158585 BLOCK_FIRST_MAINNET_HASH = '8d347ca2677f41f863bab03d28e13ceed52c6618fba29e6ea3a9e3cf2d190618' BURN_START_MAINNET = 1166000 BURN_END_MAINNET = 1179440 BURN_LIMIT = 3900 # Protocol defaults # NOTE: If the DUST_SIZE constants are changed, they MUST also be changed in counterblockd/lib/config.py as well # TODO: This should be updated, given their new configurability. # TODO: The dust values should be lowered by 90%, once transactions with smaller outputs start confirming faster: <https://github.com/mastercoin-MSC/spec/issues/192> DEFAULT_REGULAR_DUST_SIZE = 54600 # TODO: This is just a guess. I got it down to 5530 satoshis. DEFAULT_MULTISIG_DUST_SIZE = 78000 # <https://bitcointalk.org/index.php?topic=528023.msg7469941#msg7469941> DEFAULT_OP_RETURN_VALUE = 0 DEFAULT_FEE_PER_KB = 100000 # sane/low default, also used as minimum when estimated fee is used ESTIMATE_FEE_PER_KB = False # when True will use `estimatefee` from bitcoind instead of DEFAULT_FEE_PER_KB ESTIMATE_FEE_NBLOCKS = 3 # UI defaults DEFAULT_FEE_FRACTION_REQUIRED = .009 # 0.90% DEFAULT_FEE_FRACTION_PROVIDED = .01 # 1.00% DEFAULT_REQUESTS_TIMEOUT = 20 # 20 seconds DEFAULT_RPC_BATCH_SIZE = 20 # A 1 MB block can hold about 4200 transactions. 
# Custom exit codes EXITCODE_UPDATE_REQUIRED = 5 DEFAULT_CHECK_ASSET_CONSERVATION = True BACKEND_RAW_TRANSACTIONS_CACHE_SIZE = 20000 BACKEND_RPC_BATCH_NUM_WORKERS = 6 UNDOLOG_MAX_PAST_BLOCKS = 100 #the number of past blocks that we store undolog history DEFAULT_UTXO_LOCKS_MAX_ADDRESSES = 1000 DEFAULT_UTXO_LOCKS_MAX_AGE = 3.0 #in seconds ADDRESS_OPTION_REQUIRE_MEMO = 1 ADDRESS_OPTION_MAX_VALUE = ADDRESS_OPTION_REQUIRE_MEMO # Or list of all the address options # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Python
0
@@ -239,17 +239,17 @@ ION = 30 -1 +2 %0AVERSION
1c6f53492fc4cdc132769e4ffcfb076557a45c34
Remove English words from Non-English corpus data
modules/preprocessor/emille_preprocessor.py
modules/preprocessor/emille_preprocessor.py
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- """EMILLE Corpus Preprocessor which inherits from BasePreprocessor.""" import regex as re from base_preprocessor import BasePreprocessor from nltk.tokenize import sent_tokenize from bs4 import BeautifulSoup import sys import unicodedata from collections import defaultdict class EmilleCorpusPreprocessor(BasePreprocessor): """Emille Corpus Preprocessor which preprocesses the EMILLE Corpus.""" def __init__( self, corpus_fname, corpus_dir_path='.', encoding='utf-8', language=None, need_preprocessing=False, limit=None ): """Constructor which initializes the BasePreprocessor constructor.""" self.language = language # If language is not specified, regex pattern for split is default '' self.lang_split_sent = defaultdict(lambda : u'') # Specify language specific split regex pattern lang_split_sent = [ ('hindi', u'[।]'), ] # Store language specific regex pattern in the defaultdict for k,v in lang_split_sent: self.lang_split_sent[k] = v super(EmilleCorpusPreprocessor, self).__init__( corpus_fname, corpus_dir_path=corpus_dir_path, encoding=encoding, need_preprocessing=need_preprocessing, limit=limit ) def _extract_corpus_data(self, data): """Extract contents of the 'p' tags which contain the body.""" soup = BeautifulSoup(data) ptags = soup.find_all('p') content =[] for index in range(len(ptags)): content.append( ". ".join(list(ptags[index].strings))) return ". ".join(content) def _clean_word(self, word): """ Preprocess words after tokenizing words from sentences. * Remove punctuations. """ return re.sub( pattern=ur"((\p{P}+)|(\p{S}+)|([0-9]+))", repl='', string=word.lower() ).strip() def _tokenize_sentences(self, data): """ Sentence tokenize corpus. * Sentence Tokenize the corpus using NLTK. * Remove punctuations [ except space ] from each individual sentences. """ lang_specific_split_pattern = self.lang_split_sent[self.language] for generic_sentence_split in sent_tokenize(data): for sentence in re.split( lang_specific_split_pattern, generic_sentence_split ): clean_sentence = sentence.expandtabs().strip() if len(clean_sentence) > 0: yield clean_sentence def _tokenize_words(self, sentence): """Tokenize Words from sentences.""" return sentence.split() BasePreprocessor.register(EmilleCorpusPreprocessor)
Python
0.000083
@@ -571,20 +571,25 @@ anguage= -None +'english' ,%0A @@ -1901,35 +1901,178 @@ -%22%22%22%0A%0A return re.sub( +* Remove English words from Non-English corpus data.%0A %22%22%22%0A if self.language is %22english%22:%0A regex = ur%22((%5Cp%7BP%7D+)%7C(%5Cp%7BS%7D+)%7C(%5B0-9%5D+))%22%0A else: %0A @@ -2084,16 +2084,16 @@ -pattern= +regex = ur%22( @@ -2118,18 +2118,78 @@ (%5B0-9%5D+) -)%22 +%7C(%5BA-Za-z%5D))%22%0A return re.sub(%0A pattern=regex ,%0A
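Two things change here: the constructor's language default becomes 'english', and `_clean_word` additionally strips [A-Za-z] for non-English corpora. (The recorded diff compares with `is "english"`, which only works through string interning; `==` is the safe spelling.) A stdlib approximation of the rule — the module itself uses the third-party regex package with `\p` classes:

```python
import re
import string

def clean_word(word, language='english'):
    punct = re.escape(string.punctuation)
    if language == 'english':
        pattern = '[0-9%s]+' % punct
    else:
        # Non-English corpora: also strip stray Latin letters.
        pattern = '[0-9%sA-Za-z]+' % punct
    return re.sub(pattern, '', word.lower()).strip()

print(clean_word(u'नमस्ते-hello42', language='hindi'))
# the Devanagari word survives; '-hello42' is removed
```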
efd96f03d51c1fce3ef370cae88928e16f0b9f17
Parse the response with json
buffer/api.py
buffer/api.py
from rauth import OAuth2Session BASE_URL = 'https://api.bufferapp.com/1/%s' class API(OAuth2Session): ''' Small and clean class that embrace all basic operations with the buffer app ''' def get(self, url): if not self.access_token: raise ValueError('Please set an access token first!') return super(OAuth2Session, self).get(url=BASE_URL % url)
Python
0.999999
@@ -1,8 +1,21 @@ +import json%0A%0A from rau @@ -331,12 +331,16 @@ re -turn +sponse = sup @@ -387,8 +387,49 @@ %25 url)%0A +%0A return json.loads(response.content)%0A
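The diff captures the rauth response and returns `json.loads(response.content)`, so callers receive parsed dicts rather than a raw JSON string. The same step in isolation, with a dummy standing in for the response object:

```python
import json

class DummyResponse(object):
    # Stand-in for the rauth response; only .content matters here.
    content = '{"id": "1", "service": "twitter"}'

def get_parsed(response):
    return json.loads(response.content)

profile = get_parsed(DummyResponse())
print(profile['service'])  # twitter -- a dict key, not string slicing
```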
0629183a91046b746d04c1a68e190721a156560b
rename id->fileid (id is a builtin)
build/cook.py
build/cook.py
# # Copyright (c) 2004 Specifix, Inc. # All rights reserved # import recipe import time import files import commit import os import util import sha1helper def cook(repos, cfg, recipeFile): classList = recipe.RecipeLoader(recipeFile) built = [] if recipeFile[0] != "/": raise IOError, "recipe file names must be absolute paths" for (name, recipeClass) in classList.items(): print "Building", name # find the files and ids which were owned by the last version of # this package on the branch fileIdMap = {} fullName = cfg.packagenamespace + "/" + name if repos.hasPackage(fullName): for pkgName in repos.getPackageList(fullName): pkgSet = repos.getPackageSet(pkgName) pkg = pkgSet.getLatestPackage(cfg.defaultbranch) for (id, path, version) in pkg.fileList(): fileIdMap[path] = id ident = IdGen(fileIdMap) srcdirs = [ os.path.dirname(recipeFile), cfg.sourcepath % {'pkgname': name} ] recipeObj = recipeClass(cfg, srcdirs) ourBuildDir = cfg.buildpath + "/" + recipeObj.name recipeObj.setup() recipeObj.unpackSources(ourBuildDir) recipeObj.doBuild(ourBuildDir) rootDir = "/var/tmp/srs/%s-%d" % (recipeObj.name, int(time.time())) util.mkdirChain(rootDir) recipeObj.doInstall(ourBuildDir, rootDir) recipeObj.packages(rootDir) pkgSet = recipeObj.getPackageSet() pkgname = cfg.packagenamespace + "/" + recipeObj.name for (name, buildPkg) in pkgSet.packageSet(): built.append(pkgname + "/" + name) fileList = [] for filePath in buildPkg.keys(): realPath = rootDir + filePath f = files.FileFromFilesystem(realPath, ident(filePath)) fileList.append((f, realPath, filePath)) commit.finalCommit(repos, cfg, pkgname + "/" + name, recipeObj.version, fileList) recipeName = os.path.basename(recipeFile) f = files.FileFromFilesystem(recipeFile, ident(recipeName), type = "src") fileList = [ (f, recipeFile, recipeName) ] for file in recipeObj.allSources(): src = util.findFile(file, srcdirs) srcName = os.path.basename(src) f = files.FileFromFilesystem(src, ident(srcName), type = "src") fileList.append((f, src, srcName)) commit.finalCommit(repos, cfg, pkgname + "/sources", recipeObj.version, fileList) recipeObj.cleanup(ourBuildDir, rootDir) return built class IdGen: def __call__(self, path): if self.map.has_key(path): return self.map[path] return sha1helper.hashString("%s %f %s" % (path, time.time(), self.noise)) def __init__(self, map): # file ids need to be unique. we include the time and path when # we generate them; any data put here is also used uname = os.uname() self.noise = "%s %s" % (uname[1], uname[2]) self.map = map
Python
0
@@ -751,16 +751,20 @@ %0A%09%09for ( +file id, path @@ -817,16 +817,20 @@ path%5D = +file id%0A%0A%09ide
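The rename matters because a loop variable named `id` shadows the builtin for the rest of the enclosing scope. A minimal demonstration of the failure mode and the fix:

```python
def broken():
    for id in ['a', 'b']:      # rebinding shadows builtin id() in this scope
        pass
    id(object())               # the local is now the string 'b'

def fixed():
    for fileid in ['a', 'b']:  # renamed variable leaves the builtin intact
        pass
    return id(object())

try:
    broken()
except TypeError as exc:
    print(exc)                   # 'str' object is not callable
print(isinstance(fixed(), int))  # True
```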
29273b0d7473a1efa955cd35686838780d390106
add more counters
monasca_persister/repositories/persister.py
monasca_persister/repositories/persister.py
# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import monascastatsd from oslo_log import log from monasca_common.kafka.consumer import KafkaConsumer LOG = log.getLogger(__name__) statsd_client = monascastatsd.Client('monasca.persister', dimensions={'service': 'monitoring', 'component': 'monasca-persister'}) statsd_timer = statsd_client.get_timer() class Persister(object): def __init__(self, kafka_conf, zookeeper_conf, repository): self._data_points = [] self._kafka_topic = kafka_conf.topic self._database_batch_size = kafka_conf.database_batch_size self._consumer = KafkaConsumer( kafka_conf.uri, zookeeper_conf.uri, kafka_conf.zookeeper_path, kafka_conf.group_id, kafka_conf.topic, repartition_callback=self._flush, commit_callback=self._flush, commit_timeout=kafka_conf.max_wait_time_seconds) self.repository = repository() self.statsd_flush_error_count = statsd_client.get_counter('flush.errors') self.statsd_msg_count = statsd_client.get_counter('messages.processed') self.statsd_msg_dropped_count = statsd_client.get_counter('messages.dropped') @statsd_timer.timed("flush.time", sample_rate=0.01) def _flush(self): if not self._data_points: return try: self.repository.write_batch(self._data_points) LOG.info("Processed %d messages from topic %s", len(self._data_points), self._kafka_topic) self._data_points = [] self._consumer.commit() except Exception: LOG.exception("Error writing to database: %s", self._data_points) self.statsd_flush_error_count += 1 raise def run(self): try: for raw_message in self._consumer: message = None try: message = raw_message[1] data_point = self.repository.process_message(message) self._data_points.append(data_point) self.statsd_msg_count += 1 except Exception: LOG.exception('Error processing message. Message is ' 'being dropped. %s', message) self.statsd_msg_dropped_count += 1 if len(self._data_points) >= self._database_batch_size: self._flush() except: LOG.exception( 'Persister encountered fatal exception processing ' 'messages. ' 'Shutting down all threads and exiting') os._exit(1)
Python
0
@@ -961,16 +961,85 @@ timer()%0A +statsd_flush_error_count = statsd_client.get_counter('flush.errors')%0A %0A%0Aclass @@ -1708,35 +1708,27 @@ self.statsd_ -flush_error +msg _count = sta @@ -1755,187 +1755,258 @@ er(' -flush.errors')%0A self.statsd_msg_count = statsd_client.get_counter('messages.processed')%0A self.statsd_msg_dropped_count = statsd_client.get_counter('messages.dropped' +messages.consumed', dimensions=%7B'type': self._kafka_topic%7D)%0A self.statsd_msg_dropped_count = statsd_client.get_counter('messages.dropped',%0A dimensions=%7B'type': self._kafka_topic%7D )%0A%0A @@ -2496,29 +2496,68 @@ -self. +global statsd_flush_error_count%0A statsd_flush
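The diff moves the flush-error counter to module level (hence the `global` statement in `_flush`) and tags the message counters with the topic as a statsd dimension rather than baking it into the metric name. A sketch using the same monascastatsd calls as the module; it assumes the package is installed, and the topic name is made up:

```python
import monascastatsd

client = monascastatsd.Client('monasca.persister',
                              dimensions={'service': 'monitoring'})

# Module-level counter shared across instances, mutated via `global`.
statsd_flush_error_count = client.get_counter('flush.errors')

topic = 'metrics'  # assumed topic name
consumed = client.get_counter('messages.consumed',
                              dimensions={'type': topic})
dropped = client.get_counter('messages.dropped',
                             dimensions={'type': topic})

consumed += 1  # same in-place increment style the persister uses
```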
fa1f4a1420f2ea6d66234dae2189a7fb8fdf1f6f
remove debug print
common/lib/xmodule/xmodule/mongo_utils.py
common/lib/xmodule/xmodule/mongo_utils.py
""" Common MongoDB connection functions. """ import logging import pymongo from pymongo import ReadPreference from mongodb_proxy import MongoProxy logger = logging.getLogger(__name__) # pylint: disable=invalid-name # pylint: disable=bad-continuation def connect_to_mongodb( db, host, port=27017, tz_aware=True, user=None, password=None, retry_wait_time=0.1, proxy=True, **kwargs ): """ Returns a MongoDB Database connection, optionally wrapped in a proxy. The proxy handles AutoReconnect errors by retrying read operations, since these exceptions typically indicate a temporary step-down condition for MongoDB. """ # The MongoReplicaSetClient class is deprecated in Mongo 3.x, in favor of using # the MongoClient class for all connections. Update/simplify this code when using # PyMongo 3.x. if kwargs.get('replicaSet'): # Enable reading from secondary nodes in the MongoDB replicaset by using the # MongoReplicaSetClient class. # The 'replicaSet' parameter in kwargs is required for secondary reads. # The read_preference should be set to a proper value, like SECONDARY_PREFERRED. mongo_client_class = pymongo.MongoReplicaSetClient else: # No 'replicaSet' in kwargs - so no secondary reads. mongo_client_class = pymongo.MongoClient # If read_preference is given as a name of a valid ReadPreference.<NAME> constant # such as "SECONDARY_PREFERRED", convert it. Otherwise pass it through unchanged. if 'read_preference' in kwargs: read_preference = getattr(ReadPreference, kwargs['read_preference'], None) if read_preference is not None: kwargs['read_preference'] = read_preference print "host" print host print "port" print port print "tz_aware" print tz_aware print "dict" print dict print "db" print db mongo_conn = pymongo.database.Database( mongo_client_class( host=host, port=port, tz_aware=tz_aware, document_class=dict, **kwargs ), db ) if proxy: mongo_conn = MongoProxy( mongo_conn, wait_time=retry_wait_time ) # default the authSource to be whatever db we are connecting to (for backwards compatiblity) authSource=db if kwargs.get('authSource'): # override if configured to use a different db for auth (e.g. Mongodb Atlas) authSource=kwargs.get('authSource') # If credentials were provided, authenticate the user. if user is not None and password is not None: mongo_conn.authenticate(user, password, authSource) return mongo_conn def create_collection_index( collection, keys, ignore_created=True, ignore_created_opts=True, **kwargs ): """ Create a MongoDB index in a collection. Optionally, ignore errors related to the index already existing. """ # For an explanation of the error codes: # https://github.com/mongodb/mongo/blob/v3.0/src/mongo/db/catalog/index_catalog.cpp#L542-L583 # https://github.com/mongodb/mongo/blob/v3.0/src/mongo/base/error_codes.err#L70-L87 # pylint: disable=invalid-name INDEX_ALREADY_EXISTS = 68 INDEX_OPTIONS_CONFLICT = 85 try: collection.create_index(keys, **kwargs) except pymongo.errors.OperationFailure as exc: errors_to_ignore = [] if ignore_created: errors_to_ignore.append(INDEX_ALREADY_EXISTS) if ignore_created_opts: errors_to_ignore.append(INDEX_OPTIONS_CONFLICT) if exc.code in errors_to_ignore: logger.warning("Existing index in collection '{}' remained unchanged!: {}".format( collection.full_name, exc.details['errmsg']) ) else: raise exc
Python
0.000008
@@ -1732,173 +1732,8 @@ ce%0A%0A - print %22host%22%0A print host%0A print %22port%22%0A print port%0A print %22tz_aware%22%0A print tz_aware%0A print %22dict%22%0A print dict%0A print %22db%22%0A print db%0A%0A
ef1f58b89e9b858c1cb8cca32e3cf982bd8d48f3
fix hash import
meerkat_nest/resources/upload_data.py
meerkat_nest/resources/upload_data.py
""" Data resource for upload data """ from flask_restful import Resource from flask import request, Response from sqlalchemy import create_engine import json import os import uuid import datetime import logging import copy from meerkat_nest import model from meerkat_nest import config from meerkat_nest.util import scramble, validate_request, encrypt from meerkat_nest import message_service db_url = os.environ['MEERKAT_NEST_DB_URL'] engine = create_engine(db_url) class UploadData(Resource): """ Receives JSON data and stores it in Meerkat Nest database Returns:\n HTTP return code\n """ def get(self): return "upload data GET" def post(self): logging.debug("received upload request") logging.debug(str(request.headers)) data_entry = request.get_json() logging.debug(str(data_entry)) # Validate the request try: validate_request(data_entry) except AssertionError as e: msg = "Input was not a valid Meerkat Nest JSON object: " + e.args[0] logging.error(msg) return Response(json.dumps({"message": msg}), status=400, mimetype='application/json') # Upload the data entry in to raw data storage try: uuid_pk = upload_to_raw_data(data_entry) # data_entry['uuid'] = uuid_pk except AssertionError as e: msg = "Raw input type '" + data_entry['content'] + "' is not supported" logging.error(msg) return Response(json.dumps({"message": msg}), status=400, mimetype='application/json') except Exception as e: msg = "Error in uploading data: " + e.args[0] logging.error(msg, exc_info=True) return Response(json.dumps({"message": msg}), status=502, mimetype='application/json') # Process the data entry try: processed_data_entry = process(data_entry) except AssertionError as e: msg = "Data type '" + data_entry['formId'] + "' is not supported for input type '"\ + data_entry['content'] + "'" logging.error(msg) return Response(json.dumps({"message": msg}), status=400, mimetype='application/json') # Store processed data entry in Nest try: store_processed_data(processed_data_entry) except Exception as e: msg = "Error in uploading data: " + e.args[0] logging.error(msg, exc_info=True) return Response(json.dumps({"message": msg}), status=502, mimetype='application/json') # Send processed data forward to the cloud try: sent = message_service.send_data(processed_data_entry) except AssertionError as e: msg = "Error in forwarding data to message queue: " + str(e) logging.error(msg) return Response(json.dumps({"message": msg}), status=502, mimetype='application/json') logging.debug("processed upload request") return Response(json.dumps(processed_data_entry), status=200, mimetype='application/json') def upload_to_raw_data(data_entry): """ Stores raw data in Meerkat Nest database Returns:\n uuid for the PK of the raw data row\n """ uuid_pk = str(uuid.uuid4()) insert_row = None if data_entry['content'] == 'record': insert_row = model.RawDataOdkCollect.__table__.insert().values( uuid=uuid_pk, received_on=datetime.datetime.now(), active_from=datetime.datetime.now(), authentication_token=data_entry['token'], content=data_entry['content'], formId=data_entry['formId'], formVersion=data_entry['formVersion'], data=data_entry['data'] ) assert insert_row is not None, "Content handling not implemented" connection = engine.connect() connection.execute(insert_row) connection.close() return uuid_pk def store_processed_data(data_entry): insert_row = model.data_type_tables[data_entry['formId']].__table__.insert().values( uuid=data_entry['uuid'], data=data_entry['data'] ) try: connection = engine.connect() connection.execute(insert_row) connection.close() 
return data_entry except Exception: raise def process(data_entry): """ Processes raw data and stores the processed data entry in in Meerkat Nest database Returns:\n processed data_entry if processing was successful, False otherwise """ assert data_entry['content'] in ['form', 'record'], "Content not supported" assert data_entry['formId'] in config.country_config['tables'], "Form not supported" processed_data_entry = restructure_aggregate_data(data_entry) processed_data_entry = scramble_fields(processed_data_entry) processed_data_entry = hash_fields(processed_data_entry) processed_data_entry = format_field_keys(processed_data_entry) country = config.country_config processed_data_entry = format_form_name(processed_data_entry) return processed_data_entry def restructure_aggregate_data(data_entry): """ Restructures data from aggregate JSON feed Returns:\n restructured data entry """ restructured_data = data_entry['data'][0] data_entry['data'] = restructured_data data_entry['uuid'] = data_entry['data']['*meta-instance-id*'] return data_entry def scramble_fields(data_entry): """ Scrambles fields in data entry based on configurations Returns:\n data entry structure with scrambled fields """ data_entry_scrambled = data_entry fields = config.country_config.get('scramble_fields', {}).get(data_entry['formId'], {}) for field in fields: if field in data_entry_scrambled['data'].keys(): data_entry_scrambled['data'][field] = scramble(data_entry_scrambled['data'][field]) return data_entry_scrambled def hash_fields(data_entry): """ Hashes fields in data entry based on configurations Returns:\n data entry structure with encrypted fields """ data_entry_hashed = data_entry fields = config.country_config.get('hash_fields', {}).get(data_entry['formId'], {}) for field in fields: if field in data_entry_hashed['data'].keys(): data_entry_hashed['data'][field] = encrypt(data_entry_hashed['data'][field]) return data_entry_hashed def format_field_keys(data_entry): """ Formats the field names in the data entry Returns:\n data entry structure with formatted field namess """ rename_fields = config.country_config.get('rename_fields', {}).get(data_entry['formId'], {}) character_replacements = config.country_config.get('replace_characters', {}).get(data_entry['formId'], []) data_fields = data_entry['data'].keys() # Perform character replacements to all fields for characters in character_replacements: for key in data_fields: if characters[0] in key: data_entry['data'][key.replace(characters[0],characters[1])] = data_entry['data'][key] data_entry['data'].pop(key) # Perform key replacements for key in rename_fields: if key in data_fields: data_entry['data'][rename_fields[key]] = data_entry['data'][key] data_entry['data'].pop(key) return data_entry def format_form_name(data_entry): """ Formats the form name of the data entry Returns:\n data entry structure with formatted form name """ rename_form = config.country_config.get('rename_forms', {}).get(data_entry['formId'], None) if rename_form: data_entry['formId'] = rename_form return data_entry
Python
0.000045
@@ -338,23 +338,20 @@ equest, -encrypt +hash %0Afrom me
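Only the import changes in this record: the module's own `hash` helper is pulled in instead of `encrypt` (note that importing a name called `hash` shadows the Python builtin in that module). A self-contained stand-in for what a field-hashing helper typically does; `hash_value` and its sha256 choice are assumptions, not the project's implementation:

```python
import hashlib

def hash_value(value):
    # Assumed behaviour for illustration; the real util.hash may differ.
    return hashlib.sha256(value.encode('utf-8')).hexdigest()

def hash_fields(data_entry, fields):
    for field in fields:
        if field in data_entry['data']:
            data_entry['data'][field] = hash_value(data_entry['data'][field])
    return data_entry

entry = {'data': {'patient_name': 'alice', 'age': '30'}}
print(hash_fields(entry, ['patient_name'])['data']['patient_name'][:12])
```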
1e041c55c7311808a2253ea8aa556db6a3dededc
Some string
clojure/lang/Seqs.py
clojure/lang/Seqs.py
import RT import Util from interfaces import * class ASeq(Obj, ISeq, Sequential, IHashEq): # Note: Java version implements java.util.List & java.io.Serializable also def __init__(self, meta=None): Obj.__init__(self, meta) self._hash = -1 self._hasheq = -1 def empty(self): return PersistentList.EMPTY def equiv(self, obj): if not isinstance(obj, Sequential): # Note: Java version supports List here also return False ms = RT.seq(obj) s = self.seq() while s is not None: if ms is None or not Util.equiv(s.first(), ms.first()): return False s = s.next() ms = ms.next() return ms is None def equals(self, obj): # TODO: This is java - what about python? if not isinstance(obj, Sequential): # Note: Java version supports List here also return False ms = RT.seq(obj) s = self.seq() while s is not None: if ms is None or not Util.equals(s.first(), ms.first()): return False s = s.next() ms = ms.next() return ms is None def hashCode(self): # TODO: This is java - what about python? if self._hash == -1: rhash = 1 s = self.seq() while s is not None: if s.first() is None: hval = 0 else: hval = s.first().hashCode() rhash = 31 * rhash + hval s = s.next() self._hash = rhash return self._hash def hasheq(self): if self._hasheq == -1: rhash = 1 s = self.seq() while s is not None: rhash = 31 * rhash + Util.hasheq(s.first()) s = s.next() self._hasheq = rhash return self._hasheq def count(self): i = 1 s = self.seq() while s is not None: if isinstance(s, Counted): return i + s.count() s = s.next() i += 1 return i def seq(self): return self def cons(self, o): return Cons(o, self) def more(self): s = self.next() if s is None: return PersistentList.EMPTY return s class Cons(ASeq): # TODO: Java implements Serializable also def __init__(self, a1, a2, a3=None): # To keep the overloading of the Java version I've got to have an optional first argument! if a3 is None: ASeq.__init__(self) self._first = a1 self._more = a2 else: ASeq.__init__(self, a1) self._first = a2 self._more = a3 def first(self): return self._first def next(self): return self.more().seq() def more(self): if self._more is None: return PersistentList.EMPTY return self._more def count(self): return 1 + RT.count(self._more) def withMeta(self, meta): return Cons(meta, self._first, self._more) class PersistentList(ASeq, IReduce, Counted): # TODO: Java implements List also def __init__(self, a, b=None, c=None, d=None): # To keep the overloading of the Java version I've got to have an optional first argument! 
if b is None: ASeq.__init__(self) self._first = a self._rest = None self._count = 1 else: ASeq.__init__(self, a) self._first = b self._rest = c self._count = d @classmethod def create(cls, init): ret = PersistentList.EMPTY for i in reversed(init): ret = ret.cons(i) return ret def first(self): return self._first def next(self): return self._rest def peek(self): return self.first() def pop(self): if self._rest is None: return PersistentList.EMPTY.withMeta(self._meta) return self._rest def count(self): return self._count def cons(self, o): return PersistentList(self.meta(), o, self, self._count + 1) def empty(self): return PersistentList.EMPTY.withMeta(self.meta()) def withMeta(self, meta): if meta != self._meta: return PersistentList(meta, self._first, self._rest, self._count) return self def reduce(self, f, start=None): if start is None: ret = self.first() else: ret = f.invoke(start, self.first()) s = self.next() while s is not None: ret = f.invoke(ret, s.first()) s = s.next() return ret def __iter__(self): class ListIterator: def __init__(self, lst): self.lst = lst def next(self): if self.lst is None: raise StopIteration head = self.lst.first() self.lst = self.lst.next() return head return ListIterator(self) def _inner_str(self): if self._rest is None: return self._first.__str__() return self._first.__str__() + " " + self._rest._inner_str() def __str__(self): return "(" + self._inner_str() + ")" class EmptyList(Obj, IPersistentList, ISeq, Counted): # TODO: Java is also List def hashCode(self): return 1 def equals(self, o): return isinstance(o, Sequential) and RT.seq(o) is None # TODO: Java is also List def equiv(self, o): return self.equals(o) def __init__(self, meta=None): Obj.__init__(self, meta) def first(self): return None def next(self): return None def more(self): return self def cons(self, o): return PersistentList(self.meta(), o, None, 1) def empty(self): return self def withMeta(self, meta): if meta != self.meta(): return PersistentList.EmptyList(meta) return self def peek(self): return None def pop(self): # TODO: Throw better error raise Exception # throw new IllegalStateException("Can't pop empty list"); def count(self): return 0 def seq(self): return None def __iter__(self): return [].__iter__() class creator(RestFn): # TODO: static????? def getRequiredArity(self): return 0 def doInvoke(self, *args): # if isinstance(args, ArraySeq): # TODO: When ArraySeq defined implement this optimisation # Object[] argsarray = (Object[]) ((ArraySeq) args).array; # IPersistentList ret = EMPTY; # for(int i = argsarray.length - 1; i >= 0; --i) # ret = (IPersistentList) ret.cons(argsarray[i]); # return ret; return PersistentList.create(args) def withMeta(self, meta): # TODO: Throw better error raise Exception # throw new UnsupportedOperationException() def meta(self): return None PersistentList.creator = creator PersistentList.EMPTY = PersistentList.EmptyList()
Python
0.999999
@@ -3168,16 +3168,118 @@ _more)%0A%0A + def __str__(self):%0A return self.first().__str__() + self.next().__str__() # TODO: better%0A%0A %0Aclass P
a9ff2da085738770e1b3c03162f79454851df3b8
Fix issue #144, getting wrong field name for harakiri_count
newrelic_plugin_agent/plugins/uwsgi.py
newrelic_plugin_agent/plugins/uwsgi.py
""" uWSGI """ import json import logging from newrelic_plugin_agent.plugins import base LOGGER = logging.getLogger(__name__) class uWSGI(base.SocketStatsPlugin): GUID = 'com.meetme.newrelic_uwsgi_agent' DEFAULT_HOST = 'localhost' DEFAULT_PORT = 1717 def add_datapoints(self, stats): """Add all of the data points for a node :param dict stats: all of the nodes """ self.add_gauge_value('Listen Queue Size', '', stats.get('listen_queue', 0)) self.add_gauge_value('Listen Queue Errors', '', stats.get('listen_queue_errors', 0)) for lock in stats.get('locks', list()): lock_name = lock.keys()[0] self.add_gauge_value('Locks/%s' % lock_name, '', lock[lock_name]) exceptions = 0 harakiris = 0 requests = 0 respawns = 0 signals = 0 apps = dict() for worker in stats.get('workers', list()): id = worker['id'] # totals exceptions += worker.get('exceptions', 0) harakiris += worker.get('harakiris', 0) requests += worker.get('requests', 0) respawns += worker.get('respawns', 0) signals += worker.get('signals', 0) # Add the per worker self.add_derive_value('Worker/%s/Exceptions' % id, '', worker.get('exceptions', 0)) self.add_derive_value('Worker/%s/Harakiri' % id, '', worker.get('harakiri_count', 0)) self.add_derive_value('Worker/%s/Requests' % id, '', worker.get('requests', 0)) self.add_derive_value('Worker/%s/Respawns' % id, '', worker.get('respawn_count', 0)) self.add_derive_value('Worker/%s/Signals' % id, '', worker.get('signals', 0)) for app in worker['apps']: if app['id'] not in apps: apps[app['id']] = {'exceptions': 0, 'requests': 0} apps[app['id']]['exceptions'] += app['exceptions'] apps[app['id']]['requests'] += app['requests'] for app in apps: self.add_derive_value('Application/%s/Exceptions' % app, '', apps[app].get('exceptions', 0)) self.add_derive_value('Application/%s/Requests' % app, '', apps[app].get('requests', 0)) self.add_derive_value('Summary/Applications', '', len(apps)) self.add_derive_value('Summary/Exceptions', '', exceptions) self.add_derive_value('Summary/Harakiris', '', harakiris) self.add_derive_value('Summary/Requests', '', requests) self.add_derive_value('Summary/Respawns', '', respawns) self.add_derive_value('Summary/Signals', '', signals) self.add_derive_value('Summary/Workers', '', len(stats.get('workers', ()))) def fetch_data(self, connection): """Read the data from the socket :param socket connection: The connection :return: dict """ data = super(uWSGI, self).fetch_data(connection, read_till_empty=True) if data: return json.loads(data) return {}
Python
0
@@ -1142,17 +1142,22 @@ harakiri -s +_count ', 0)%0A
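The per-worker metric already read 'harakiri_count'; the summary loop read a 'harakiris' key that uWSGI never emits, so `dict.get` silently returned 0 and the total never moved. The bug in miniature, with a worker dict shaped like uWSGI's stats output:

```python
worker = {'id': 1, 'harakiri_count': 3, 'requests': 10}

wrong = worker.get('harakiris', 0)       # 0 -- key absent, default masks it
right = worker.get('harakiri_count', 0)  # 3
print(wrong, right)
```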
fac6011b310e8632ceaf3eccbc3123dd5af7687a
fix path to save arrays; add tsdiff arrays
nipy/algorithms/diagnostics/screens.py
nipy/algorithms/diagnostics/screens.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Diagnostic 4d image screen ''' from os.path import join as pjoin import numpy as np from ...core.api import Image, drop_io_dim, append_io_dim from ...io.api import save_image from ..utils import pca from .timediff import time_slice_diffs from .tsdiffplot import plot_tsdiffs def screen(img4d, ncomp=10): ''' Diagnostic screen for 4d FMRI image Includes PCA, tsdiffana and mean, std, min, max images. Parameters ---------- img4d : ``Image`` 4d image file ncomp : int, optional number of component images to return. Default is 10 Returns ------- screen : dict with keys: * mean : mean image (all summaries are over last dimension) * std : standard deviation image * max : image of max * min : min * pca : 4D image of PCA component images * pca_res : dict of results from PCA * ts_res : dict of results from tsdiffana Examples -------- >>> import nipy as ni >>> from nipy.testing import funcfile >>> img = ni.load_image(funcfile) >>> screen_res = screen(img) >>> screen_res['mean'].ndim 3 >>> screen_res['pca'].ndim 4 ''' if img4d.ndim != 4: raise ValueError('Expecting a 4d image') data = img4d.get_data() cmap = img4d.coordmap cmap_3d = drop_io_dim(cmap, 't') screen_res = {} # standard processed images screen_res['mean'] = Image(np.mean(data, axis=-1), cmap_3d) screen_res['std'] = Image(np.std(data, axis=-1), cmap_3d) screen_res['max'] = Image(np.max(data, axis=-1), cmap_3d) screen_res['min'] = Image(np.min(data, axis=-1), cmap_3d) # PCA screen_res['pca_res'] = pca.pca(data, axis=-1, standardize=False, ncomp=ncomp) cmap_4d = append_io_dim(cmap_3d, 'l' , 't') screen_res['pca'] = Image(screen_res['pca_res']['basis_projections'], cmap_4d) # tsdiffana screen_res['ts_res'] = time_slice_diffs(data) return screen_res def write_screen_res(res, out_path, out_root, out_img_ext='.nii', pcnt_var_thresh=0.1): ''' Write results from ``screen`` to disk as images Parameters ---------- res : dict output from ``screen`` function out_path : str directory to which to write output images out_root : str part of filename between image-specific prefix and image-specific extension to use for writing images out_img_ext : str, optional extension (identifying image type) to which to write volume images. Default is '.nii' pcnt_var_thresh : float, optional threshold below which we do not plot percent variance explained by components; default is 0.1. This removes the long tail from percent variance plots. 
Returns ------- None ''' import matplotlib.pyplot as plt # save volume images for key in ('mean', 'min', 'max', 'std', 'pca'): fname = pjoin(out_path, '%s_%s%s' % (key, out_root, out_img_ext)) save_image(res[key], fname) # plot, save component time courses ncomp = res['pca'].shape[-1] vectors = res['pca_res']['basis_vectors'] pcnt_var = res['pca_res']['pcnt_var'] np.savez('vectors_components_%s.npz' % out_root, basis_vectors=vectors, pcnt_var=pcnt_var) plt.figure() for c in range(ncomp): plt.subplot(ncomp, 1, c+1) plt.plot(vectors[:,c]) plt.axis('tight') plt.suptitle(out_root + ': PCA basis vectors') plt.savefig(pjoin(out_path, 'components_%s.png' % out_root)) # plot percent variance plt.figure() plt.plot(pcnt_var[pcnt_var >= pcnt_var_thresh]) plt.axis('tight') plt.suptitle(out_root + ': PCA percent variance') plt.savefig(pjoin(out_path, 'pcnt_var_%s.png' % out_root)) # plot tsdiffana plt.figure() axes = [plt.subplot(4, 1, i+1) for i in range(4)] plot_tsdiffs(res['ts_res'], axes) plt.suptitle(out_root + ': tsdiffana') plt.savefig(pjoin(out_path, 'tsdiff_%s.png' % out_root))
Python
0
@@ -3421,16 +3421,41 @@ courses + and some tsdiffana stuff %0A nco @@ -3581,16 +3581,32 @@ p.savez( +pjoin(out_path, 'vectors @@ -3627,32 +3627,33 @@ .npz' %25 out_root +) ,%0A b @@ -3704,16 +3704,152 @@ pcnt_var +,%0A volume_means=res%5B'ts_res'%5D%5B'volume_means'%5D,%0A slice_mean_diff2=res%5B'ts_res'%5D%5B'slice_mean_diff2'%5D,%0A )%0A pl
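Two fixes land in one savez call: the .npz is written under out_path (previously it went to the current working directory), and the tsdiffana arrays ride along with the PCA outputs. Reproducing just that call with dummy arrays and a temp directory:

```python
import os.path
import tempfile
import numpy as np

out_path, out_root = tempfile.mkdtemp(), 'run1'
fname = os.path.join(out_path, 'vectors_components_%s.npz' % out_root)
np.savez(fname,
         basis_vectors=np.zeros((5, 3)),
         pcnt_var=np.ones(3),
         volume_means=np.ones(5),           # from res['ts_res']
         slice_mean_diff2=np.ones((4, 8)))  # from res['ts_res']
print(sorted(np.load(fname).files))
# ['basis_vectors', 'pcnt_var', 'slice_mean_diff2', 'volume_means']
```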
7d4c6dbd282ea4b8d1a7d4f4a784b972163dbc7f
Fix the fix
datastore/services/bulk_sync.py
datastore/services/bulk_sync.py
import uuid
import csv
import logging
import traceback

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from django.db import connection


def success_response():
    return ({"status": "success"}, 200)


def error_response():
    return ({"status": "error"}, 400)


def bulk_sync(records, fields, model_class, keys):
    """
    Upsert data for the given `model_class` using a temporary table.

    Parameters
    ----------
    records: list of dicts with data to insert
        Example: [{ start: 2016-01-01, value: 10.0, project_id: 1 }]
    fields: list of fields expected to import
        Example: ['start', 'value', 'project_id']
    model_class: Django model class
    keys: primary or composite key
        Examples: ['id'] ['start', 'project_id']
    """
    if records is None or len(records) == 0:
        return success_response()

    # Error out if missing fields
    def valid_record(record):
        for field in fields:
            if field not in record:
                return False
        return True

    for record in records:
        if not valid_record(record):
            return error_response()

    # Build schema from field names
    schema = [
        {
            'name': field,
            'type': model_class._meta.get_field(field).db_type(connection)
        }
        for field in fields
    ]

    # Bulk upsert, using (start, metadata_id) as primary key
    cursor = connection.cursor()

    # Create temporary table
    tmp_id = str(uuid.uuid4()).translate(None, '-')
    tablename = model_class._meta.db_table
    tmp_tablename = "tmp_" + tablename + "_" + tmp_id
    schema_statement = ",".join([
        column['name'] + " " + column['type']
        for column in schema
    ])
    create_tmp_table_statement = """
        CREATE TEMPORARY TABLE {tmp_tablename}({schema_statement});
    """.format(tmp_tablename=tmp_tablename, schema_statement=schema_statement)

    # Write the request data to an in-memory CSV file for a subsequent Postgres COPY
    infile = StringIO.StringIO()
    fieldnames = records[0].keys()
    writer = csv.DictWriter(infile, fieldnames=fieldnames)
    for record in records:
        writer.writerow(record)
    infile.seek(0)

    # Build SQL statement for upsert from temporary table to real table
    update_schema_statement = ",".join([
        "{name} = {tmp_tablename}.{name}".format(name=column['name'], tmp_tablename=tmp_tablename)
        for column in schema
    ])
    insert_columns = ",".join([column['name'] for column in schema])
    insert_schema_statement = ",".join([
        "{tmp_tablename}.{name}".format(name=column['name'], tmp_tablename=tmp_tablename)
        for column in schema
    ])
    key_statement = " AND ".join([
        "{tablename}.{key} = {tmp_tablename}.{key}".format(tablename=tablename, tmp_tablename=tmp_tablename, key=key)
        for key in keys
    ])
    where_statement = " AND ".join([
        "{tablename}.{key} IS NULL".format(tablename=tablename, key=key)
        for key in keys
    ])
    upsert_statement = """
        UPDATE {tablename} SET {update_schema_statement} FROM {tmp_tablename} WHERE {key_statement};
        INSERT INTO {tablename}({insert_columns})
        SELECT {insert_schema_statement} FROM {tmp_tablename}
        LEFT OUTER JOIN {tablename} ON {key_statement}
        WHERE {where_statement};
    """.format(tablename=tablename, tmp_tablename=tmp_tablename,
               key_statement=key_statement, where_statement=where_statement,
               update_schema_statement=update_schema_statement,
               insert_columns=insert_columns,
               insert_schema_statement=insert_schema_statement)

    response = success_response()
    try:
        # Create the temporary table
        cursor.execute(create_tmp_table_statement)
        # Load data into temporary table from CSV
        cursor.copy_from(file=infile, table=tmp_tablename, sep=',', columns=fieldnames)
        # Upsert it into the actual table
        cursor.execute(upsert_statement)
    except:
        # Log exception
        logging.error(traceback.print_exc())
        response = error_response()
    finally:
        cursor.close()

    return response
Python
0.967081
@@ -2144,17 +2144,8 @@ e = -StringIO. Stri
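The fix here is the classic two-way StringIO import pitfall: once `StringIO` is imported as a name (`from StringIO import StringIO` on Python 2, `from io import StringIO` on Python 3), that name is already the class, so `StringIO.StringIO()` raises AttributeError. A minimal sketch of the corrected pattern:

try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3

infile = StringIO()  # the imported name is the class itself, so call it directly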
4a83439926181f26e4656d2a2b78021209d3b629
fix the dropout to 0.2 because that is what they use
code/nolearntrail.py
code/nolearntrail.py
from nolearn.dbn import DBN
from readfacedatabases import *
from sklearn import cross_validation
from sklearn.metrics import zero_one_score
from sklearn.metrics import classification_report
import argparse
import numpy as np

from common import *

parser = argparse.ArgumentParser(description='nolearn test')
parser.add_argument('--equalize', dest='equalize', action='store_true', default=False,
                    help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--maxEpochs', type=int, default=1000,
                    help='the maximum number of supervised epochs')

args = parser.parse_args()


def KanadeClassifier():
    clf = DBN(
        [1200, 1500, 1500, 1500, 7],
        learn_rates=0.01,
        learn_rates_pretrain=0.05,
        learn_rate_decays=0.9,
        use_re_lu=True,
        nesterov=True,
        momentum=0.95,
        dropouts=[0.8, 0.5, 0.5, 0.5],
        real_valued_vis=True,
        minibatch_size=20,
        epochs=args.maxEpochs,
        verbose=False)

    data, labels = readKanade(False, None, equalize=args.equalize)
    data = scale(data)
    data, labels = shuffle(data, labels)
    labels = np.argmax(labels, axis=1)

    # Split data for training and testing
    kf = cross_validation.KFold(n=len(data), n_folds=5)
    for train, test in kf:
        break

    trainData = data[train]
    trainLabels = labels[train]
    testData = data[test]
    testLabels = labels[test]

    clf.fit(trainData, trainLabels)

    predictedLabels = clf.predict(testData)
    print "testLabels"
    print testLabels
    print predictedLabels

    print "Accuracy:", zero_one_score(testLabels, predictedLabels)
    print "Classification report:"
    print classification_report(testLabels, predictedLabels)


if __name__ == '__main__':
    KanadeClassifier()
Python
0.002569
@@ -888,9 +888,9 @@ =%5B0. -8 +2 , 0.
ddc571f32212a57f725101314878d17df9124bb8
fix loop range
commands/cmd_roll.py
commands/cmd_roll.py
import random

from lib.command import Command


class RollCommand(Command):
    name = 'roll'
    description = 'Roll some dice.'

    def run(self, message, args):
        if not args:
            self.reply(message, 'No roll specification supplied. Try */roll 3d6*.', parse_mode='Markdown')
            return

        spec = ''.join(char for char in ''.join(args) if char.isdigit() or char == 'd')
        dice_count, __, dice_size = spec.partition('d')

        if not dice_count or not dice_size:
            self.reply(message, 'Invalid roll specification. Example: */roll 3d6*', parse_mode='Markdown')
            return

        dice_count = int(''.join(char for char in dice_count if char.isdigit()))
        dice_size = int(''.join(char for char in dice_size if char.isdigit()))

        if dice_count < 1 or dice_count > 64 or dice_size < 4 or dice_size > 128:
            self.reply(message, 'Invalid roll specification. Must be a minimum of *1d4* and a maximum of *64d128*', parse_mode='Markdown')
            return

        rolls = [random.SystemRandom().randint(1, dice_size) for _ in dice_count]
        self.reply(message, '[{0}] = {1}'.format(', '.join(map(str, rolls)), sum(rolls)))
Python
0.000001
@@ -1111,24 +1111,30 @@ e) for _ in +range( dice_count%5D%0A @@ -1131,16 +1131,17 @@ ce_count +) %5D%0A
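The bug is a missing `range()`: `for _ in dice_count` tries to iterate an int and raises TypeError: 'int' object is not iterable. A standalone sketch of the corrected comprehension:

import random

def roll(dice_count, dice_size):
    # range() is required here -- a bare int is not iterable
    return [random.SystemRandom().randint(1, dice_size) for _ in range(dice_count)]

print(roll(3, 6))  # e.g. [2, 5, 4]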
f92c8c9620524d0414af6f039885c2875a247cd0
add msrest dependency (#7062)
sdk/appconfiguration/azure-appconfiguration/setup.py
sdk/appconfiguration/azure-appconfiguration/setup.py
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup

# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')

# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        pass
except ImportError:
    pass

# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
    history = f.read()

exclude_packages = [
    'tests',
    'examples',
    # Exclude packages that will be covered by PEP420 or nspkg
    'azure',
]
if sys.version_info < (3, 5, 3):
    exclude_packages.extend([
        '*.aio',
        '*.aio.*'
    ])

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + history,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=exclude_packages),
    install_requires=[
        "azure-core<2.0.0,>=1.0.0b2",
    ],
    extras_require={
        ":python_version<'3.0'": ['azure-nspkg'],
        ":python_version<'3.4'": ['enum34>=1.0.4'],
        ":python_version<'3.5'": ['typing'],
        "async:python_version>='3.5'": [
            'aiohttp>=3.0',
            'aiodns>=2.0'
        ],
    }
)
Python
0
@@ -2842,16 +2842,49 @@ quires=%5B + %0A %22msrest%3E=0.5.0%22, %0A
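Applied to the setup above, the dependency list becomes (reconstructed directly from the diff; both pins are verbatim):

    install_requires=[
        "msrest>=0.5.0",
        "azure-core<2.0.0,>=1.0.0b2",
    ],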
fe06c3a839bdc13384250924a4a30d9dd3455fc7
fix archive resource unit test
service/test/unit/resources/test_archive_resource.py
service/test/unit/resources/test_archive_resource.py
import unittest
import json

from mockito import mock, when, verify
from test.unit.resources import DummySite
from twisted.web.test.requesthelper import DummyRequest
from pixelated.resources.mails_resource import MailsArchiveResource
from twisted.internet import defer


class TestArchiveResource(unittest.TestCase):
    def setUp(self):
        self.mail_service = mock()
        self.web = DummySite(MailsArchiveResource(self.mail_service))

    def test_render_POST_should_archive_mails(self):
        request = DummyRequest(['/mails/archive'])
        request.method = 'POST'
        content = mock()
        when(content).read().thenReturn(json.dumps({'idents': ['1', '2']}))
        when(self.mail_service).archive_mail('1').thenReturn(defer.Deferred())
        when(self.mail_service).archive_mail('2').thenReturn(defer.Deferred())
        request.content = content
        d = self.web.get(request)

        def assert_response(_):
            verify(self.mail_service).archive_mail('1')
            verify(self.mail_service).archive_mail('2')

        d.addCallback(assert_response)
        return d
Python
0
@@ -1,12 +1,31 @@ +from twisted.trial import unitt @@ -591,16 +591,44 @@ 'POST'%0A + idents = %5B'1', '2'%5D%0A @@ -636,32 +636,32 @@ ontent = mock()%0A - when(con @@ -717,24 +717,80 @@ ', '2'%5D%7D))%0A%0A + d1 = defer.Deferred()%0A d1.callback(None)%0A when @@ -839,31 +839,73 @@ Return(d -efer.Deferred() +1)%0A d2 = defer.Deferred()%0A d2.callback(None )%0A @@ -960,31 +960,17 @@ Return(d -efer.Deferred() +2 )%0A%0A
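The fix switches to `twisted.trial`'s unittest and stubs `archive_mail` with deferreds that have already fired, so callbacks attached by the resource run synchronously instead of hanging. The pre-fired-deferred idiom, using mockito's `when`/`thenReturn` as in the record:

from twisted.internet import defer

d1 = defer.Deferred()
d1.callback(None)  # already fired: any callback added later runs immediately
when(self.mail_service).archive_mail('1').thenReturn(d1)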
6ecae8f97723b90193bc64e53f8dcee22c3cbf55
add tag to return settings values to django template.
odm2admin/templatetags/admin_extras.py
odm2admin/templatetags/admin_extras.py
# this came from https://djangosnippets.org/snippets/2196/
# adds a collect tag for templates so you can build lists
from django import template
from django.contrib import admin
from django.contrib.gis.geos import GEOSGeometry
from django.core.management import settings

register = template.Library()


@register.tag
def collect(token):
    bits = list(token.split_contents())
    if len(bits) > 3 and bits[-2] == 'as':
        varname = bits[-1]
        items = bits[1:-2]
        return CollectNode(items, varname)
    else:
        raise template.TemplateSyntaxError('%r expected format is "item [item ...] as varname"' % bits[0])


class CollectNode(template.Node):
    def __init__(self, items, varname):
        self.items = map(template.Variable, items)
        self.varname = varname

    def render(self, context):
        context[self.varname] = [i.resolve(context) for i in self.items]
        return ''


class AssignNode(template.Node):
    def __init__(self, name, value):
        self.name = name
        self.value = value

    def render(self, context):
        context[self.name] = self.value.resolve(context, True)
        return ''


def do_assign(parser, token):
    """
    Assign an expression to a variable in the current context.

    Syntax::
        {% assign [name] [value] %}
    Example::
        {% assign list entry.get_related %}
    """
    bits = token.contents.split()
    if len(bits) != 3:
        raise template.TemplateSyntaxError("'%s' tag takes two arguments" % bits[0])
    value = parser.compile_filter(bits[2])
    return AssignNode(bits[1], value)


register = template.Library()
register.tag('assign', do_assign)


# Extra template tags for map
@register.filter()
def get_lat_lng(value, gc):
    lat = GEOSGeometry(value).coords[1]
    lon = GEOSGeometry(value).coords[0]
    if gc == 'lat':
        return "{}".format(lat)
    elif gc == 'lon':
        return "{}".format(lon)


@register.filter()
def filter_coords(value):
    sites = list()
    for site in value:
        lat = GEOSGeometry(site.featuregeometry).coords[1]
        lon = GEOSGeometry(site.featuregeometry).coords[0]
        if lat != 0 and lon != 0:
            sites.append(site)
    return sites


@register.filter()
def get_title(value, short):
    if value == 'site_title':
        return admin.site.site_title
    elif value == 'site_header':
        return admin.site.site_header
    elif value == 'shortcut_title':
        return settings.ADMIN_SHORTCUTS[0]['shortcuts'][short]['title']


@register.filter()
def in_field(value):
    val = value.split(" ")
    return val[0]


# https://stackoverflow.com/questions/771890/how-do-i-get-the-class-of-a-object-within-a-django-template
@register.filter(name='get_class')
def get_class(value):
    return value.__class__.__name__
Python
0
@@ -2640,16 +2640,121 @@ al%5B0%5D%0A%0A%0A +%0A# settings value%0A@register.simple_tag%0Adef settings_value(name):%0A return getattr(settings, name, %22%22)%0A%0A # https:
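The added `settings_value` tag exposes arbitrary settings to templates: after `{% load admin_extras %}`, `{% settings_value "TIME_ZONE" %}` renders the value of `settings.TIME_ZONE` (the setting name here is illustrative, not from the record). Because the implementation is `getattr(settings, name, "")`, an unknown name renders as an empty string rather than raising.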
4036e62862495dbd41da818af3fa5811dab2b0f6
stop doing cd for Library Checker
onlinejudge/service/library_checker.py
onlinejudge/service/library_checker.py
# Python Version: 3.x
"""
the module for yosupo's Library Checker (https://judge.yosupo.jp)
"""

import os
import pathlib
import re
import subprocess
import sys
import urllib.parse
from typing import *

import requests
import toml

import onlinejudge._implementation.logging as log
import onlinejudge._implementation.testcase_zipper
import onlinejudge._implementation.utils as utils
import onlinejudge.type
from onlinejudge.type import TestCase


class LibraryCheckerService(onlinejudge.type.Service):
    def get_url(self) -> str:
        return 'https://judge.yosupo.jp/'

    def get_name(self) -> str:
        return 'Library Checker'

    @classmethod
    def from_url(cls, url: str) -> Optional['LibraryCheckerService']:
        # example: https://judge.yosupo.jp/
        result = urllib.parse.urlparse(url)
        if result.scheme in ('', 'http', 'https') \
                and result.netloc == 'judge.yosupo.jp':
            return cls()
        return None

    @classmethod
    def _get_cloned_repository_path(cls) -> pathlib.Path:
        return utils.user_cache_dir / 'library-checker-problems'

    is_repository_updated = False

    @classmethod
    def _update_cloned_repository(cls) -> None:
        if cls.is_repository_updated:
            return

        try:
            subprocess.check_call(['git', '--version'], stdout=sys.stdout, stderr=sys.stderr)
        except FileNotFoundError:
            log.error('git command not found')
            raise

        path = LibraryCheckerService._get_cloned_repository_path()
        if not path.exists():
            # init the problem repository
            url = 'https://github.com/yosupo06/library-checker-problems'
            log.status('$ git clone %s %s', url, path)
            subprocess.check_call(['git', 'clone', url, str(path)], stdout=sys.stdout, stderr=sys.stderr)
        else:
            # sync the problem repository
            log.status('$ git --git-dir %s pull', str(path / '.git'))
            subprocess.check_call(['git', '--git-dir', str(path / '.git'), 'pull'], stdout=sys.stdout, stderr=sys.stderr)

        cls.is_repository_updated = True


class LibraryCheckerProblem(onlinejudge.type.Problem):
    def __init__(self, *, problem_id: str):
        self.problem_id = problem_id

    def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        self._generate_test_cases_in_cloned_repository()
        path = self._get_problem_directory_path()
        files = []  # type: List[Tuple[str, bytes]]
        files += [(file.name, file.read_bytes()) for file in path.glob('in/*.in') if file.name.startswith('example_')]
        files += [(file.name, file.read_bytes()) for file in path.glob('out/*.out') if file.name.startswith('example_')]
        return onlinejudge._implementation.testcase_zipper.extract_from_files(iter(files))

    def download_system_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        self._generate_test_cases_in_cloned_repository()
        path = self._get_problem_directory_path()
        files = []  # type: List[Tuple[str, bytes]]
        files += [(file.name, file.read_bytes()) for file in path.glob('in/*.in')]
        files += [(file.name, file.read_bytes()) for file in path.glob('out/*.out')]
        return onlinejudge._implementation.testcase_zipper.extract_from_files(iter(files))

    def _generate_test_cases_in_cloned_repository(self) -> None:
        LibraryCheckerService._update_cloned_repository()
        path = LibraryCheckerService._get_cloned_repository_path()

        log.status('$ cd %s', path)
        with utils.chdir(path):
            # generate test cases
            if sys.version_info < (3, 6):
                log.warning("generate.py may not work on Python 3.5 or older")
            if os.name == 'nt':
                log.warning("generate.py may not work on Windows")
            log.status('$ ./generate.py problems.toml -p %s', self.problem_id)
            try:
                subprocess.check_call([sys.executable, 'generate.py', 'problems.toml', '-p', self.problem_id], stdout=sys.stdout, stderr=sys.stderr)
            except subprocess.CalledProcessError:
                log.error("the generate.py failed: check https://github.com/yosupo06/library-checker-problems/issues")
                raise

    def _get_problem_directory_path(self) -> pathlib.Path:
        path = LibraryCheckerService._get_cloned_repository_path()
        problems = toml.load(path / 'problems.toml')
        return path / problems['problems'][self.problem_id]['dir']

    def get_url(self) -> str:
        return 'https://judge.yosupo.jp/problem/{}'.format(self.problem_id)

    def get_service(self) -> LibraryCheckerService:
        return LibraryCheckerService()

    @classmethod
    def from_url(cls, url: str) -> Optional['LibraryCheckerProblem']:
        # example: https://judge.yosupo.jp/problem/unionfind
        result = urllib.parse.urlparse(url)
        if result.scheme in ('', 'http', 'https') \
                and result.netloc == 'judge.yosupo.jp':
            m = re.match(r'/problem/(\w+)/?', result.path)
            if m:
                return cls(problem_id=m.group(1))
        return None

    def download_checker_cpp(self) -> bytes:
        self._generate_test_cases_in_cloned_repository()
        path = self._get_problem_directory_path()
        with open(str(path / "checker.cpp"), "rb") as fh:
            return fh.read()


onlinejudge.dispatch.services += [LibraryCheckerService]
onlinejudge.dispatch.problems += [LibraryCheckerProblem]
Python
0
@@ -3579,114 +3579,8 @@ ()%0A%0A - log.status('$ cd %25s', path)%0A with utils.chdir(path):%0A # generate test cases%0A @@ -3613,20 +3613,16 @@ (3, 6):%0A - @@ -3696,20 +3696,16 @@ - if os.na @@ -3720,36 +3720,32 @@ t':%0A - - log.warning(%22gen @@ -3775,32 +3775,33 @@ n Windows%22)%0A +%0A log.stat @@ -3792,40 +3792,74 @@ - log.status('$ ./generate.py +command = %5Bsys.executable, str(path / 'generate.py'), str(path / ' prob @@ -3871,14 +3871,15 @@ toml - -p %25s +'), '-p ', s @@ -3892,22 +3892,64 @@ oblem_id -) +%5D %0A + log.status('$ %25s', ' '.join(command))%0A @@ -3945,36 +3945,32 @@ ))%0A try:%0A - subp @@ -3991,79 +3991,15 @@ all( -%5Bsys.executable, 'generate.py', 'problems.toml', '-p', self.problem_id%5D +command , st @@ -4038,28 +4038,24 @@ rr)%0A - - except subpr @@ -4076,28 +4076,24 @@ ocessError:%0A - @@ -4195,20 +4195,16 @@ ssues%22)%0A -
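The change drops the `log.status('$ cd ...')`/`utils.chdir` pair in favor of handing the child process absolute paths, which avoids mutating process-wide state. A minimal sketch of the pattern, assuming a cloned repository path (the function name is illustrative):

import pathlib
import subprocess
import sys

def generate(repo: pathlib.Path, problem_id: str) -> None:
    # no os.chdir(): every path given to the child process is absolute
    command = [sys.executable, str(repo / 'generate.py'), str(repo / 'problems.toml'), '-p', problem_id]
    subprocess.check_call(command, stdout=sys.stdout, stderr=sys.stderr)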
b9b7ed8f4ddf139bd031ce7650558f7a0e753718
Fix "compilation" error.
solidity/python/constants/PrintMaxExpPerPrecision.py
solidity/python/constants/PrintMaxExpPerPrecision.py
from math import factorial


MIN_PRECISION = 32
MAX_PRECISION = 63

NUM_OF_VALUES_PER_ROW = 4
assert((MAX_PRECISION+1) % NUM_OF_VALUES_PER_ROW == 0)

NUM_OF_COEFS = 34
maxFactorial = factorial(NUM_OF_COEFS)
coefficients = [maxFactorial/factorial(i) for i in range(NUM_OF_COEFS)]


def fixedExpUnsafe(x,precision):
    xi = x
    res = safeMul(coefficients[0],1 << precision)
    for i in range(1,NUM_OF_COEFS-1):
        res = safeAdd(res,safeMul(xi,coefficients[i]))
        xi = safeMul(xi,x) >> precision
    res = safeAdd(res,safeMul(xi,coefficients[-1]))
    return res / coefficients[0]


def safeMul(x,y):
    assert(x * y < (1 << 256))
    return x * y


def safeAdd(x,y):
    assert(x + y < (1 << 256))
    return x + y


def binarySearch(func,args):
    lo = 1
    hi = 1 << 256
    while lo+1 < hi:
        mid = (lo+hi)/2
        try:
            func(mid,args)
            lo = mid
        except Exception,error:
            hi = mid
    try:
        func(hi,args)
        return hi
    except Exception,error:
        func(lo,args)
        return lo


def getMaxExp(precision,factor):
    maxExp = maxExpArray[MIN_PRECISION]
    for p in range (MIN_PRECISION,precision):
        maxExp = safeMul(maxExp,factor) >> MAX_PRECISION
    fixedExpUnsafe(maxExp,precision)
    return maxExp


def assertFactor(factor,args):
    for precision in range(MIN_PRECISION,MAX_PRECISION+1):
        getMaxExp(precision,factor)


maxExpArray = [0]*(MAX_PRECISION+1)
for precision in range(MAX_PRECISION+1):
    maxExpArray[precision] = binarySearch(fixedExpUnsafe,precision)

growthFactor = binarySearch(assertFactor,None)
maxMaxExpLen = len('0x{:x}'.format(maxExpArray[-1]))

print 'Max Exp Per Precision:'
formatString = '{:s}{:d}{:s}'.format('Precision = {:2d} | Max Exp = {:',maxMaxExpLen,'s} | Ratio = {:9.7f}')
for precision in range(MAX_PRECISION+1):
    maxExp = '0x{:x}'.format(maxExpArray[precision])
    ratio = float(maxExpArray[precision])/float(maxExpArray[precision-1]) if precision > 0 else 0.0
    print formatString.format(precision,maxExp,ratio)
print ''

print 'maxExpArray = ['
formatString = '{:s}{:d}{:s}'.format('{:',maxMaxExpLen,'s},')
for i in range(len(maxExpArray)/NUM_OF_VALUES_PER_ROW):
    items = []
    for j in range(NUM_OF_VALUES_PER_ROW):
        items.append('0x{:x}'.format(maxExpArray[i*NUM_OF_VALUES_PER_ROW+j]))
    print '    '+''.join([formatString.format(item) for item in items])
print ']\n'

print 'Compute the values dynamically, using a growth-factor of 0x{:x} >> {:d}:'.format(growthFactor,MAX_PRECISION)
formatString = '{:s}{:d}{:s}{:d}{:s}'.format('Precision = {:2d} | Theoretical Max Exp = {:',maxMaxExpLen,'s} | Practical Max Exp = {:',maxMaxExpLen,'s} | Difference = {:d}')
for precision in range(MIN_PRECISION,MAX_PRECISION+1):
    theoreticalMaxExp = maxExpArray[precision]
    practicalMaxExp = getMaxExp(precision,maxFactor)
    print formatString.format(precision,'0x{:x}'.format(theoreticalMaxExp),'0x{:x}'.format(practicalMaxExp),theoreticalMaxExp-practicalMaxExp)
Python
0.000013
@@ -2875,19 +2875,22 @@ ecision, -max +growth Factor)%0A
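The "compilation" error in the subject is really a runtime NameError: `maxFactor` is never defined in the script, while `growthFactor = binarySearch(assertFactor, None)` is. The corrected call, per the diff:

practicalMaxExp = getMaxExp(precision, growthFactor)  # was getMaxExp(precision, maxFactor)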
abd6438dfc3d930dc71178bac63a6c0bc41efbcd
Update e2e start help text for extras integrations (#3133)
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/start.py
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/start.py
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os

import click
import pyperclip
from six import string_types

from ..console import (
    CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success, echo_waiting, echo_warning
)
from ...e2e import E2E_SUPPORTED_TYPES, derive_interface, start_environment, stop_environment
from ...testing import get_available_tox_envs
from ...utils import get_tox_file
from ....utils import dir_exists, file_exists, path_join


@click.command(
    context_settings=CONTEXT_SETTINGS,
    short_help='Start an environment'
)
@click.argument('check')
@click.argument('env')
@click.option(
    '--agent', '-a', default='6',
    help=(
        'The agent build to use e.g. a Docker image like `datadog/agent:6.5.2`. For '
        'Docker environments you can use an integer corresponding to fields in the '
        'config (agent5, agent6, etc.)'
    )
)
@click.option('--dev/--prod', help='Whether to use the latest version of a check or what is shipped')
@click.option('--base', is_flag=True, help='Whether to use the latest version of the base check or what is shipped')
@click.pass_context
def start(ctx, check, env, agent, dev, base):
    """Start an environment."""
    if not file_exists(get_tox_file(check)):
        abort('`{}` is not a testable check.'.format(check))

    base_package = None
    if base:
        core_dir = os.path.expanduser(ctx.obj.get('core', ''))
        if not dir_exists(core_dir):
            if core_dir:
                abort('`{}` directory does not exist.'.format(core_dir))
            else:
                abort('`core` config setting does not exist.')

        base_package = path_join(core_dir, 'datadog_checks_base')
        if not dir_exists(base_package):
            abort('`datadog_checks_base` directory does not exist.')

    envs = get_available_tox_envs(check, e2e_only=True)

    if env not in envs:
        echo_failure('`{}` is not an available environment.'.format(env))
        echo_info('See what is available via `ddev env ls {}`.'.format(check))
        abort()

    api_key = ctx.obj['dd_api_key']
    if api_key is None:
        echo_warning(
            'Environment variable DD_API_KEY does not exist; a well-formatted '
            'fake API key will be used instead. You can also set the API key '
            'by doing `ddev config set dd_api_key`.'
        )

    echo_waiting('Setting up environment `{}`... '.format(env), nl=False)
    config, metadata, error = start_environment(check, env)
    if error:
        echo_failure('failed!')
        echo_waiting('Stopping the environment...')
        stop_environment(check, env, metadata=metadata)
        abort(error)
    echo_success('success!')

    env_type = metadata['env_type']

    # Support legacy config where agent5 and agent6 were strings
    agent_ver = ctx.obj.get('agent{}'.format(agent), agent)
    if isinstance(agent_ver, string_types):
        agent_build = agent_ver
        echo_warning(
            "Agent fields missing from ddev config, please update to the latest config, \
falling back to latest docker image"
        )
    else:
        agent_build = agent_ver.get(env_type, env_type)

    interface = derive_interface(env_type)
    if interface is None:
        echo_failure('`{}` is an unsupported environment type.'.format(env_type))
        echo_waiting('Stopping the environment...')
        stop_environment(check, env, metadata=metadata)
        abort()

    if env_type not in E2E_SUPPORTED_TYPES and agent.isdigit():
        echo_failure('Configuration for default Agents are only for Docker. You must specify the full build.')
        echo_waiting('Stopping the environment...')
        stop_environment(check, env, metadata=metadata)
        abort()

    environment = interface(check, env, base_package, config, metadata, agent_build, api_key)

    echo_waiting('Updating `{}`... '.format(agent_build), nl=False)
    environment.update_agent()
    echo_success('success!')

    echo_waiting('Detecting the major version... ', nl=False)
    environment.detect_agent_version()
    echo_info('Agent {} detected'.format(environment.agent_version))

    echo_waiting('Writing configuration for `{}`... '.format(env), nl=False)
    environment.write_config()
    echo_success('success!')

    echo_waiting('Starting the Agent... ', nl=False)
    result = environment.start_agent()
    if result.code:
        click.echo()
        echo_info(result.stdout + result.stderr)
        echo_failure('An error occurred.')
        echo_waiting('Stopping the environment...')
        stop_environment(check, env, metadata=metadata)
        environment.remove_config()
        abort()
    echo_success('success!')

    if base and not dev:
        dev = True
        echo_info(
            'Will install the development version of the check too so the base package can import it (in editable mode)'
        )

    editable_warning = (
        '\nEnv will started with an editable check install for the {} package. '
        'This check will remain in an editable install after '
        'the environment is torn down. Would you like to proceed?'
    )

    if base:
        echo_waiting('Upgrading the base package to the development version... ', nl=False)
        if environment.ENV_TYPE == 'local' and not click.confirm(editable_warning.format('base')):
            echo_success('skipping')
        else:
            environment.update_base_package()
            echo_success('success!')

    if dev:
        echo_waiting('Upgrading `{}` check to the development version... '.format(check), nl=False)
        if environment.ENV_TYPE == 'local' and not click.confirm(editable_warning.format(environment.check)):
            echo_success('skipping')
        else:
            environment.update_check()
            echo_success('success!')

    click.echo()

    try:
        pyperclip.copy(environment.config_file)
    except Exception:
        config_message = 'Config file: '
    else:
        config_message = 'Config file (copied to your clipboard): '

    echo_success(config_message, nl=False)
    echo_info(environment.config_file)

    echo_success('To run this check, do: ', nl=False)
    echo_info('ddev env check {} {}'.format(check, env))

    echo_success('To stop this check, do: ', nl=False)
    echo_info('ddev env stop {} {}'.format(check, env))
Python
0
@@ -3032,17 +3032,17 @@ -%22 +' Agent fi @@ -3112,10 +3112,23 @@ ig, -%5C%0A +'%0A ' fall @@ -3158,17 +3158,20 @@ er image -%22 +...' %0A @@ -6350,32 +6350,192 @@ o: ', nl=False)%0A + if ctx.obj%5B'repo_choice'%5D == 'extras' and not ctx.obj.get('repo') == 'extras':%0A echo_info('ddev -e env stop %7B%7D %7B%7D'.format(check, env))%0A else:%0A echo_info('d
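Besides reflowing the legacy-config warning, the diff adds an extras-aware stop hint, since integrations-extras checks are driven with `ddev -e`. The diff cuts off inside the `else` body; a sketch of the presumable full shape (the `else` line is an assumption, not from the record):

if ctx.obj['repo_choice'] == 'extras' and not ctx.obj.get('repo') == 'extras':
    echo_info('ddev -e env stop {} {}'.format(check, env))
else:
    echo_info('ddev env stop {} {}'.format(check, env))  # assumed continuation of the truncated diff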
44a0d99ca8e292bbdecf5f1db307039041ba6485
Fix migration to set account flags field values to a list (not a set) (thanks @hodgestar)
go/vumitools/account/migrations.py
go/vumitools/account/migrations.py
from vumi.persist.model import ModelMigrator


class UserAccountMigrator(ModelMigrator):

    def migrate_from_unversioned(self, mdata):
        # Copy stuff that hasn't changed between versions
        mdata.copy_values('username', 'created_at')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # Copy stuff that may not exist in the source data
        mdata.set_value('msisdn', mdata.old_data.get('msisdn', None))
        mdata.set_value('confirm_start_conversation', mdata.old_data.get(
            'confirm_start_conversation', False))

        # Add stuff that's new in this version
        mdata.set_value('$VERSION', 1)
        mdata.set_value('tags', None)  # We populate this later
        old_ehconfig = mdata.old_data.get('event_handler_config')
        mdata.set_value('event_handler_config', old_ehconfig or [])

        return mdata

    def migrate_from_1(self, mdata):
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # Add stuff that's new in this version
        mdata.set_value('$VERSION', 2)
        mdata.set_value('routing_table', None)  # We populate this later

        return mdata

    def migrate_from_2(self, mdata):
        # There are no schema changes here, but we've tightened up some
        # validation and added a new field type to work with routing tables.

        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # We no longer allow nulls in these fields, so set them to empty.
        if mdata.new_data['tags'] is None:
            mdata.set_value('tags', [])
        if mdata.new_data['routing_table'] is None:
            mdata.set_value('routing_table', {})

        # Add stuff that's new in this version
        mdata.set_value('$VERSION', 3)

        return mdata

    def migrate_from_3(self, mdata):
        """
        Add the can_manage_optouts boolean and default it to ``False``
        """
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # set the default `can_manage_optouts` value
        mdata.set_value('can_manage_optouts', False)

        # increment version counter
        mdata.set_value('$VERSION', 4)

        return mdata

    def migrate_from_4(self, mdata):
        """
        Add the disable_optouts boolean and default it to ``False``
        """
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table',
            'can_manage_optouts')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # set the default `disable_optouts` value
        mdata.set_value('disable_optouts', False)

        # increment version counter
        mdata.set_value('$VERSION', 5)

        return mdata

    def reverse_from_5(self, mdata):
        """
        Remove disable_optouts boolean.
        """
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table',
            'can_manage_optouts')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # decrement version counter
        mdata.set_value('$VERSION', 4)

        return mdata

    def migrate_from_5(self, mdata):
        """
        Remove the `disable_optouts` and `can_manage_optouts` fields, add the
        `flags` field, then set `'disable_optouts'` and `'can_manage_optouts'`
        as flags if their fields were `True`.
        """
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        # set the can_manage_optouts and disable_optouts
        # flags if their fields were true
        flags = set()
        if mdata.old_data.get('can_manage_optouts'):
            flags.add(u'can_manage_optouts')
        if mdata.old_data.get('disable_optouts'):
            flags.add(u'disable_optouts')
        mdata.set_value('flags', flags)

        # increment version counter
        mdata.set_value('$VERSION', 6)

        return mdata

    def reverse_from_6(self, mdata):
        """
        Bring back the `can_manage_optouts` and `disable_optouts` fields,
        setting them to `True` if corresponding values exist for them in the
        `flags` field, then remove the flags field.
        """
        # Copy stuff that hasn't changed between versions
        mdata.copy_values(
            'username', 'created_at', 'msisdn', 'confirm_start_conversation',
            'tags', 'event_handler_config', 'routing_table')
        mdata.copy_indexes('tagpools_bin', 'applications_bin')

        flags = mdata.old_data.get('flags')
        mdata.set_value('disable_optouts', u'disable_optouts' in flags)
        mdata.set_value('can_manage_optouts', u'can_manage_optouts' in flags)

        # decrement version counter
        mdata.set_value('$VERSION', 5)

        return mdata
Python
0
@@ -4896,22 +4896,30 @@ flags', +sorted( flags) +) %0A%0A
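`sorted()` converts the set to a list, which is what the `flags` field accepts, and makes the stored order deterministic as a bonus. A minimal sketch:

flags = {u'disable_optouts', u'can_manage_optouts'}
mdata.set_value('flags', sorted(flags))  # stores [u'can_manage_optouts', u'disable_optouts'] -- a list, not a set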
1a062decb1f3c4f923f6a0417926021f9cbf10fa
Fix MVN sampling with sum lazy variables
gpytorch/lazy/sum_lazy_variable.py
gpytorch/lazy/sum_lazy_variable.py
from .lazy_variable import LazyVariable
from .non_lazy_variable import NonLazyVariable
from torch.autograd import Variable


class SumLazyVariable(LazyVariable):
    def __init__(self, *lazy_vars):
        lazy_vars = list(lazy_vars)
        for i, lazy_var in enumerate(lazy_vars):
            if not isinstance(lazy_var, LazyVariable):
                if isinstance(lazy_var, Variable):
                    lazy_vars[i] = NonLazyVariable(lazy_var)
                else:
                    raise RuntimeError('All arguments of a SumLazyVariable should be lazy variables or vairables')
        super(SumLazyVariable, self).__init__(*lazy_vars)

        self.lazy_vars = lazy_vars

    def _matmul_closure_factory(self, *args):
        sub_closures = []
        i = 0
        for lazy_var in self.lazy_vars:
            len_repr = len(lazy_var.representation())
            sub_closure = lazy_var._matmul_closure_factory(*args[i:i + len_repr])
            sub_closures.append(sub_closure)
            i = i + len_repr

        def closure(rhs_mat):
            return sum(sub_closure(rhs_mat) for sub_closure in sub_closures)
        return closure

    def _t_matmul_closure_factory(self, *args):
        sub_closures = []
        i = 0
        for lazy_var in self.lazy_vars:
            len_repr = len(lazy_var.representation())
            sub_closure = lazy_var._t_matmul_closure_factory(*args[i:i + len_repr])
            sub_closures.append(sub_closure)
            i = i + len_repr

        def closure(rhs_mat):
            return sum(sub_closure(rhs_mat) for sub_closure in sub_closures)
        return closure

    def _derivative_quadratic_form_factory(self, *args):
        sub_closures = []
        i = 0
        for lazy_var in self.lazy_vars:
            len_repr = len(lazy_var.representation())
            sub_closure = lazy_var._derivative_quadratic_form_factory(*args[i:i + len_repr])
            sub_closures.append(sub_closure)
            i = i + len_repr

        def closure(*closure_args):
            return tuple(var for sub_closure in sub_closures for var in sub_closure(*closure_args))
        return closure

    def _size(self):
        return self.lazy_vars[0].size()

    def _transpose_nonbatch(self):
        lazy_vars_t = list(lazy_var.t() for lazy_var in self.lazy_var)
        return SumLazyVariable(*lazy_vars_t)

    def _batch_get_indices(self, batch_indices, left_indices, right_indices):
        return sum(lazy_var._batch_get_indices(batch_indices, left_indices, right_indices)
                   for lazy_var in self.lazy_vars)

    def _get_indices(self, left_indices, right_indices):
        return sum(lazy_var._get_indices(left_indices, right_indices)
                   for lazy_var in self.lazy_vars)

    def add_jitter(self):
        lazy_vars = list(self.lazy_vars[:-1])
        lazy_vars.append(self.lazy_vars[-1].add_jitter())
        return SumLazyVariable(*lazy_vars)

    def _exact_predictive_covar_inv_quad_form_cache(self, train_train_covar_inv_root, test_train_covar):
        return tuple(lazy_var._exact_predictive_covar_inv_quad_form_cache(train_train_covar_inv_root,
                                                                          test_train_covar_comp)
                     for lazy_var, test_train_covar_comp in zip(self.lazy_vars, test_train_covar.lazy_vars))

    def _exact_predictive_covar_inv_quad_form_root(self, precomputed_cache, test_train_covar):
        # Here the precomputed cache is a list
        # where each component in the list is the precomputed cache for each component lazy variable
        return sum(lazy_var._exact_predictive_covar_inv_quad_form_root(cache_comp, test_train_covar_comp)
                   for lazy_var, cache_comp, test_train_covar_comp in
                   zip(self.lazy_vars, precomputed_cache, test_train_covar.lazy_vars))

    def zero_mean_mvn_samples(self, n_samples):
        return sum(lazy_var.zero_mean_mvn_samples(n_samples) for lazy_var in self.lazy_vars)

    def __add__(self, other):
        if isinstance(other, SumLazyVariable):
            return SumLazyVariable(*(list(self.lazy_vars) + list(other.lazy_vars)))
        elif isinstance(other, LazyVariable):
            return SumLazyVariable(*(list(self.lazy_vars) + [other]))
        else:
            raise AttributeError('other must be a LazyVariable')

    def diag(self):
        return sum(lazy_var.diag() for lazy_var in self.lazy_vars)

    def __getitem__(self, index):
        results = tuple(lazy_var.__getitem__(index) for lazy_var in self.lazy_vars)
        if isinstance(results[0], LazyVariable):
            return SumLazyVariable(*results)
        else:
            return sum(results)
Python
0
@@ -3902,150 +3902,8 @@ ))%0A%0A - def zero_mean_mvn_samples(self, n_samples):%0A return sum(lazy_var.zero_mean_mvn_samples(n_samples) for lazy_var in self.lazy_vars)%0A%0A
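Removing the override means sampling falls through to the base `LazyVariable` implementation, which draws from the full summed covariance. Note the deleted code was not obviously wrong on paper: for independent draws x ~ N(0, A) and y ~ N(0, B), the sum satisfies x + y ~ N(0, A + B), which is exactly the summed covariance. The fix therefore presumably concerns how the base class generates or correlates samples in practice; that detail is not visible in this record.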
e2e64a117430bc7b4a4e25ad40af4f13570eabd2
Change project to render_project
server_dev.py
server_dev.py
from flask import Flask, request, url_for, render_template, flash, redirect, abort
from jinja2 import evalcontextfilter, Markup, escape
from flask_mail import Mail, Message

from projects_controller import ProjectsController
from redirects_controller import RedirectsController

import config
import re

app = Flask(__name__)
app.secret_key = config.SECRET_KEY
app.url_map.strict_slashes = False
app.config.update(config.APP_CONFIG)
app.config.update(config.MAIL_SETTINGS)
mail = Mail(app)

projects_controller = ProjectsController(config.DATA_DIR)
redirects_controller = RedirectsController(config.DATA_DIR)

_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')


@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
    result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') \
        for p in _paragraph_re.split(escape(value)))
    if eval_ctx.autoescape:
        result = Markup(result)
    return result


@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html'), 404


@app.route('/')
def index():
    current_projects = projects_controller.get_current_projects()
    past_projects = projects_controller.get_past_projects()
    return render_template('index.html', current_projects=current_projects, past_projects=past_projects)


@app.route('/start')
def start_project():
    if not request.args:
        return render_template('start_project.html')

    name = request.args.get('name')
    email = request.args.get('email')
    title = request.args.get('title')
    desc = request.args.get('desc')
    fields = {'name': name, 'email': email, 'title': title, 'desc': desc}

    if not name or not email or not title or not desc:
        return render_template('start_project.html', fields=fields)

    msg = Message("New Project Request")
    msg.add_recipient(config.CONTACT_EMAIL)
    msg.html = render_template('project_application.html', name=name, email=email, title=title, desc=desc)
    mail.send(msg)

    flash("Success! Your project has been submitted to the officer board, and you'll hear back from us in a few days.", 'success')
    return redirect(url_for('index'))


@app.route('/<dynamic>')
def dynamic(dynamic):
    projects = projects_controller.get_all_projects()
    redirects = redirects_controller.get_redirects()

    # First, test if if it's a project
    if dynamic in projects:
        project_data = projects[dynamic]
        if 'conclusion_post' in project_data:
            # The project is over, we should redirect to the post
            return redirect(project_data['conclusion_post'])
        else:
            return project(dynamic, project_data)
    # Next, check if it's a redirect
    elif dynamic in redirects:
        return redirect(redirects[dynamic])
    else:
        abort(404)


def project(project_name, project_data):
    if not request.args:
        return render_template('project.html', project_data=project_data)

    fields = {}
    join_email = request.args.get('join[email]')
    ask_email = request.args.get('ask[email]')
    ask_msg = request.args.get('ask[msg]')

    if join_email:
        fields['join'] = {'email': join_email}
        msg = Message("Someone wants to join your project!")
        msg.add_recipient(project_data['project_leaders'][0]['email'])
        msg.html = render_template('mail/join_project.html', email=join_email)
        mail.send(msg)
        flash_msg = "Success! You have successfully asked to join the " + project_data['project_title'] + " project!"
        flash(flash_msg, 'success')
        redirect_path = "/" + project_name
        return redirect(redirect_path)

    if ask_email or ask_msg:
        fields['ask'] = {'email': ask_email, 'msg': ask_msg}
        if ask_email and ask_msg:
            subject = project_data['project_title'] + " Question"
            msg = Message(subject, reply_to=ask_email)
            msg.add_recipient(project_data['project_leaders'][0]['email'])
            msg.html = render_template('mail/project_question.html', msg=ask_msg)
            mail.send(msg)
            flash_msg = "Success! Your question has been submitted, and you should hear from the project manager soon."
            flash(flash_msg, 'success')
            redirect_path = "/" + project_name
            return redirect(redirect_path)

    return render_template('project.html', project_data=project_data, fields=fields)


@app.route('/dev_sync')
def dev_save_and_reload_all_data():
    save_all_data()
    reload_all_data()
    return redirect(redirect_url())


@app.route('/dev_reload')
def dev_reload_all_data():
    reload_all_data()
    return redirect(redirect_url())


def save_all_data():
    projects_controller.write_projects()
    redirects_controller.load_redirects()


def reload_all_data():
    projects_controller.load_projects()
    redirects_controller.load_redirects()


def redirect_url():
    return request.args.get('next') or request.referrer or url_for('index')


if __name__ == '__main__':
    app.run()
Python
0.000001
@@ -2617,16 +2617,23 @@ return +render_ project( @@ -2803,16 +2803,23 @@ 4)%0A%0Adef +render_ project(
218b5c90b265fe0d60c4672b27049cf86b5a68cf
Add mkdir
cyhdfs3/cli.py
cyhdfs3/cli.py
from __future__ import division

import sys
import traceback

import click

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


def main():
    try:
        cli(obj={})
    except Exception as e:
        click.echo(traceback.format_exc(), err=True)
        sys.exit(1)


@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('--namenode', '-n', default='localhost', required=False, help='Namenode host', show_default=True)
@click.option('--port', '-p', default=8020, required=False, help='Namenode port', show_default=True)
@click.pass_context
def cli(ctx, namenode, port):
    import pyximport; pyximport.install()
    import os
    os.environ["LIBHDFS3_CONF"] = "/etc/hadoop/conf/hdfs-site.xml"

    import cyhdfs3
    ctx.obj = {}
    ctx.obj['client'] = cyhdfs3.HDFSClient()


@cli.command(short_help='List a path')
@click.argument('path', required=False, default='/')
@click.option('--recurse', '-R', is_flag=True, default=False, required=False, help='Recurse into subdirectories', show_default=True)
@click.pass_context
def ls(ctx, path, recurse):
    client = ctx.obj['client']
    files = client.list_dir(path, recurse=recurse)
    for f in files:
        row = []
        perm = octal_to_perm(f.permissions)
        t_perm = ('d' if f.kind == 'd' else '-') + perm
        row.append(t_perm)
        s = max([len(j.owner) for j in files]) + 3
        row.append(f.owner.ljust(s))
        s = max([len(j.group) for j in files]) + 3
        row.append(f.group.ljust(s))
        s = max([len(str(j.size)) for j in files]) + 3
        row.append(str(f.size).ljust(s))
        row.append(f.name)
        click.echo(" ".join(row))


def octal_to_perm(octal):
    import stat
    perms = list("-" * 9)
    if octal & stat.S_IRUSR:
        perms[0] = "r"
    if octal & stat.S_IWUSR:
        perms[1] = "w"
    if octal & stat.S_IXUSR:
        perms[2] = "x"
    if octal & stat.S_IRGRP:
        perms[3] = "r"
    if octal & stat.S_IWGRP:
        perms[4] = "w"
    if octal & stat.S_IXGRP:
        perms[5] = "x"
    if octal & stat.S_IROTH:
        perms[6] = "r"
    if octal & stat.S_IWOTH:
        perms[7] = "w"
    if octal & stat.S_IXOTH:
        perms[8] = "x"
    return "".join(perms)


@cli.command(short_help='Display fs stats')
@click.pass_context
def df(ctx):
    client = ctx.obj['client']
    fs = "hdfs://{}:{}".format(client.host, client.port)
    used = client.get_used()
    capacity = client.get_capacity()
    avalable = capacity - used
    block_size = client.get_default_block_size()
    use_p = "%.2f" % (used / capacity)

    headers = ["Filesystem", "Block Size", "Size", "Used", "Available", "Use%"]
    row1 = [fs, block_size, capacity, used, avalable, use_p]

    cols_lenghts = []
    for header, row in zip(headers, row1):
        cols_lenghts.append(max([len(str(_)) for _ in [header, row]]) + 1)

    for row in [headers, row1]:
        lrow = []
        for i, col in enumerate(row):
            if i == 0:
                lrow.append(str(col).ljust(cols_lenghts[i]))
            else:
                lrow.append(str(col).rjust(cols_lenghts[i]))
        click.echo(" ".join(lrow))
Python
0.000003
@@ -24,16 +24,27 @@ division +%0A%0Aimport os %0Aimport @@ -79,16 +79,69 @@ click%0A%0A +import pyximport; pyximport.install()%0Aimport cyhdfs3%0A %0ACONTEXT @@ -653,65 +653,8 @@ t):%0A - import pyximport; pyximport.install()%0A%0A import os%0A @@ -721,27 +721,8 @@ l%22%0A%0A - import cyhdfs3%0A @@ -777,24 +777,253 @@ SClient()%0A%0A%0A +@cli.command(short_help='Create directory and all non-existent parents')%0A@click.argument('path', required=False, default='/')%0A@click.pass_context%0Adef mkdir(ctx, path):%0A client = ctx.obj%5B'client'%5D%0A client.create_dir(path)%0A%0A%0A @cli.command @@ -3347,12 +3347,52 @@ join(lrow))%0A +%0A%0Aif __name__ == '__main__':%0A main()%0A
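The new `mkdir` subcommand is a thin wrapper over `client.create_dir(path)`. A hedged invocation sketch using click's test runner (it assumes a namenode reachable at the default host/port, since the group callback constructs a real `HDFSClient`):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ['mkdir', '/data/new/nested/dir'])  # ends up calling client.create_dir('/data/new/nested/dir')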
283dd9918bd16202bf799c470e8e5b50d2ef1cd6
Increment version number to 0.7.0
datajoint/version.py
datajoint/version.py
__version__ = "0.6.1"
Python
0.99997
@@ -14,9 +14,9 @@ %220. -6.1 +7.0 %22%0A
0f5fe279d6b4641b2a2741271da4f021238f00a1
fix import in generator
dataset_generator.py
dataset_generator.py
import csv
# execfile("C:\\Users\\YONI\\Documents\\Projects\\degree\\attack detection methods\\anomaly_generator\\dataset_generator.py")

ROW_NUM = 10
path = "C:\\Users\\YONI\\Documents\\anomally_detector\\data_sets\\example\\"
users_num = 100
features_num = 20

directory = "data_sets\\"
if not os.path.exists(directory):
    os.makedirs(directory)

users = []
features = []

for i in range(0,users_num):
    users.append('user'+str(i))

for i in range(0,features_num):
    features.append('feature'+str(i))

for user in users:
    with open("data_sets\\"+user+'.csv', 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
        writer.writeheader()
        for i in range(1,ROW_NUM):
            featDic = {}
            for feature in features:
                featDic[feature] = user + '_' + feature + '_' + str(i)
            writer.writerow(featDic)
Python
0
@@ -3,16 +3,26 @@ port csv +%0Aimport os %0A%0A# exec
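Without the added import, the module dies at import time with NameError: name 'os' is not defined on the `os.path.exists` call. The repaired preamble, per the diff:

import csv
import os  # needed by os.path.exists() and os.makedirs() below

directory = "data_sets\\"
if not os.path.exists(directory):
    os.makedirs(directory)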
7ccb9cb0d6e3ce6e3c6c09604af5e2bbdfae63ae
update urls.py
openstax/urls.py
openstax/urls.py
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin

from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls

from .api import api_router
from .functions import S3DocumentServe
from news.search import search
from api import urls as api_urls

urlpatterns = [
    url(r'^django-admin/', include(admin.site.urls)),
    url(r'^admin/', include(wagtailadmin_urls)),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^documents/(?P<document_id>\d+)/(.*)$', S3DocumentServe.as_view(), name='wagtaildocs_serve'),
    url(r'^documents/', include(wagtaildocs_urls)),
    url(r'^images/', include(wagtailimages_urls)),
    url(r'^api/mail/', include('mail.urls')),
    url(r'^api/v2/', api_router.urls),
    url(r'^api/', include(wagtailapi_urls)),
    url(r'^api/', include(api_urls)),
    url(r'^api/search/$', search, name='search'),

    url(r'^api/pages/', include('pages.urls')),
    url(r'^api/books/', include('books.urls')),
    url(r'^api/news/', include('news.urls')),

    # For anything not caught by a more specific rule above, hand over to
    # Wagtail's serving mechanism
    url(r'', include(wagtail_urls)),
]

if settings.DEBUG:
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    from django.views.generic.base import RedirectView

    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += [
        url(r'^favicon\.ico$', RedirectView.as_view(
            url=settings.STATIC_URL + 'pages/images/favicon.ico'))
    ]
Python
0.000001
@@ -469,47 +469,9 @@ ter%0A -from .functions import S3DocumentServe %0A + from @@ -706,112 +706,8 @@ )),%0A - url(r'%5Edocuments/(?P%3Cdocument_id%3E%5Cd+)/(.*)$', S3DocumentServe.as_view(), name='wagtaildocs_serve'),%0A @@ -856,47 +856,8 @@ )),%0A - url(r'%5Eapi/v2/', api_router.urls),%0A @@ -927,32 +927,32 @@ ude(api_urls)),%0A - url(r'%5Eapi/s @@ -984,16 +984,55 @@ earch'), +%0A url(r'%5Eapi/v2/', api_router.urls), %0A%0A ur
853135b61f34ece1363da9b53244e775a2ba16a8
Add docstring for convert_timezone()
datetime/datetime.py
datetime/datetime.py
import datetime


# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
    """ Given a float timestamp it returns the date as a formatted string,
        based on the date `pattern` specified
    """
    return datetime.datetime.fromtimestamp(t).strftime(pattern)


# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
    # TIMEZONE OBJECTS
    tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
    tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)

    # FORMAT TIME WITH FROM TIMEZONE
    time = time.replace(tzinfo=tza)

    # CHANGE TIME ZONE
    newtime = time.astimezone(tzb)
    return newtime
Python
0
@@ -498,97 +498,711 @@ rn)%0A -# import datetime%0Afrom dateutil import tz%0Adef convert_timezone(time, a=%22UTC%22, b=%22local%22): +%0A# ==============================================================================%0A# CONVERT_TIMEZONE%0A# ==============================================================================%0A# import datetime%0Afrom dateutil import tz%0Adef convert_timezone(time, a=%22UTC%22, b=%22local%22):%0A %22%22%22 Given a datetime object, in timezone a, it changes it to timezone b.%0A%0A Args:%0A time: (datetime object)%0A a: (str) timezone code to set the from time as.%0A eg:%0A %22UTC%22%0A %22Australia/Melbourne%22%0A or..%0A %22local%22%0A b: (str) timezone to set the to time as.%0A %22%22%22 %0A
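A usage sketch for the newly documented `convert_timezone` (zone names are illustrative; note that the "local" branch passes an argument to `tz.tzlocal`, which current dateutil releases do not accept, so this sketch sticks to named zones):

import datetime
from dateutil import tz

t = datetime.datetime(2016, 1, 1, 12, 0, 0)
print(convert_timezone(t, a="UTC", b="Australia/Melbourne"))
# 2016-01-01 23:00:00+11:00  (Melbourne observes UTC+11 during southern-summer DST)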
9d2ef02367380c76f39c4bd84ea2f35897d0bebf
Edit school enrollment management command
education/management/commands/create_school_enrollment_script.py
education/management/commands/create_school_enrollment_script.py
'''
Created on May 28, 2013

@author: raybesiga
'''

import datetime
import logging
import itertools
from logging import handlers
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.template import Context, Template
import traceback
from rapidsms.models import Contact, Connection, Backend
from rapidsms_httprouter.models import Message
from django.db import transaction
from rapidsms.messages.outgoing import OutgoingMessage
from script.utils.outgoing import check_progress
from script.models import ScriptProgress, Email, Script, ScriptStep
from poll.models import Poll
from optparse import OptionParser, make_option


class Command(BaseCommand):

    help = "Create school enrollment termly polls"

    def handle(self, **options):
        poll0 = Poll.objects.get(name="total_enrollment_girls")
        poll1 = Poll.objects.get(name="total_enrollment_boys")

        script_school_enrollment_termly = Script.objects.create(
            slug="edtrac_school_enrollment_termly",
            name="School Enrollment Termly Script",
        )
        script_school_enrollment_termly.sites.add(Site.objects.get_current())
        script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
            script=script_headteacher_violence_monthly,
            poll=poll0,
            order=0,
            rule = ScriptStep.WAIT_MOVEON,
            start_offset=0,
            giveup_offset=14400,  # we'll give them four hours to respond
        ))
        script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
            script=script_headteacher_violence_monthly,
            poll=poll1,
            order=1,
            rule=ScriptStep.WAIT_MOVEON,  # for polls, this likely means a poll whose answer we aren't particularly concerned with
            start_offset=0,  # start immediately after the giveup time has elapsed from the previous step
            giveup_offset=14400,  # we'll give them four hours to respond
        ))
Python
0
@@ -864,21 +864,22 @@ termly -polls +script %22%0A %0A @@ -1439,34 +1439,30 @@ ipt_ -headteacher_violence_month +school_enrollment_term ly,%0A @@ -1800,34 +1800,30 @@ ipt_ -headteacher_violence_month +school_enrollment_term ly,%0A
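Besides the help-text tweak, the diff fixes a NameError: both `ScriptStep.objects.create` calls referenced `script_headteacher_violence_monthly`, a name that does not exist in this module, instead of the script created just above. The corrected shape of the first step:

script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
    script=script_school_enrollment_termly,  # was script_headteacher_violence_monthly
    poll=poll0,
    order=0,
    rule=ScriptStep.WAIT_MOVEON,
    start_offset=0,
    giveup_offset=14400,
))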
1dfc3909f15f8fcce993fa9eb6aea2a3bd6fef40
update version number
lib/svtplay_dl/__init__.py
lib/svtplay_dl/__init__.py
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import sys
import re
import os
import logging
from optparse import OptionParser

from svtplay_dl.log import log
from svtplay_dl.utils import get_http_data
from svtplay_dl.service import service_handler, Generic

__version__ = "0.9.2013.03.23"


class Options:
    """
    Options used when invoking the script from another Python script.

    Simple container class used when calling get_media() from another
    Python script.
    The variables corresponds to the command line parameters parsed in main()
    when the script is called directly.

    When called from a script there are a few more things to consider:

    * Logging is done to 'log'. main() calls setup_log() which sets the
      logging to either stdout or stderr depending on the silent level.
      A user calling get_media() directly can either also use setup_log()
      or configure the log manually.

    * Progress information is printed to 'progress_stream' which defaults to
      sys.stderr but can be changed to any stream.

    * Many errors results in calls to system.exit() so catch 'SystemExit'-
      Exceptions to prevent the entire application from exiting if that happens.
    """

    def __init__(self):
        self.output = None
        self.resume = False
        self.live = False
        self.silent = False
        self.quality = None
        self.flexibleq = None
        self.hls = False
        self.other = None
        self.subtitle = False
        self.username = None
        self.password = None


def get_media(url, options):
    stream = service_handler(url)
    if not stream:
        url, stream = Generic().get(url)
        url = url.replace("&amp;", "&")
    if not stream:
        log.error("That site is not supported. Make a ticket or send a message")
        sys.exit(2)

    if not options.output or os.path.isdir(options.output):
        data = get_http_data(url)
        match = re.search("(?i)<title.*>\s*(.*?)\s*</title>", data)
        if match:
            if sys.version_info > (3, 0):
                title = re.sub('[^\w\s-]', '', match.group(1)).strip().lower()
                if options.output:
                    options.output = options.output + re.sub('[-\s]+', '-', title)
                else:
                    options.output = re.sub('[-\s]+', '-', title)
            else:
                title = unicode(re.sub('[^\w\s-]', '', match.group(1)).strip().lower())
                if options.output:
                    options.output = unicode(options.output + re.sub('[-\s]+', '-', title))
                else:
                    options.output = unicode(re.sub('[-\s]+', '-', title))

    stream.get(options, url)


def setup_log(silent):
    if silent:
        stream = sys.stderr
        level = logging.WARNING
    else:
        stream = sys.stdout
        level = logging.INFO

    fmt = logging.Formatter('%(levelname)s %(message)s')
    hdlr = logging.StreamHandler(stream)
    hdlr.setFormatter(fmt)

    log.addHandler(hdlr)
    log.setLevel(level)


def main():
    """ Main program """
    usage = "usage: %prog [options] url"
    parser = OptionParser(usage=usage, version=__version__)
    parser.add_option("-o", "--output",
                      metavar="OUTPUT", help="Outputs to the given filename.")
    parser.add_option("-r", "--resume",
                      action="store_true", dest="resume", default=False,
                      help="Resume a download")
    parser.add_option("-l", "--live",
                      action="store_true", dest="live", default=False,
                      help="Enable for live streams")
    parser.add_option("-s", "--silent",
                      action="store_true", dest="silent", default=False)
    parser.add_option("-q", "--quality", default=0,
                      metavar="quality",
                      help="Choose what format to download.\nIt will download the best format by default")
    parser.add_option("-Q", "--flexible-quality", default=0,
                      metavar="amount", dest="flexibleq",
                      help="Allow given quality (as above) to differ by an amount.")
    parser.add_option("-H", "--hls",
                      action="store_true", dest="hls", default=False)
    parser.add_option("-S", "--subtitle",
                      action="store_true", dest="subtitle", default=False,
                      help="Download subtitle from the site if available.")
    parser.add_option("-u", "--username", default=None,
                      help="Username")
    parser.add_option("-p", "--password", default=None,
                      help="Password")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")

    setup_log(options.silent)

    if options.flexibleq and not options.quality:
        log.error("flexible-quality requires a quality")
        sys.exit(4)

    url = args[0]
    get_media(url, options)
Python
0.000002
@@ -363,12 +363,12 @@ 13.0 -3.23 +4.02 %22%0A%0Ac
3905fe5c8808eaf57fd1dff38329f3753e652bd9
Add update date to the EIF json if it exists in the session.
activity/activity_ConvertJATS.py
activity/activity_ConvertJATS.py
import activity
import json
import os
from os import path

from jats_scraper import jats_scraper
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from provider.execution_context import Session
from provider.article_structure import ArticleInfo

"""
ConvertJATS.py activity
"""


class activity_ConvertJATS(activity.activity):
    def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
        activity.activity.__init__(self, settings, logger, conn, token, activity_task)

        self.name = "ConvertJATS"
        self.version = "1"
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60 * 5
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout = 60 * 5
        self.description = "Process a JATS xml file into EIF"
        self.logger = logger

    def do_activity(self, data=None):
        """
        Do the work
        """
        session = Session(self.settings)
        version = session.get_value(self.get_workflowId(), 'version')
        article_id = session.get_value(self.get_workflowId(), 'article_id')
        article_version_id = article_id + '.' + version
        run = session.get_value(self.get_workflowId(), 'run')

        self.emit_monitor_event(self.settings, article_id, version, run, "Convert JATS", "start",
                                "Starting conversion of article xml to EIF for " + article_id)
        try:
            if self.logger:
                self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))

            expanded_folder_name = session.get_value(self.get_workflowId(), 'expanded_folder')
            expanded_folder_bucket = self.settings.publishing_buckets_prefix + self.settings.expanded_bucket
            print expanded_folder_name

            conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
            bucket = conn.get_bucket(expanded_folder_bucket)

            bucket_folder_name = expanded_folder_name
            (xml_key, xml_filename) = self.get_article_xml_key(bucket, bucket_folder_name)
            if xml_key is None:
                self.logger.error("Article XML path not found")
                return False

            if self.logger:
                self.logger.info("Converting file %s" % xml_filename)

            xml = xml_key.get_contents_as_string()
            if self.logger:
                self.logger.info("Downloaded contents of file %s" % xml_filename)

            json_output = jats_scraper.scrape(xml, article_version=version)

            if self.logger:
                self.logger.info("Scraped file %s" % xml_filename)

            output_folder = article_version_id + '/' + run
            output_name = xml_filename.replace('.xml', '.json')
            output_bucket = self.settings.publishing_buckets_prefix + self.settings.eif_bucket
            output_path = output_folder + '/' + output_name

            destination = conn.get_bucket(output_bucket)
            destination_key = Key(destination)
            output_key = output_path
            destination_key.key = output_key
            destination_key.set_contents_from_string(json_output)

            if self.logger:
                self.logger.info("Uploaded key %s to %s" % (output_path, output_bucket))

            session.store_value(self.get_workflowId(), "eif_filename", output_key)
            eif_object = json.loads(json_output)
            session.store_value(self.get_workflowId(), 'article_path', eif_object.get('path'))
            self.emit_monitor_event(self.settings, article_id, version, run, "Post EIF", "success",
                                    "XML converted to EIF for article " + article_id + " to " + output_key)

        except Exception as e:
            self.logger.exception("Exception when converting article XML to EIF")
            self.emit_monitor_event(self.settings, article_id, version, run, "Convert JATS", "error",
                                    "Error in conversion of article xml to EIF for " + article_id +
                                    " message:" + e.message)
            return False

        return True

    @staticmethod
    def get_article_xml_key(bucket, expanded_folder_name):
        files = bucket.list(expanded_folder_name + "/", "/")
        for bucket_file in files:
            key = bucket.get_key(bucket_file.key)
            filename = key.name.rsplit('/', 1)[1]
            info = ArticleInfo(filename)
            if info.file_type == 'ArticleXML':
                return key, filename
        return None
Python
0
@@ -51,16 +51,46 @@ rt path%0A +from datetime import datetime%0A from jat @@ -119,16 +119,16 @@ scraper%0A - from bot @@ -2652,16 +2652,399 @@ version) +%0A %0A # Add update date if it is in the session%0A update_date = None%0A try:%0A update_date = session.get_value(self.get_workflowId(), 'update_date')%0A except:%0A update_date = None%0A if update_date:%0A json_output = self.add_update_date_to_json(json_output, update_date, xml_filename) %0A%0A @@ -5038,24 +5038,24 @@ y, filename%0A - retu @@ -5058,12 +5058,761 @@ return None%0A +%0A def add_update_date_to_json(self, json_string, update_date, xml_filename = None):%0A %22%22%22%0A Update date is a string in the YYYYMMDDHHMMSS format%0A We want to add update: YYYY-MM-DD to the json%0A xml_filename is just for logging purposes%0A %22%22%22%0A try:%0A json_obj = json.loads(json_string)%0A updated_date = datetime.strptime(update_date, %22%25Y%25m%25d%25H%25M%25S%22)%0A update_date_string = updated_date.strftime('%25Y-%25m-%25d')%0A json_obj%5B'update'%5D = update_date_string%0A json_string = json.dumps(json_obj)%0A except:%0A if self.logger:%0A self.logger.error(%22Unable to set the update date in the json %25s%22 %25 str(xml_filename))%0A return json_string%0A
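For readers skimming the diff: the new `add_update_date_to_json` helper boils down to a strptime/strftime round trip through the JSON body. A standalone sketch of that core step (the function name and sample values here are illustrative, not from the repository):

import json
from datetime import datetime

def add_update_date(json_string, update_date):
    """Parse a YYYYMMDDHHMMSS stamp and store it back as update: YYYY-MM-DD."""
    obj = json.loads(json_string)
    parsed = datetime.strptime(update_date, "%Y%m%d%H%M%S")
    obj['update'] = parsed.strftime('%Y-%m-%d')
    return json.dumps(obj)

print(add_update_date('{"title": "x"}', "20160301120000"))
# -> {"title": "x", "update": "2016-03-01"}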
900ddf92a1cf65270a7b420a848c0f2611647899
handle &amp;
freelancefinder/remotes/sources/workinstartups/workinstartups.py
freelancefinder/remotes/sources/workinstartups/workinstartups.py
"""Wrapper for the WorkInStartups source.""" import json import bleach import maya import requests from jobs.models import Post ADDITIONAL_TAGS = ['p', 'br'] class WorkInStartups(object): """Wrapper for the WorkInStartups source.""" json_api_address = 'http://workinstartups.com/job-board/api/api.php?action=getJobs&type=0&category=0&count=100&random=0&days_behind=0&response=json' def __init__(self, source): """Parse the API.""" self.api_response = requests.get(self.json_api_address) self.source = source def jobs(self): """Iterate through all available jobs.""" # Remove the 'var jobs = ' at the beginning and the ';' at the end response_json = json.loads(self.api_response.text[len("var jobs = "):-1]) for job_info in response_json: post = self.parse_job_to_post(job_info) yield post def parse_job_to_post(self, job_info): """Convert from the rss feed format to a Post.""" created = maya.parse(job_info['mysql_date']).datetime() job_url = 'http://workinstartups.com/job-board/job/{}/{}/'.format(job_info['id'], job_info['url_title']) post = Post( url=job_url, source=self.source, title=job_info['type_name'] + " - " + bleach.clean(job_info['title'], strip=True), description=bleach.clean(job_info['description'], tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True), unique=job_info['id'], created=created, subarea='all', ) return post
Python
0.000001
@@ -1324,16 +1324,38 @@ 'title'%5D +.replace(%22&amp;%22, %22&%22) , strip=
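The fix targets the literal `&amp;` entity only. A broader alternative (an assumption about intent, not what this commit ships) is to decode all HTML entities with the standard library before cleaning:

try:
    from html import unescape          # Python 3.4+
except ImportError:                    # Python 2 fallback
    from HTMLParser import HTMLParser
    unescape = HTMLParser().unescape

print(unescape("Designer &amp; Developer &ndash; remote"))
# -> Designer & Developer – remote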
2c53bc17f98a3e9fdc71ba77f1ab9c1c06f82509
remove test param on srvy
collection/srvy.py
collection/srvy.py
#!/usr/bin/python

import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser

if __name__ == '__main__':

    # Check if running on a Raspberry Pi

    try:
        from gpiozero import Button
    except ImportError:
        print("gpiozero is not installed.")
        pass

    try:
        import pygame
    except ImportError:
        print("pygame is not installed.")
        pass


# VARIABLES

question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'

yes_button = Button(26)
no_button = Button(19)


# FUNCTIONS

def module_installed(module):
    if module in sys.modules:
        return True
    else:
        return False


def get_current_questions(file_location):
    """Add each question from a text file to a list.
    Questions should be separated by newlines."""
    with open(file_location, 'r') as csv_file:
        readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
        questions = []
        for row in readCSV:
            if row:
                question = row[0]
                questions.append(question)
    return questions


def random_questions():
    """pulls returns a random question into main loop."""
    question = get_current_questions(question_csv_location)
    return random.choice(question)


def add_response_to_database(question, opinion):
    """Add response to SQLite 3 database"""
    conn = sqlite3.connect(sqlite_file)
    c = conn.cursor()
    current_date = datetime.now()
    current_unix_time = time.time()
    try:
        c.execute('''INSERT INTO responses
                     (pythonDateTime, unixTime, question, opinion)
                     VALUES (?,?,?,?)''',
                  (current_date, current_unix_time, question, opinion))
        print("Successfully added response to database.")
        print("Thank you!")
    except Exception as e:
        print(e)
    conn.commit()
    conn.close()
    main()


def main():
    qs = random_questions()  # calls questions function that returns random question.
    print(qs)
    while True:
        opinion = input("Opinion [y/n]: ")
        if opinion == "y":
            sleep(.5)
            opinion = 1
            add_response_to_database(qs, opinion)
        elif opinion == "n":
            sleep(.5)
            opinion = -1
            add_response_to_database(qs, opinion)


main()
Python
0.000001
@@ -169,315 +169,50 @@ ser%0A -%0Aif __name__ == '__main__':%0A%0A # Check if running on a Raspberry Pi%0A%0A try:%0A from gpiozero import Button%0A except ImportError:%0A print(%22gpiozero is not installed.%22)%0A pass%0A%0A try:%0A import pygame%0A except ImportError:%0A print(%22pygame is not installed.%22)%0A pass +from gpiozero import Button%0Aimport pygame%0A %0A%0A#
e9eb8317334e21d5e401b03c2eda5c70082ec2e3
add test description in xml reporter
zopkio/reporters/junit_reporter.py
zopkio/reporters/junit_reporter.py
# Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Class used to generate the report.
"""
import os

from jinja2 import Environment, FileSystemLoader

import zopkio.constants as constants
import zopkio.runtime as runtime
import zopkio.utils as utils

from junit_xml import TestSuite, TestCase


class _ReportInfo(object):
  """
  Holds data shared among all report pages
  """
  def __init__(self, output_dir, logs_dir, naarad_dir):
    self.output_dir = os.path.abspath(output_dir)
    self.resource_dir = os.path.join(output_dir, "resources/")
    self.logs_dir = os.path.abspath(logs_dir)
    self.naarad_dir = os.path.abspath(naarad_dir)
    self.config_to_test_names_map = {}
    self.junit_xml_path = output_dir
    self.results_map = {
        "passed": constants.PASSED,
        "failed": constants.FAILED,
        "skipped": constants.SKIPPED,
        # "error": constants.ERROR
    }


class Reporter(object):
  """
  Class that converts the aggregated output into a user-friendly web page.
  """
  def __init__(self, report_name, output_dir, logs_dir, naarad_dir):
    """
    :param report_name: used in the title of the front-end
    :param output_dir: directory where the report will be generated
    :param logs_dir: directory of where the logs will be collected
    :param naarad_dir: directory containing the naarad reports
    """
    self.name = report_name
    self.env = Environment(loader=FileSystemLoader(constants.WEB_RESOURCE_DIR))  # used to load html pages for Jinja2
    self.data_source = runtime.get_collector()
    self.report_info = _ReportInfo(output_dir, logs_dir, naarad_dir)

  def get_config_to_test_names_map(self):
    config_to_test_names_map = {}
    for config_name in self.data_source.get_config_names():
      config_to_test_names_map[config_name] = self.data_source.get_test_names(config_name)
    return config_to_test_names_map

  def get_report_location(self):
    """
    Returns the filename of the landing page
    """
    return os.path.join(self.report_info.junit_xml_path, '_junit_reports.xml')

  def generate(self):
    """
    Generates the report
    """
    self._setup()
    testsuites = []
    for config_name in self.report_info.config_to_test_names_map.keys():
      config_dir = os.path.join(self.report_info.resource_dir, config_name)
      utils.makedirs(config_dir)
      testsuite = self._generate_junit_xml(config_name)
      # print "JUNIT TEST FORMAT------------------------------"
      # print(TestSuite.to_xml_string([testsuites]))
    with open(os.path.join(self.report_info.junit_xml_path, '_junit_reports.xml'), 'w') as file:
      TestSuite.to_file(file, [testsuite], prettyprint=False)

  def _generate_junit_xml(self, config_name):
    testcases = []
    summary_stats = [
        self.data_source.count_tests(config_name),
        self.data_source.count_tests_with_result(config_name, constants.PASSED),
        self.data_source.count_tests_with_result(config_name, constants.FAILED),
        self.data_source.count_tests_with_result(config_name, constants.SKIPPED),
        self.data_source.get_config_exec_time(config_name),
        self.data_source.get_config_start_time(config_name),
        self.data_source.get_config_end_time(config_name)
    ]
    config_data = self.data_source.get_config_result(config_name)
    tests = self.data_source.get_test_results(config_name)
    for test in tests:
      test_time = 0
      if test.func_end_time != None and test.func_start_time != None:
        test_time = test.func_end_time - test.func_start_time
      tc = TestCase(test.name, '', test_time, test.result, test.message)
      if 'failed' in test.result:
        tc.add_failure_info(test.result)
      elif 'skipped' in test.result:
        tc.add_skipped_info(test.result)
      testcases.append(tc)
    testsuite = TestSuite(config_name + self.name, testcases)
    # report_info=self.report_info
    # summary=summary_stats
    return testsuite

  def _setup(self):
    utils.makedirs(self.report_info.output_dir)
    utils.makedirs(self.report_info.resource_dir)
    self.report_info.config_to_test_names_map = self.get_config_to_test_names_map()
Python
0
@@ -4381,22 +4381,27 @@ e, test. -result +description , test.m
a2d9edbe8b154858fe89be12ca281a926ad46ac7
Remove double negative
api/init/health/routes.py
api/init/health/routes.py
import os
from flask import jsonify
from flask_restplus import Resource, Namespace


# pylint: disable=unused-variable
def register_health(namespace: Namespace):
    """Method used to register the health check namespace and endpoint."""

    @namespace.route('/health')
    @namespace.doc()
    class Health(Resource):

        def get(self):
            """
            Get API health status
            Use this endpoint to get the health status of this API.
            """
            is_debug = os.environ.get('FLASK_DEBUG')
            mode = 'production' if not is_debug else 'debug'
            message = {'message': f'MobyDQ API running in {mode} mode'}
            return jsonify(message)
Python
0.999999
@@ -545,27 +545,18 @@ = ' -production +debug ' if -not is_d @@ -566,21 +566,26 @@ g else ' -debug +production '%0A
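The rewrite reads front-to-back with no negation. One caveat the commit does not address: `os.environ.get` returns strings, so any non-empty value counts as truthy. A sketch:

import os

is_debug = os.environ.get('FLASK_DEBUG')        # a string, or None if unset
mode = 'debug' if is_debug else 'production'    # same truth table, no negation

# Any non-empty string is truthy, so FLASK_DEBUG=0 still selects 'debug';
# a stricter parse would be:
strict_debug = os.environ.get('FLASK_DEBUG', '') not in ('', '0', 'false', 'False')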
9a2b3477dcfd3e8ba6fac43678713f5213fe87b2
Caught edge case for initials of n=0 v. n=None
dedupe/predicates.py
dedupe/predicates.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import re


def tokenFieldPredicate(field):
    """returns the tokens"""
    return tuple(field.split())


def commonIntegerPredicate(field):
    """"return any integers"""
    return tuple(re.findall("\d+", field))


def nearIntegersPredicate(field):
    """return any integers N, N+1, and N-1"""
    ints = sorted([int(i) for i in re.findall("\d+", field)])
    near_ints = set([])
    [near_ints.update((i - 1, i, i + 1)) for i in ints]
    return tuple(near_ints)


def ngrams(field, n):
    """ngrams returns all unique, contiguous sequences of n characters
    of a given field.

    :param field: the string to be
    :param n: the number of characters to be included in each gram

    usage:
    >>> from dedupe.dedupe.predicated import ngrams
    >>> ngrams("deduplicate", 3)
    ('ded', 'edu', 'dup', 'upl', 'pli', 'lic', 'ica', 'cat', 'ate')
    """
    return tuple([field[pos:pos + n] for pos in xrange(len(field) - n + 1)])


def commonFourGram(field):
    """return 4-grams"""
    return ngrams(field, 4)


def commonSixGram(field):
    """"return 6-grams"""
    return ngrams(field, 6)


def initials(field, n=None):
    """predicate which returns first a tuple containing the first n chars
    of a field if and only if the field contains at least n characters,
    or an empty tuple otherwise.

    :param field: the string
    :type n: int, default None

    usage:
    >>> initials("dedupe", 7)
    ()
    >>> initials("deduplication", 7)
    ('dedupli', )
    >>> initials("noslice")
    ('noslice', )
    """
    return (field[:n], ) if not n or len(field) > n-1 else ()


def wholeFieldPredicate(field):
    """return the whole field

    consider replacing with initials(field)
    """
    return (field, ) if field else ()


def sameThreeCharStartPredicate(field):
    """return first three characters"""
    return initials(field, 3)


def sameFiveCharStartPredicate(field):
    """return first five characters"""
    return initials(field, 5)


def sameSevenCharStartPredicate(field):
    """return first seven characters"""
    return initials(field, 7)
Python
0.999999
@@ -1623,12 +1623,20 @@ if n -ot n + is not None or
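The underlying pitfall is that `not n` treats 0 and None the same, while an identity check keeps them apart. A minimal illustration, independent of the dedupe code:

for n in (None, 0, 3):
    print(repr(n), not n, n is None)
# None True  True
# 0    True  False   <- `not n` cannot tell 0 apart from None
# 3    False False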
9dcfab557c30a040580afe5ab8c5d68bf9d64a37
Fix log summary tab. Some odd cacheops problem here. Possibly caching a function that calls other @cached functions does not work. Removed caching on message_fields_recorded and types_recorded.
afterflight/logbrowse/fltdata.py
afterflight/logbrowse/fltdata.py
#Copyright 2013 Aaron Curtis

#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at

#http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

"""
High-level data retrieval from the Flight model. Most of these could be
methods on the Flight model, but cacheops doesn't work with instance methods
so we do the cacheable stuff here. Some of these are then wrapped by flight
instance methods so they are easy to use in templates. TODO better solution.
"""

from cacheops import cached
#not using 'from logbrowse.models import MavDatum' because of circular dependency
import logbrowse
from af_utils import dt2jsts
import scipy, pandas, pdb


@cached()
def initial_plot(flight):
    #first we try to plot
    #right_yax='Mot 1'
    #left_yax='Mot 2'
    if flight.is_tlog:
        right_yax='throttle'
        left_yax='servo3_raw'
    else:
        msg_fields=message_fields_recorded(flight)
        #pdb.set_trace()
        if 'Mot 1' in msg_fields:
            #probably because it is a dataflash log, not a tlog
            right_yax='Mot 1'
            left_yax='Mot 2'
        elif 'Mot1' in msg_fields:
            #probably because it is a dataflash log, not a tlog
            right_yax='Mot1'
            left_yax='Mot2'
        elif 'roll_sensor' in msg_fields:
            right_yax='roll_sensor'
            left_yax='pitch_sensor'
        else:
            right_yax=msg_fields[0]
            left_yax=msg_fields[1]
    return {"labels":"['%s','%s']" % (right_yax, left_yax),
            "data":"[[%s],[%s]]"%(flight.sensor_plot_data(right_yax),flight.sensor_plot_data(left_yax))}


@cached()
def message_fields_recorded(flight):
    return logbrowse.models.MavDatum.objects.filter(message__flight=flight).values('msgField').order_by('msgField').distinct().values_list('msgField',flat=True)


@cached()
def message_types_recorded(flight):
    return logbrowse.models.MavDatum.objects.filter(message__flight=flight).values_list('message__msgType',flat=True).order_by('message__msgType').distinct()


@cached()
def count_messages_by_type(flight):
    msgTypeCounts=[None]*len(flight.message_types_recorded)
    x=0
    for msgType in flight.message_types_recorded:
        msgTypeCounts[x]=flight.mavmessage_set.filter(msgType=msgType).count()
        x+=1
    return zip(flight.message_types_recorded, msgTypeCounts)


@cached()
def lat_lons_JSON(flight):
    return scipy.array([flight.lons(), flight.lats()]).transpose().tolist()


@cached()
def gps_timestamps(flight):
    #unfortunately the timestamps end up with L for 'long' in the JS unless we remove them here.
    #Actually, could probably do the multiplication by 1000 to convert to JS timestamp on the client side.
    #return str(longTstamps).replace('L','')
    return [dt2jsts(timestamp) for timestamp in flight.gps_times()]


def gps_times(flight):
    return logbrowse.models.MavMessage.objects.filter(flight=flight,msgType__in=['GLOBAL_POSITION_INT','df_GPS','GPS']).order_by('timestamp').values_list('timestamp',flat=True)


@cached()
def sensor_plot_data(flight, msg_field):
    dataQ=logbrowse.models.MavDatum.objects.filter(message__flight=flight, msgField=msg_field)
    vals=dataQ.values_list('message__timestamp','value')
    return ','.join([r'[%.1f,%.1f]' % (dt2jsts(timestamp),value) for timestamp, value in vals])


@cached()
def sensor_plot_pandas(flight, msg_field):
    thrindex=logbrowse.models.MavDatum.objects.filter(message__flight=flight, msgField=msg_field).values_list('message_id',flat=True)
    thr=logbrowse.models.MavDatum.objects.filter(message__flight=flight, msgField=msg_field).values_list('value',flat=True)
    return pandas.Series(thr, index=thrindex)


def length_str(flight):
    # try:
    flt_length=flight.end_time() - flight.start_time()
    return str(flt_length)[:7]
    # except:
    #     return None


@cached()
def start_time(flight):
    try:
        return flight.mavmessage_set.exclude(msgType='BAD_DATA').order_by('timestamp')[0].timestamp
    except IndexError:
        pass


@cached()
def end_time(flight):
    #if self.mavmessage_set.exclude(msgType='BAD_DATA').exists():
    try:
        return flight.mavmessage_set.exclude(msgType='BAD_DATA').latest('timestamp').timestamp
    except IndexError:
        pass


@cached()
def location(flight):
    lat=logbrowse.models.MavDatum.objects.filter(msgField__in=['lat','Lat'], message__flight=flight).latest().value
    lon=logbrowse.models.MavDatum.objects.filter(msgField__in=['lon','Long','Lng'], message__flight=flight).latest().value
    return (lat, lon)


def invalidate_caches(flight):
    flight_funcs=[initial_plot, message_fields_recorded, message_types_recorded,
                  count_messages_by_type, lat_lons_JSON, gps_timestamps,
                  sensor_plot_data, sensor_plot_pandas, start_time, end_time]
    for fn in flight_funcs:
        fn.invalidate(flight)
Python
0
@@ -1102,13 +1102,8 @@ ndas -, pdb %0A%0A@c @@ -2066,16 +2066,17 @@ yax))%7D%0A%0A +# @cached( @@ -2272,32 +2272,33 @@ ld',flat=True)%0A%0A +# @cached()%0Adef me @@ -2590,16 +2590,18 @@ recorded +() )%0A x= @@ -2650,16 +2650,18 @@ recorded +() :%0A @@ -2790,16 +2790,18 @@ recorded +() , msgTyp
40d2cebdb41bd13040ab747df4f8046d1c597d50
Fix mets dumping
demetsiiify/tasks.py
demetsiiify/tasks.py
import os.path
import smtplib
import time
from collections import deque, OrderedDict
from email.message import EmailMessage

import lxml.etree as ET
import requests
import shortuuid
from flask import current_app, g, url_for
from rq import get_current_job

from . import mets
from . import iiif
from . import make_queues, make_redis
from .models import db, Manifest, IIIFImage, Image, Identifier


EMAIL_TEMPLATE = """
The IIIF manifests for your METS files are now available for viewing
at the following addresses:

{}
"""


def get_redis():
    if not hasattr(g, 'redis'):
        g.redis = make_redis()
    return g.redis


queue, notification_queue, failed_queue = make_queues(get_redis())


def import_mets_job(mets_url):
    job = get_current_job()
    try:
        xml = requests.get(mets_url, allow_redirects=True).content
        tree = ET.fromstring(xml)
        doc = mets.MetsDocument(tree, url=mets_url)
        if current_app.config['DUMP_METS']:
            xml_path = os.path.join(current_app.config['DUMP_METS'],
                                    doc.primary_id + ".xml")
            with open(xml_path, "w") as fp:
                fp.write(ET.tostring(tree, pretty_print=True))
        times = deque(maxlen=50)
        start_time = time.time()
        for idx, total in doc.read_files(jpeg_only=True, yield_progress=True):
            duration = time.time() - start_time
            times.append(duration)
            if job:
                job.meta.update(dict(
                    current_image=idx,
                    total_images=total,
                    eta=(sum(times)/len(times)) * (total - idx)))
                job.save()
            start_time = time.time()
        if not doc.files:
            raise mets.MetsImportError(
                "METS at {} does not reference any JPEG images"
                .format(mets_url))
        doc.read_physical_items()
        doc.read_toc_entries()
        doc.read_metadata()
        iiif_map = OrderedDict()
        thumbs_map = {}
        for phys_id, itm in doc.physical_items.items():
            image_ident = shortuuid.uuid()
            largest_image = max(itm.files, key=lambda f: f.height)
            smallest_image = min(itm.files, key=lambda f: f.height)
            iiif_info = iiif.make_info_data(
                image_ident, [(f.width, f.height) for f in itm.files])
            db_iiif_img = IIIFImage(iiif_info, id=image_ident)
            IIIFImage.save(db_iiif_img)
            for f in itm.files:
                db_img = Image(f.url, f.width, f.height, f.mimetype,
                               image_ident)
                Image.save(db_img)
            iiif_map[phys_id] = (image_ident, itm.label,
                                 (largest_image.width, largest_image.height))
            thumbs_map[image_ident] = (smallest_image.width,
                                       smallest_image.height)
        existing_manifest = Manifest.by_origin(mets_url)
        if existing_manifest:
            manifest_id = existing_manifest.id
        else:
            manifest_id = doc.primary_id
        manifest = iiif.make_manifest(manifest_id, doc, iiif_map, thumbs_map)
        db_manifest = Manifest(mets_url, manifest, id=manifest_id,
                               label=manifest['label'])
        db_manifest.identifiers = [
            Identifier(id_, type, db_manifest.id)
            for type, id_ in doc.identifiers.items()]
        Manifest.save(db_manifest)
        Identifier.save(*db_manifest.identifiers)
        # Since the METS might have already been indexed, there's the
        # possibility that the IIIF images might have changed, leading to
        # orphaned images.
        IIIFImage.delete_orphaned()
        db.session.commit()
        redis = get_redis()
        recipients = redis.smembers('recipients.{}'.format(job.id))
        for recipient in recipients:
            redis.srem('notifications.{}.jobs'.format(recipient), job.id)
            redis.sadd('notifications.{}.manifests'.format(recipient),
                       manifest['@id'])
            notify_email(recipient)
        redis.delete('recipients.{}'.format(job.id))
        return manifest['@id']
    except Exception as e:
        db.session.rollback()
        raise e


# NOTE: This is not actually a task, but since it depends on the tasks,
# it lives in the same module...
def notify_email(recipient):
    redis = get_redis()
    jobs_key = 'notifications.{}.jobs'.format(recipient)
    num_outstanding_jobs = redis.scard(jobs_key)
    if num_outstanding_jobs > 0:
        return
    manifests_key = 'notifications.{}.manifests'.format(recipient)
    manifest_ids = redis.smembers(manifests_key)
    redis.delete(jobs_key, manifests_key)
    msg = EmailMessage()
    msg['Subject'] = 'Your IIIF manifests are ready'
    msg['From'] = 'notifications@{}'.format(current_app.config['SERVER_NAME'])
    msg['To'] = recipient
    msg.set_content(EMAIL_TEMPLATE.format("\n".join(
        url_for('view.view_endpoint', manifest_id=manifest_id, _external=True)
        for manifest_id in manifest_ids)))
    with smtplib.SMTP(current_app.config['SMTP_SERVER']) as s:
        s.login(current_app.config['SMTP_USER'],
                current_app.config['SMTP_PASSWORD'])
        s.send_message(msg, to_addrs=[recipient])
Python
0.000001
@@ -1117,16 +1117,17 @@ path, %22w +b %22) as fp
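The one-character fix matters because `lxml.etree.tostring` returns bytes; on Python 3, writing bytes to a text-mode file raises TypeError. A self-contained sketch (the file path is illustrative):

import lxml.etree as ET

tree = ET.fromstring(b"<mets><file/></mets>")
payload = ET.tostring(tree, pretty_print=True)   # bytes, not str

with open("/tmp/mets_dump.xml", "wb") as fp:     # "w" raises TypeError on Python 3
    fp.write(payload)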
2fdfb070aaf7f4ae01721853e71dee0932fea2eb
Replace deprecated API tff.learning.build_federarted_averaging_process() with new API.
tensorflow_federated/python/learning/federated_evaluation_test.py
tensorflow_federated/python/learning/federated_evaluation_test.py
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import numpy as np
import tensorflow as tf

from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.learning import federated_evaluation
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.learning import model_utils


class TestModel(model.Model):

  def __init__(self):
    self._variables = collections.namedtuple('Vars', 'max_temp num_over')(
        max_temp=tf.Variable(
            lambda: tf.zeros(dtype=tf.float32, shape=[]),
            name='max_temp',
            trainable=True),
        num_over=tf.Variable(0.0, name='num_over', trainable=False))

  @property
  def trainable_variables(self):
    return [self._variables.max_temp]

  @property
  def non_trainable_variables(self):
    return []

  @property
  def local_variables(self):
    return [self._variables.num_over]

  @property
  def input_spec(self):
    return collections.OrderedDict([('temp', tf.TensorSpec([None],
                                                           tf.float32))])

  @tf.function
  def forward_pass(self, batch, training=True):
    assert not training
    num_over = tf.reduce_sum(
        tf.cast(
            tf.greater(batch['temp'], self._variables.max_temp), tf.float32))
    self._variables.num_over.assign_add(num_over)
    loss = tf.constant(0.0)
    predictions = tf.zeros_like(batch['temp'])
    return model.BatchOutput(
        loss=loss,
        predictions=predictions,
        num_examples=tf.shape(predictions)[0])

  @tf.function
  def report_local_outputs(self):
    return collections.OrderedDict([('num_over', self._variables.num_over)])

  @property
  def federated_output_computation(self):
    return tff.federated_computation(
        lambda metrics: {'num_over': tff.federated_sum(metrics.num_over)})


class FederatedEvaluationTest(test.TestCase):

  def test_federated_evaluation(self):
    evaluate = federated_evaluation.build_federated_evaluation(TestModel)
    self.assertEqual(
        str(evaluate.type_signature),
        '(<<trainable=<float32>,non_trainable=<>>@SERVER,'
        '{<temp=float32[?]>*}@CLIENTS> -> <num_over=float32@SERVER>)')

    def _temp_dict(temps):
      return {'temp': np.array(temps, dtype=np.float32)}

    result = evaluate(
        collections.OrderedDict([
            ('trainable', [5.0]),
            ('non_trainable', []),
        ]), [
            [_temp_dict([1.0, 10.0, 2.0, 7.0]), _temp_dict([6.0, 11.0])],
            [_temp_dict([9.0, 12.0, 13.0])],
            [_temp_dict([1.0]), _temp_dict([22.0, 23.0])],
        ])
    self.assertEqual(str(result), '<num_over=9.0>')

  def test_federated_evaluation_with_keras(self):

    def model_fn():
      keras_model = tf.keras.Sequential([
          tf.keras.layers.Dense(
              1,
              kernel_initializer='ones',
              bias_initializer='zeros',
              activation=None)
      ], name='my_model')
      keras_model.compile(
          loss='mean_squared_error',
          optimizer='sgd',
          metrics=[tf.keras.metrics.Accuracy()])
      return keras_utils.from_compiled_keras_model(
          keras_model,
          dummy_batch=collections.OrderedDict([
              ('x', np.zeros((1, 1), np.float32)),
              ('y', np.zeros((1, 1), np.float32)),
          ]))

    evaluate_comp = federated_evaluation.build_federated_evaluation(model_fn)
    initial_weights = tf.nest.map_structure(
        lambda x: x.read_value(),
        model_utils.enhance(model_fn()).weights)

    def _input_dict(temps):
      return collections.OrderedDict([
          ('x', np.reshape(np.array(temps, dtype=np.float32), (-1, 1))),
          ('y', np.reshape(np.array(temps, dtype=np.float32), (-1, 1))),
      ])

    result = evaluate_comp(
        initial_weights,
        [[_input_dict([1.0, 10.0, 2.0, 7.0]), _input_dict([6.0, 11.0])],
         [_input_dict([9.0, 12.0, 13.0])],
         [_input_dict([1.0]), _input_dict([22.0, 23.0])]])
    # Expect 100% accuracy and no loss because we've constructed the identity
    # function and have the same x's and y's for training data.
    self.assertEqual(
        str(result),
        '<accuracy=1.0,loss=0.0,keras_training_time_client_sum_sec=0.0>')


if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()
  test.main()
Python
0.000002
@@ -3491,16 +3491,61 @@ ntial(%5B%0A + tf.keras.layers.Input(shape=(1,)),%0A @@ -3768,148 +3768,8 @@ l')%0A - keras_model.compile(%0A loss='mean_squared_error',%0A optimizer='sgd',%0A metrics=%5Btf.keras.metrics.Accuracy()%5D)%0A @@ -3798,17 +3798,8 @@ rom_ -compiled_ kera @@ -3868,33 +3868,32 @@ ons.OrderedDict( -%5B %0A ( @@ -3891,22 +3891,18 @@ -('x', +x= np.zeros @@ -3913,33 +3913,32 @@ 1), np.float32) -) ,%0A @@ -3937,22 +3937,18 @@ -('y', +y= np.zeros @@ -3967,16 +3967,28 @@ float32) +,%0A ),%0A @@ -3992,18 +3992,104 @@ +loss=tf.keras.losses.MeanSquaredError(),%0A metrics=%5Btf.keras.metrics.Accuracy() %5D -) )%0A%0A e
2b0f4345ff1d4f97f8c00bdad3be035bd5478073
Use a temporary file which exists.
libcloud/test/test_init.py
libcloud/test/test_init.py
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import logging

try:
    import paramiko
    have_paramiko = True
except ImportError:
    have_paramiko = False

from mock import patch

import libcloud
from libcloud import _init_once
from libcloud.utils.loggingconnection import LoggingConnection
from libcloud.base import DriverTypeNotFoundError
from libcloud.test import unittest


class TestUtils(unittest.TestCase):
    def tearDown(self):
        if 'LIBCLOUD_DEBUG' in os.environ:
            del os.environ['LIBCLOUD_DEBUG']

    def test_init_once_and_debug_mode(self):
        if have_paramiko:
            paramiko_logger = logging.getLogger('paramiko')
            paramiko_logger.setLevel(logging.INFO)

        # Debug mode is disabled
        _init_once()

        self.assertIsNone(LoggingConnection.log)

        if have_paramiko:
            paramiko_log_level = paramiko_logger.getEffectiveLevel()
            self.assertEqual(paramiko_log_level, logging.INFO)

        # Enable debug mode
        os.environ['LIBCLOUD_DEBUG'] = '/tmp/foobartest'
        _init_once()

        self.assertTrue(LoggingConnection.log is not None)

        if have_paramiko:
            paramiko_log_level = paramiko_logger.getEffectiveLevel()
            self.assertEqual(paramiko_log_level, logging.DEBUG)

    def test_factory(self):
        driver = libcloud.get_driver(libcloud.DriverType.COMPUTE,
                                     libcloud.DriverType.COMPUTE.EC2)
        self.assertEqual(driver.__name__, 'EC2NodeDriver')

    def test_raises_error(self):
        with self.assertRaises(DriverTypeNotFoundError):
            libcloud.get_driver('potato', 'potato')

    @patch.object(libcloud.requests, '__version__', '2.6.0')
    @patch.object(libcloud.requests.packages.chardet, '__version__', '2.2.1')
    def test_init_once_detects_bad_yum_install_requests(self, *args):
        expected_msg = 'Known bad version of requests detected'
        with self.assertRaisesRegexp(AssertionError, expected_msg):
            _init_once()

    @patch.object(libcloud.requests, '__version__', '2.6.0')
    @patch.object(libcloud.requests.packages.chardet, '__version__', '2.3.0')
    def test_init_once_correct_chardet_version(self, *args):
        _init_once()


if __name__ == '__main__':
    sys.exit(unittest.main())
Python
0
@@ -821,16 +821,32 @@ ort sys%0A +import tempfile%0A import l @@ -877,16 +877,24 @@ paramiko + # NOQA %0A hav @@ -1804,16 +1804,57 @@ ug mode%0A + _, tmp_path = tempfile.mkstemp()%0A @@ -1888,25 +1888,16 @@ %5D = -'/ tmp -/foobartest' +_path %0A
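The replacement path comes from `tempfile.mkstemp`, which creates the file on disk and returns an open descriptor alongside the path. A sketch of the full pattern, including the cleanup that the test itself leaves to the OS:

import os
import tempfile

fd, tmp_path = tempfile.mkstemp()      # the file exists on disk from here on
try:
    os.environ['LIBCLOUD_DEBUG'] = tmp_path
    # ... exercise code that expects the path to point at a real file ...
finally:
    os.close(fd)                       # mkstemp hands back a raw descriptor
    os.unlink(tmp_path)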
a51a089e90719dfda2e6164b0f4c1aec50c26534
Add ordering
entity/migrations/0006_entity_relationship_unique.py
entity/migrations/0006_entity_relationship_unique.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-12 18:20
from __future__ import unicode_literals

from django.db import migrations, connection
from django.db.models import Count, Max


def disable_triggers(apps, schema_editor):
    """
    Temporarily disable user triggers on the relationship table.
    We do not want things like entity history to attach onto these
    migrations as this is a core bug where duplicates should not exist
    :param apps:
    :param schema_editor:
    :return:
    """
    with connection.cursor() as cursor:
        cursor.execute(
            """
            ALTER TABLE entity_entityrelationship DISABLE TRIGGER USER;
            """
        )


def enable_triggers(apps, schema_editor):
    """
    Re-enable the triggers (if any)
    :param apps:
    :param schema_editor:
    :return:
    """
    with connection.cursor() as cursor:
        cursor.execute(
            """
            ALTER TABLE entity_entityrelationship ENABLE TRIGGER USER;
            """
        )


def remove_duplicates(apps, schema_editor):
    """
    Remove any duplicates from the entity relationship table
    :param apps:
    :param schema_editor:
    :return:
    """

    # Get the model
    EntityRelationship = apps.get_model('entity', 'EntityRelationship')

    # Find the duplicates
    duplicates = EntityRelationship.objects.all().order_by().values(
        'sub_entity_id',
        'super_entity_id'
    ).annotate(
        Count('sub_entity_id'),
        Count('super_entity_id'),
        max_id=Max('id')
    ).filter(
        super_entity_id__count__gt=1
    )

    # Loop over the duplicates and delete
    for duplicate in duplicates:
        EntityRelationship.objects.filter(
            sub_entity_id=duplicate['sub_entity_id'],
            super_entity_id=duplicate['super_entity_id']
        ).exclude(
            id=duplicate['max_id']
        ).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('entity', '0005_remove_entitygroup_entities'),
    ]

    operations = [
        migrations.RunPython(disable_triggers),
        migrations.RunPython(remove_duplicates),
        migrations.RunPython(enable_triggers),
        migrations.AlterUniqueTogether(
            name='entityrelationship',
            unique_together=set([('sub_entity', 'super_entity')]),
        ),
    ]
Python
0
@@ -1377,16 +1377,72 @@ rder_by( +%0A 'sub_entity_id',%0A 'super_entity_id'%0A ).values @@ -1467,16 +1467,24 @@ ity_id', +%0A 'super_
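Django folds `order_by` columns into the GROUP BY of a `values().annotate()` query, so ordering by exactly the grouped fields keeps one group per (sub, super) pair and makes iteration deterministic. Roughly, with the model lookup done as in the migration itself:

from django.db.models import Count, Max

EntityRelationship = apps.get_model('entity', 'EntityRelationship')
duplicates = EntityRelationship.objects.order_by(
    'sub_entity_id', 'super_entity_id'       # matches the values() grouping
).values(
    'sub_entity_id', 'super_entity_id'
).annotate(
    max_id=Max('id'), n=Count('id')
).filter(n__gt=1)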
d3c2eb97d82b74abf3ac44f452a30c54d72a99b2
Bumped version number to 2.0.2alpha1
morfessor/__init__.py
morfessor/__init__.py
#!/usr/bin/env python
"""
Morfessor 2.0 - Python implementation of the Morfessor method
"""
import logging

__all__ = ['MorfessorException', 'ArgumentException', 'MorfessorIO',
           'BaselineModel', 'main', 'get_default_argparser', 'main_evaluation',
           'get_evaluation_argparser']

__version__ = '2.0.1'
__author__ = 'Sami Virpioja, Peter Smit'
__author_email__ = "morfessor@cis.hut.fi"

show_progress_bar = True

_logger = logging.getLogger(__name__)


def get_version():
    return __version__

# The public api imports need to be at the end of the file,
# so that the package global names are available to the modules
# when they are imported.

from .baseline import BaselineModel, FixedCorpusWeight, AnnotationCorpusWeight, NumMorphCorpusWeight, MorphLengthCorpusWeight
from .cmd import main, get_default_argparser, main_evaluation, \
    get_evaluation_argparser
from .exception import MorfessorException, ArgumentException
from .io import MorfessorIO
from .utils import _progress
from .evaluation import MorfessorEvaluation, MorfessorEvaluationResult
Python
0.998853
@@ -310,16 +310,22 @@ = '2.0. +2alpha 1'%0A__aut
0adcebf6de030a5f158895b6afe2a952269b35e9
Rewrite generation of compressed file for testing
msumastro/conftest.py
msumastro/conftest.py
from tempfile import mkdtemp
import os
from shutil import rmtree
import gzip
from socket import timeout

import numpy as np
import pytest
from astropy.io import fits
from astropy.coordinates import SkyCoord, name_resolve

from .header_processing.patchers import IRAF_image_type


@pytest.fixture
def triage_setup(request):

    n_test = {'files': 0, 'need_object': 0,
              'need_filter': 0, 'bias': 0,
              'compressed': 0, 'light': 0,
              'need_pointing': 0}

    test_dir = ''

    for key in n_test.keys():
        n_test[key] = 0

    test_dir = mkdtemp()
    original_dir = os.getcwd()
    os.chdir(test_dir)
    img = np.uint16(np.arange(100))

    no_filter_no_object = fits.PrimaryHDU(img)
    no_filter_no_object.header['imagetyp'] = IRAF_image_type('light')
    no_filter_no_object.writeto('no_filter_no_object_light.fit')
    n_test['files'] += 1
    n_test['need_object'] += 1
    n_test['need_filter'] += 1
    n_test['light'] += 1
    n_test['need_pointing'] += 1

    no_filter_no_object.header['imagetyp'] = IRAF_image_type('bias')
    no_filter_no_object.writeto('no_filter_no_object_bias.fit')
    n_test['files'] += 1
    n_test['bias'] += 1

    filter_no_object = fits.PrimaryHDU(img)
    filter_no_object.header['imagetyp'] = IRAF_image_type('light')
    filter_no_object.header['filter'] = 'R'
    filter_no_object.writeto('filter_no_object_light.fit')
    n_test['files'] += 1
    n_test['need_object'] += 1
    n_test['light'] += 1
    n_test['need_pointing'] += 1

    filter_no_object.header['imagetyp'] = IRAF_image_type('bias')
    filter_no_object.writeto('filter_no_object_bias.fit')
    n_test['files'] += 1
    n_test['bias'] += 1

    filter_object = fits.PrimaryHDU(img)
    filter_object.header['imagetyp'] = IRAF_image_type('light')
    filter_object.header['filter'] = 'R'
    filter_object.header['OBJCTRA'] = '00:00:00'
    filter_object.header['OBJCTDEC'] = '00:00:00'
    filter_object.writeto('filter_object_light.fit')
    n_test['files'] += 1
    n_test['light'] += 1
    n_test['need_object'] += 1

    filter_file = open('filter_object_light.fit', 'rb')
    fzipped = gzip.open('filter_object_light.fit.gz', 'wb')
    fzipped.writelines(filter_file)
    fzipped.close()
    n_test['files'] += 1
    n_test['compressed'] += 1
    n_test['light'] += 1
    n_test['need_object'] += 1

    filter_object.header['RA'] = filter_object.header['OBJCTRA']
    filter_object.header['Dec'] = filter_object.header['OBJCTDEC']
    filter_object.writeto('filter_object_RA_keyword_light.fit')
    n_test['files'] += 1
    n_test['light'] += 1
    n_test['need_object'] += 1

    def teardown():
        for key in n_test.keys():
            n_test[key] = 0
        rmtree(test_dir)
        os.chdir(original_dir)
    request.addfinalizer(teardown)

    class Result(object):
        def __init__(self, n, directory):
            self.n_test = n
            self.test_dir = directory
    return Result(n_test, test_dir)


@pytest.fixture
def make_overscan_test_files(request, tmpdir):
    """
    Creates two files, one with overscan, one without for Alta U9

    Parameters

    test_dir: str
        Directory in which to create the overscan files.

    Returns

    info: list
        (working_dir, has_oscan, has_no_oscan)
        working_dir: str
            subdirectory of test_dir in which files are created
        has_oscan: str
            Name of FITS file that has overscan region
        has_no_oscan: str
            Name of FITS file that has no overscan region
    """
    from .header_processing.feder import ApogeeAltaU9, MaximDL5
    from os import path, mkdir
    import astropy.io.fits as fits
    import numpy as np

    test_dir = tmpdir
    oscan_names = ['yes_scan', 'no_scan']
    oscan = {'yes_scan': True,
             'no_scan': False}
    apogee = ApogeeAltaU9()
    working_dir = 'overscan_test'
    working_path = test_dir.mkdir(working_dir)
    add_instrument = lambda hdr: hdr.set('instrume', 'Apogee Alta')
    name_fits = lambda name: name + '.fit'
    for name in oscan_names:
        if oscan[name]:
            data = np.zeros([apogee.rows, apogee.columns])
            has_oscan = name
        else:
            data = np.zeros([apogee.rows, apogee.overscan_start])
            no_oscan = name
        hdu = fits.PrimaryHDU(data)
        hdr = hdu.header
        add_instrument(hdu.header)
        mdl5 = MaximDL5()
        # all headers need a software name
        hdr[mdl5.fits_keyword] = mdl5.fits_name[0]
        hdr['imagetyp'] = 'LIGHT'
        hdu.writeto(path.join(working_path.strpath, name_fits(name)))
    return (working_path.strpath, name_fits(has_oscan), name_fits(no_oscan))


@pytest.fixture
def simbad_down():
    simbad_down = False
    try:
        SkyCoord.from_name("m101")
    except (name_resolve.NameResolveError, timeout):
        simbad_down = True
    return simbad_down
Python
0
@@ -2072,21 +2072,12 @@ -filter_file = +with ope @@ -2114,22 +2114,30 @@ rb') -%0A fzipped = + as f_in:%0A with gzi @@ -2182,63 +2182,54 @@ wb') -%0A fzipped.writelines(filter_file)%0A fzipped.close( + as f_out:%0A f_out.write(f_in.read() )%0A
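The rewrite is the standard nested context-manager idiom for gzipping an existing file. For larger inputs, `shutil.copyfileobj` streams in chunks instead of loading everything into memory (an alternative, not what the test itself uses):

import gzip
import shutil

with open('filter_object_light.fit', 'rb') as f_in:
    with gzip.open('filter_object_light.fit.gz', 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)   # streams in chunks instead of f_in.read()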
930c96267b0cfda52437764703ac18861fb0c7e6
Add all error context to APIClientException instances
mtvc_client/client.py
mtvc_client/client.py
import logging

import hammock
from requests.auth import AuthBase

logger = logging.getLogger(__name__)


class APIClientException(Exception):
    """
    Exception class that contains the error code and message from the MTVC
    """
    def __init__(self, error_code=None, error_message=None, **kwargs):
        self.error_code = error_code
        self.error_message = error_message

    def __str__(self):
        return '[%(error_code)s] %(error_message)s' % (self.__dict__)


class APIClientAuthentication(AuthBase):
    """
    Attaches Tastypie-style HTTP ApiKey Authentication to the given
    Request object.
    """
    def __init__(self, username, key):
        self.username = username
        self.key = key

    def __call__(self, r):
        r.headers['Authorization'] = 'ApiKey %s:%s' % (self.username, self.key)
        return r


class APIClient(object):

    def __init__(self, offering_id, host, username, key, port=80,
                 version='v1'):
        self.api = hammock.Hammock(
            'http://%s:%s/api/%s' % (host, port, version),
            auth=APIClientAuthentication(username, key),
            append_slash=True)
        self.offering_id = offering_id

    def from_json_response(self, response):
        if response.status_code < 200 or response.status_code >= 300:
            error_context = {
                'status_code': response.status_code,
                'status_reason': response.reason,
                'error_code': response.status_code,
                'error_message': response.reason,
                'content': response.content,
            }
            try:
                error_context.update(response.json())
            except ValueError:
                pass
            logger.error('MTVC Server error %s: %s' % (
                response.status_code, error_context))
            raise APIClientException(**error_context)
        try:
            return response.json()
        except ValueError:
            # the server did not return JSON, so just return {}
            return {}

    def get_countries(self):
        return self.from_json_response(self.api.country.GET())

    def get_channels(self, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.channel.GET(params=params))

    def get_shows(self, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.show.GET(params=params))

    def get_showchannels(self, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.showchannel.GET(params=params))

    def get_clips(self, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.clip.GET(params=params))

    def get_clip(self, clip_id, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.clip(clip_id).GET(params=params))

    def get_epg(self, channel_id, **kwargs):
        params = {'days': 1}
        params.update(kwargs)
        return self.from_json_response(
            self.api.channel(channel_id).GET(params=params))

    def get_banners(self, **kwargs):
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.banner.GET(params=params))

    def get_stream_url(
            self, content_type, content_id, user_agent, msisdn, client_ip):
        return self.from_json_response(
            self.api(content_type)(content_id).play.GET(
                params={'offering__slug': self.offering_id},
                headers={
                    'User-Agent': user_agent,
                    'X-MSISDN': msisdn,
                    'X-FORWARDED-FOR': client_ip,
                }))

    def get_account_info(self, msisdn, client_ip):
        return self.from_json_response(self.api.subscriber(msisdn).GET())

    def get_profile_schema(self):
        return self.from_json_response(self.api.subscriberprofile.schema.GET(
            params={'offering__slug': self.offering_id}))

    def post_profile(self, msisdn, client_ip, data):
        return self.from_json_response(self.api.subscriberprofile.POST(
            headers={
                'X-MSISDN': msisdn,
                'X-FORWARDED-FOR': client_ip,
                'Content-Type': 'application/json'},
            params={'offering__slug': self.offering_id},
            data=data))

    def get_transaction_schema(self):
        return self.from_json_response(
            self.api.subscribertransaction.schema.GET(
                params={'offering__slug': self.offering_id}))

    def post_transaction(self, user_agent, msisdn, client_ip, data):
        return self.from_json_response(self.api.subscribertransaction.POST(
            headers={
                'User-Agent': user_agent,
                'X-MSISDN': msisdn,
                'X-FORWARDED-FOR': client_ip,
                'Content-Type': 'application/json'},
            params={'offering__slug': self.offering_id},
            data=data))
Python
0.000002
@@ -383,16 +383,53 @@ _message +%0A self.__dict__.update(kwargs) %0A%0A de
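`self.__dict__.update(kwargs)` promotes every extra keyword into an instance attribute, so handlers can read back the whole error context. A standalone sketch of the pattern (the class name here is illustrative):

class APIError(Exception):
    def __init__(self, error_code=None, error_message=None, **kwargs):
        self.error_code = error_code
        self.error_message = error_message
        self.__dict__.update(kwargs)   # keep status_code, content, etc.

try:
    raise APIError(error_code=502, error_message='Bad Gateway',
                   status_code=502, content='<html>...</html>')
except APIError as exc:
    print(exc.error_code, exc.status_code)   # -> 502 502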
f83076f722d66ebc27d66bc13798d4e5bc9cc27a
Fix `TypeError: must use keyword argument for key function` on Python 3
scikits/image/transform/tests/test_hough_transform.py
scikits/image/transform/tests/test_hough_transform.py
import numpy as np
from numpy.testing import *

import scikits.image.transform as tf
import scikits.image.transform.hough_transform as ht
from scikits.image.transform import probabilistic_hough


def append_desc(func, description):
    """Append the test function ``func`` and append
    ``description`` to its name.
    """
    func.description = func.__module__ + '.' + func.func_name + description

    return func

from scikits.image.transform import *


def test_hough():
    # Generate a test image
    img = np.zeros((100, 100), dtype=int)
    for i in range(25, 75):
        img[100 - i, i] = 1

    out, angles, d = tf.hough(img)

    y, x = np.where(out == out.max())
    dist = d[y[0]]
    theta = angles[x[0]]
    assert_equal(dist > 70, dist < 72)
    assert_equal(theta > 0.78, theta < 0.79)


def test_hough_angles():
    img = np.zeros((10, 10))
    img[0, 0] = 1

    out, angles, d = tf.hough(img, np.linspace(0, 360, 10))

    assert_equal(len(angles), 10)


def test_py_hough():
    ht._hough, fast_hough = ht._py_hough, ht._hough

    yield append_desc(test_hough, '_python')
    yield append_desc(test_hough_angles, '_python')

    tf._hough = fast_hough


def test_probabilistic_hough():
    # Generate a test image
    img = np.zeros((100, 100), dtype=int)
    for i in range(25, 75):
        img[100 - i, i] = 100
        img[i, i] = 100
    # decrease default theta sampling because similar orientations may confuse
    # as mentioned in article of Galambos et al
    theta = np.linspace(0, np.pi, 45)
    lines = probabilistic_hough(img, theta=theta, threshold=10,
                                line_length=10, line_gap=1)
    # sort the lines according to the x-axis
    sorted_lines = []
    for line in lines:
        line = list(line)
        line.sort(lambda x,y: cmp(x[0], y[0]))
        sorted_lines.append(line)
    assert([(25, 75), (74, 26)] in sorted_lines)
    assert([(25, 25), (74, 74)] in sorted_lines)


if __name__ == "__main__":
    run_module_suite()
Python
0.000409
@@ -1746,16 +1746,20 @@ ort( +key= lambda x ,y: @@ -1758,27 +1758,14 @@ da x -,y: cmp(x%5B0%5D, y%5B0%5D) +: x%5B0%5D )%0A
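Python 3 dropped both `cmp` and positional comparison functions from `list.sort`; a key function is the replacement, with `functools.cmp_to_key` as the escape hatch for genuine comparators:

from functools import cmp_to_key

points = [(74, 26), (25, 75)]
points.sort(key=lambda p: p[0])          # works on Python 2 and 3

# only if an old-style comparator must be preserved verbatim:
points.sort(key=cmp_to_key(lambda a, b: (a[0] > b[0]) - (a[0] < b[0])))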
c6f1ab2d33c31201c00435f336c7793b4ff8dd2a
Fix for docutils.error_reporting -> docutils.utils.error_reporting
notebook_sphinxext.py
notebook_sphinxext.py
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.error_reporting import ErrorString
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from nbconvert import ConverterHTML


class Notebook(Directive):
    """ Use nbconvert to insert a notebook into the environment.
    This is based on the Raw directive in docutils
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = False

    def run(self):
        # check if raw html is supported
        if not self.state.document.settings.raw_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)

        # set up encoding
        attributes = {'format': 'html'}
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler

        # get path to notebook
        source_dir = os.path.dirname(
            os.path.abspath(self.state.document.current_source))
        nb_path = os.path.normpath(os.path.join(source_dir,
                                                self.arguments[0]))
        nb_path = utils.relative_path(None, nb_path)

        # convert notebook to html
        converter = ConverterHTML(nb_path)
        converter.read()

        # add HTML5 scoped attribute to header style tags
        header = map(lambda s: s.replace('<style', '<style scoped="scoped"'),
                     converter.header_body())

        # concatenate raw html lines
        lines = ['<div class="ipynotebook">']
        lines.extend(header)
        lines.extend(converter.main_body())
        lines.append('</div>')
        text = '\n'.join(lines)

        # add dependency
        self.state.document.settings.record_dependencies.add(nb_path)
        attributes['source'] = nb_path

        # create notebook node
        nb_node = notebook('', text, **attributes)
        (nb_node.source, nb_node.line) = \
            self.state_machine.get_source_and_line(self.lineno)

        return [nb_node]


class notebook(nodes.raw):
    pass


def visit_notebook_node(self, node):
    self.visit_raw(node)


def depart_notebook_node(self, node):
    self.depart_raw(node)


def setup(app):
    app.add_node(notebook,
                 html=(visit_notebook_node, depart_notebook_node))

    app.add_directive('notebook', Notebook)
Python
0
@@ -107,16 +107,22 @@ ocutils. +utils. error_re
62b7b8fc1ed346db99e7c993c205081a12d091f9
remove broken metric
hypergan/trainers/depth_trainer.py
hypergan/trainers/depth_trainer.py
import tensorflow as tf
import numpy as np
import hyperchamber as hc
import inspect

from hypergan.trainers.base_trainer import BaseTrainer

TINY = 1e-12


class DepthTrainer(BaseTrainer):
    def create(self):
        self.hist = [0 for i in range(2)]
        config = self.config
        self.global_step = tf.train.get_global_step()
        self.mix_threshold_reached = False
        decay_function = config.decay_function

        variables = self.gan.d_vars() + self.gan.g_vars()
        self.ema = [ tf.Variable(_v) for _v in variables ]
        self.store_v = [ _v.assign(_v2) for _v,_v2 in zip(self.ema, variables) ]
        self.combine = [ _v.assign(config.decay *_ema + (1.-config.decay)*_new) for _v, _ema, _new in zip(variables, self.ema, variables)]
        self._delegate = self.gan.create_component(config.trainer, d_vars=self.d_vars, g_vars=self.g_vars, loss=self.loss)
        self._delegate.create()
        self.slot_vars_g = self._delegate.slot_vars_g
        self.slot_vars_d = self._delegate.slot_vars_g

        if self.config.candidate:
            self.mixg = tf.Variable(1, dtype=tf.float32)
            self.mixd = tf.Variable(1, dtype=tf.float32)
            self.gan.add_metric('mixg', self.mixg)
            self.gan.add_metric('mixd', self.mixd)
            self.combine_d = [ _v.assign(self.mixd *_ema + (1.-self.mixd)*_new) for _v, _ema, _new in zip(self.gan.d_vars(), self.ema, self.gan.d_vars())]
            self.combine_g = [ _v.assign(self.mixg *_ema + (1.-self.mixg)*_new) for _v, _ema, _new in zip(self.gan.g_vars(), self.ema[len(self.gan.d_vars()):], self.gan.g_vars())]
            self.candidate = [ tf.Variable(_v) for _v in variables ]
            self.store_candidate = [ _v.assign(_v2) for _v,_v2 in zip(self.candidate, variables) ]
            self.reset_discriminator = [ _v.assign(_v2) for _v,_v2 in zip(self.gan.d_vars(), self.ema) ]
            self.reset_generator = [ _v.assign(_v2) for _v,_v2 in zip(self.gan.g_vars(), self.ema[len(self.gan.d_vars()):]) ]
            self.reset_candidate_discriminator = [ _v.assign(_v2) for _v,_v2 in zip(self.gan.d_vars(), self.candidate) ]
            self.reset_candidate_generator = [ _v.assign(_v2) for _v,_v2 in zip(self.gan.g_vars(), self.candidate[len(self.gan.d_vars()):]) ]
            self.candidate_loss = self.gan.loss.sample[0] - self.gan.loss.sample[1]

    def required(self):
        return "".split()

    def _step(self, feed_dict):
        gan = self.gan
        sess = gan.session
        config = self.config
        loss = self.loss

        gan.session.run(self.store_v)
        for i in range(config.depth or 2):
            self._delegate.step(feed_dict)

        if self.config.candidate:
            d_fake2b = np.sum([gan.session.run(self.candidate_loss) for i in range(self.config.candidate_tests)])
            gan.session.run(self.store_candidate)
            gan.session.run(self.reset_discriminator)
            d_fake2a = np.sum([gan.session.run(self.candidate_loss) for i in range(self.config.candidate_tests)])
            gan.session.run(self.reset_generator)
            d_fake1a = np.sum([gan.session.run(self.candidate_loss) for i in range(self.config.candidate_tests)])
            gan.session.run(self.reset_candidate_discriminator)
            d_fake1b = np.sum([gan.session.run(self.candidate_loss) for i in range(self.config.candidate_tests)])
            gan.session.run(self.reset_candidate_generator)

            payoff = [[d_fake1a, d_fake1b], [d_fake2a, d_fake2b]]
            d1 = d_fake1a + d_fake1b
            d2 = d_fake2a + d_fake2b
            g1 = d_fake1a + d_fake2a
            g2 = d_fake2b + d_fake2b
            mixd = d2/(d1 + d2)
            mixg = g1/(g1 + g2)
            if self.config.reverse:
                mixd = d1/(d1 + d2)
                mixg = g2/(g1 + g2)
            mixd = np.minimum(1.0, mixd)
            mixd = np.maximum(0.0, mixd)
            mixg = np.minimum(1.0, mixg)
            mixg = np.maximum(0.0, mixg)
            gan.session.run([self.combine_d, self.combine_g], {self.mixd: mixd, self.mixg: mixg})
            feed_dict[self.mixd] = mixd
            feed_dict[self.mixg] = mixg
        else:
            gan.session.run(self.combine)
Python
0.00237
@@ -4085,84 +4085,8 @@ g%7D)%0A - feed_dict%5Bself.mixd%5D=mixd%0A feed_dict%5Bself.mixg%5D=mixg%0A
15016615c5406a56468171ab55c0cc60797dc580
Check author is not NoneType, not author.name
reddit2kindle.py
reddit2kindle.py
import os

from flask import Flask, request, jsonify
from flask.templating import render_template

import util
import forms

app = Flask(__name__)
app.secret_key = os.urandom(24)
forms.csrf.init_app(app)


@app.route('/')
def index():
    post = forms.Submission()
    subreddit = forms.Subreddit()
    return render_template('index.html', post=post, subreddit=subreddit)


@app.route('/thread', methods=['POST'])
def thread():
    if util.validate_request_post(request.form) is not None:
        return jsonify(type='danger', text=util.validate_request_post(request.form))

    try:
        submission = util.r.get_submission(url=request.form['submission'])
    except:
        return jsonify(type='danger', text='That wasn\'t a reddit link, was it?')

    comments = None
    if request.form['comments'] == 'true':
        submission.replace_more_comments(limit=0)
        comments = util.get_comments(submission)

    if submission.selftext == '':
        body = util.get_readability(submission.url)
    else:
        body = util.markdown(submission.selftext, output_format='html5')

    title = submission.title

    author = "[deleted]"
    if submission.author.name is not None:
        author = submission.author.name

    address = request.form['email']
    kindle_address = request.form['kindle_address']
    attachment = render_template('comments.html', title=title, body=body,
                                 author=author, comments=comments)
    status = util.send_email(address, kindle_address, attachment, title)

    if status is None:
        return jsonify(type='success', text='Success!')
    else:
        return jsonify(type='warning', text='Uh oh! Something went wrong on our end')


@app.route('/subreddit', methods=['POST'])
def convert():
    if util.validate_request_subreddit(request.form) is not None:
        return jsonify(type='danger', text=util.validate_request_subreddit(request.form))

    subreddit = request.form['subreddit']
    time = request.form['time']
    limit = int(request.form['limit'])
    address = request.form['email']
    kindle_address = request.form['kindle_address']

    try:
        posts = util.get_posts(subreddit, time, limit)
        if time == 'all':
            title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' ever'
        else:
            title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' over the past ' + time
        top = []
        for post in posts:
            try:
                top.append({'title': post.title,
                            'body': util.get_readability(post.url) if post.selftext == '' else util.markdown(post.selftext),
                            'author': '[deleted]' if post.author is None else post.author.name
                            })
            except:
                pass
    except:
        return jsonify(type='danger', text='That ain\'t no subreddit I\'ve ever heard of!')

    attachment = render_template('posts.html', posts=top)
    status = util.send_email(address, kindle_address, attachment, title)

    if status is None:
        return jsonify(type='success', text='Success!')
    else:
        return jsonify(type='warning', text='Uh oh! Something went wrong on our end')


if __name__ == '__main__':
    app.run(debug=True)
Python
0.021749
@@ -1158,21 +1158,16 @@ n.author -.name is not
f3cada11b253ceea129342040f7e3d75f4f0cf15
use assertions with the form elements in test_new instead of the regex on the html content
test_notes.py
test_notes.py
from webtest import TestApp
import os
import re

import notes
import dbaccessor

DB = 'notes.db'


class TestWebserver():

    def test_index(self):
        dba = dbaccessor.DbAccessor(DB)
        dba.addNote('eins', 'lorem ipsum')
        dba.addNote('zwei', 'blabla')
        bottle = TestApp(notes.app)
        result = bottle.get('/')
        assert result.status == '200 OK'
        match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
        assert match

    def test_new(self):
        bottle = TestApp(notes.app)
        result = bottle.get('/new')
        assert result.status == '200 OK'
        match = re.search(r'<input type="text" size="100" maxlength="100" name="content">', result.body)
        assert match

    def test_adding_new_note(self):
        bottle = TestApp(notes.app)
        result = bottle.get('/new')
        form = result.form
        form['title'] = "testtitle"
        form['content'] = "testcontent"
        result = form.submit('save')
        assert result.status == '200 OK'

    def tearDown(self):
        if os.path.isfile(DB):
            os.remove(DB)
Python
0
@@ -611,125 +611,183 @@ -match = re.search(r'%3Cinput type=%22text%22 size=%22100%22 maxlength=%22100%22 name=%22content%22%3E', result.body)%0A assert match +form = result.form%0A assert form.action == '/new'%0A assert form.method == 'GET'%0A assert form%5B'title'%5D.value == ''%0A assert form%5B'content'%5D.value == '' %0A%0A
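WebTest parses the page's `<form>` into a structured object, which is what lets the test assert on fields instead of regex-matching raw HTML. A sketch of the idiom against the same app:

from webtest import TestApp
import notes

bottle = TestApp(notes.app)
result = bottle.get('/new')
form = result.form                   # parsed from the page's single <form>
assert form.action == '/new'
assert form.method == 'GET'
assert form['title'].value == ''
form['title'] = 'testtitle'          # fields stay assignable before form.submit()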
77e3f0da9bec64c2bf0f34faec735a29f1a74284
remove test for Google+
tests/test.py
tests/test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest

from you_get import *
from you_get.__main__ import url_to_module

def test_urls(urls):
    for url in urls:
        url_to_module(url).download(url, info_only = True)

class YouGetTests(unittest.TestCase):

    def test_freesound(self):
        test_urls([
            "http://www.freesound.org/people/Corsica_S/sounds/184419/",
        ])

    def test_googleplus(self):
        test_urls([
            "http://plus.google.com/102663035987142737445/posts/jJRu43KQFT5",
            "http://plus.google.com/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/posts/jJRu43KQFT5",
            "http://plus.google.com/+平田梨奈/posts/jJRu43KQFT5",
            "http://plus.google.com/photos/102663035987142737445/albums/5844078581209509505/5844078587839097874",
            "http://plus.google.com/photos/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/albums/5844078581209509505/5844078587839097874",
            "http://plus.google.com/photos/+平田梨奈/albums/5844078581209509505/5844078587839097874",
        ])

    def test_jpopsuki(self):
        test_urls([
            "http://jpopsuki.tv/video/Dragon-Ash---Run-to-the-Sun/8ad7aec604badd0b0798cd999b63ae17",
        ])

    def test_mixcloud(self):
        test_urls([
            "http://www.mixcloud.com/beatbopz/beat-bopz-disco-mix/",
            "http://www.mixcloud.com/beatbopz/tokyo-taste-vol4/",
            "http://www.mixcloud.com/DJVadim/north-america-are-you-ready/",
        ])

    def test_vimeo(self):
        test_urls([
            "http://vimeo.com/56810854",
        ])

    def test_xiami(self):
        test_urls([
            "http://www.xiami.com/song/1769835121",
        ])

    def test_youtube(self):
        test_urls([
            "http://www.youtube.com/watch?v=pzKerr0JIPA",
            "http://youtu.be/pzKerr0JIPA",
        ])
Python
0.00001
@@ -414,655 +414,8 @@ %0A - def test_googleplus(self):%0A test_urls(%5B%0A %22http://plus.google.com/102663035987142737445/posts/jJRu43KQFT5%22,%0A %22http://plus.google.com/+%25E5%25B9%25B3%25E7%2594%25B0%25E6%25A2%25A8%25E5%25A5%2588/posts/jJRu43KQFT5%22,%0A %22http://plus.google.com/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/posts/jJRu43KQFT5%22,%0A %22http://plus.google.com/photos/102663035987142737445/albums/5844078581209509505/5844078587839097874%22,%0A %22http://plus.google.com/photos/+%25E5%25B9%25B3%25E7%2594%25B0%25E6%25A2%25A8%25E5%25A5%2588/albums/5844078581209509505/5844078587839097874%22,%0A %22http://plus.google.com/photos/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/albums/5844078581209509505/5844078587839097874%22,%0A %5D)%0A %0A
bbbd535ecabc6017aec6a3549c917d26036aff3b
Remove checks for testGotTrace unit test until trace event importer is implemented.
tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py
tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest

from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.test import tab_test_case


class TracingBackendTest(tab_test_case.TabTestCase):
  def _StartServer(self):
    base_dir = os.path.dirname(__file__)
    self._browser.SetHTTPServerDirectories(
        os.path.join(base_dir, '..', '..', '..', 'unittest_data'))

  def _WaitForAnimationFrame(self):
    def _IsDone():
      js_is_done = """done"""
      return bool(self._tab.EvaluateJavaScript(js_is_done))
    util.WaitFor(_IsDone, 5)

  def testGotTrace(self):
    if not self._browser.supports_tracing:
      logging.warning('Browser does not support tracing, skipping test.')
      return
    self._StartServer()
    self._browser.StartTracing()
    self._browser.StopTracing()
    model = self._browser.GetTraceResultAndReset().AsTimelineModel()
    events = model.GetAllEvents()
    assert len(events) > 0


class TracingResultImplTest(unittest.TestCase):
  def testWrite1(self):
    ri = tracing_backend.TraceResultImpl([])
    f = cStringIO.StringIO()
    ri.Serialize(f)
    v = f.getvalue()

    j = json.loads(v)
    assert 'traceEvents' in j
    self.assertEquals(j['traceEvents'], [])

  def testWrite2(self):
    ri = tracing_backend.TraceResultImpl([
        '"foo"', '"bar"'])
    f = cStringIO.StringIO()
    ri.Serialize(f)
    v = f.getvalue()

    j = json.loads(v)
    assert 'traceEvents' in j
    self.assertEquals(j['traceEvents'], ['foo', 'bar'])

  def testWrite3(self):
    ri = tracing_backend.TraceResultImpl([
        '"foo"', '"bar"', '"baz"'])
    f = cStringIO.StringIO()
    ri.Serialize(f)
    v = f.getvalue()

    j = json.loads(v)
    assert 'traceEvents' in j
    self.assertEquals(j['traceEvents'], ['foo', 'bar', 'baz'])
Python
0.000002
@@ -1012,137 +1012,123 @@ g()%0A +%0A -model = self._browser.GetTraceResultAndReset().AsTimelineModel()%0A events = model.GetAllEvents()%0A assert len(events) %3E 0 +# TODO(tengs): check model for correctness after trace_event_importer%0A # is implemented (crbug.com/173327). %0A%0Acl
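Decoded, the hunk swaps the timeline-model assertions at the end of testGotTrace for a placeholder comment, so the method now ends like this (two-space Chromium indentation inferred from the file above):

    self._browser.StartTracing()
    self._browser.StopTracing()

    # TODO(tengs): check model for correctness after trace_event_importer
    # is implemented (crbug.com/173327).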
a440cef4140a4225fc093c9143d7cfd1a0c4e917
Update metric through API
biggraphite/cli/web/namespaces/biggraphite.py
biggraphite/cli/web/namespaces/biggraphite.py
#!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigGraphite API."""
from __future__ import absolute_import

import flask_restplus as rp

from biggraphite.cli.web import context

# TODO:
# - Add the equivalent of what the accessor provides
# - Add the ability to get/set points.

api = rp.Namespace("biggraphite", description="BigGraphite API")

metric = api.model(
    "Metric",
    {
        "id": rp.fields.String(readOnly=True, description="The metric identifier"),
        "name": rp.fields.String(description="The metric name"),
        "metadata": rp.fields.Raw(description="The metric metadata"),
        "created_on": rp.fields.DateTime(),
        "updated_on": rp.fields.DateTime(),
        "read_on": rp.fields.DateTime(),
    },
)


@api.route("/metric/<string:name>")
@api.doc("Operations on metrics.")
@api.param("name", "The metric name")
class MetricResource(rp.Resource):
    """A Metric."""

    @api.doc("Get a metric by name.")
    @api.marshal_with(metric)
    def get(self, name):
        """Get a metric."""
        m = context.accessor.get_metric(name)
        if not m:
            rp.abort(404)
        return m.as_string_dict()
Python
0
@@ -649,16 +649,42 @@ import%0A%0A +from flask import request%0A import f @@ -704,16 +704,60 @@ as rp%0A%0A +from biggraphite import metric as bg_metric%0A from big @@ -956,16 +956,329 @@ API%22)%0A%0A +metric_metadata = api.model(%0A %22MetricMetadata%22,%0A %7B%0A %22aggregator%22: rp.fields.String(description=%22The metric aggregator%22),%0A %22retention%22: rp.fields.String(description=%22The metric retention%22),%0A %22carbon_xfilesfactor%22: rp.fields.Float(description=%22The metric carbon xfiles factor%22),%0A %7D%0A)%0A%0A metric = @@ -1492,12 +1492,32 @@ lds. -Raw( +Nested(metric_metadata, desc @@ -2064,16 +2064,16 @@ rt(404)%0A - @@ -2098,8 +2098,625 @@ _dict()%0A +%0A @api.doc(%22Update a metric.%22)%0A @api.expect(metric_metadata)%0A def post(self, name):%0A %22%22%22Update a metric.%22%22%22%0A if not context.accessor.has_metric(name):%0A return %22Unknown metric: '%25s'%22 %25 name, 404%0A payload = request.json%0A metadata = bg_metric.MetricMetadata(%0A aggregator=bg_metric.Aggregator.from_config_name(payload%5B%22aggregator%22%5D),%0A retention=bg_metric.Retention.from_string(payload%5B%22retention%22%5D),%0A carbon_xfilesfactor=payload%5B%22carbon_xfilesfactor%22%5D%0A )%0A context.accessor.update_metric(name, metadata)%0A return '', 204%0A
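Decoded from the escaped diff, the commit adds a metric_metadata model, nests it into the metric fields, and introduces a POST handler on MetricResource, together with the new imports `from flask import request` and `from biggraphite import metric as bg_metric`. The new method, re-indented from the hunks:

    @api.doc("Update a metric.")
    @api.expect(metric_metadata)
    def post(self, name):
        """Update a metric."""
        if not context.accessor.has_metric(name):
            return "Unknown metric: '%s'" % name, 404
        payload = request.json
        metadata = bg_metric.MetricMetadata(
            aggregator=bg_metric.Aggregator.from_config_name(payload["aggregator"]),
            retention=bg_metric.Retention.from_string(payload["retention"]),
            carbon_xfilesfactor=payload["carbon_xfilesfactor"]
        )
        context.accessor.update_metric(name, metadata)
        return '', 204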
4eb34493583a525da52d3464e8c8f6c07dfe0e84
update setup
src/ensae_teaching_cs/automation/win_setup_helper.py
src/ensae_teaching_cs/automation/win_setup_helper.py
""" @file @brief Customize a Windows Setup for these teachings """ import os import shutil from pyquickhelper import remove_folder def last_function(innosetup, folders, verbose=False, fLOG=print): """ applies last modifications to the setup @param innosetup innosetup script which defines the setup @param folders dictionary with keys *workspace*, *python*, *tools*, *build*, *docs* @param verbose verbose @param fLOG logging function """ from pymyinstall import unzip_files from pymyinstall.win_installer.win_setup_r import r_run_script work = folders["workspace"] python = folders["python"] python = folders["tools"] build = os.path.join(folders["build"], "custom_ensae_teaching_cs") docs = os.path.join(work, "docs") logs = folders["logs"] this = os.path.abspath(os.path.dirname(__file__)) doc_annee = [os.path.join(this, "..", "..", "..", "_doc", "notebooks", "td1a"), os.path.join( this, "..", "..", "..", "_doc", "notebooks", "td2a"), os.path.join( this, "..", "..", "..", "_doc", "notebooks", "td3a"), ] if not os.path.exists(build): os.mkdir(build) # folders fLOG("folders:", folders) fLOG("innosetup:", innosetup) # docs fLOG("--- cleaning creating folder", docs) if os.path.exists(docs): remove_folder(docs) os.mkdir(docs) # R_install fLOG("--- R install") r_script = os.path.join(os.path.dirname(__file__), "R_install.r") if os.path.exists(r_script): r_run_script(os.path.join(tools, "R"), dest, os.path.join( logs, "r_ensae_teaching_cs.install.log.txt")) # documentation fLOG("--- documentation, TDs +++") for ipy in os.listdir(dist): if ipy.endswith("ipynb"): if verbose: fLOG("copy ", ipy) full = os.path.join(dist, ipy) shutil.copy(full, docs) # others packages not from Microsoft from pymyinstall import ModuleInstall from pymyinstall.win_installer.win_packages import win_install_package_other_python, is_package_installed # modules modules = [ModuleInstall("pyquickhelper", "pip"), ModuleInstall("pyensae", "pip"), ModuleInstall("pyrsslocal", "pip"), ModuleInstall("code_beatrix", "pip"), ModuleInstall("pymmails", "pip"), ModuleInstall("pymyinstall", "pip"), ModuleInstall("ensae_teaching_cs", "pip"), ModuleInstall("actuariat_python", "pip"), ] # new packages fLOG("--- download new packages") pack = [] for mod in modules: mname = mod.mname if mod.mname is not None else mod.name if not is_package_installed(python, [mod.name, mname]): fLOG("download:", mod.name) p = mod.download(temp_folder=build) pack.append((p, mod)) # install packages fLOG("--- install packages") for p, mod in pack: fLOG("install", os.path.split(p)[-1]) win_install_package_other_python(python, p, verbose=verbose, fLOG=fLOG) # remove unnecessary folders fLOG("--- remove too big folfers") to_remove = [] for rem in to_remove: sub = os.path.join(python, rem) if os.path.exists(sub): fLOG("remove", sub) remove_folder(sub) # modifies the setup fLOG("--- modifies the setup") with open(innosetup, "r", encoding="utf8") as f: content = f.read() with open(innosetup, "w", encoding="utf8") as f: f.write(content)
Python
0.000001
@@ -693,30 +693,29 @@ ython%22%5D%0A -python +tools = folders%5B%22 @@ -1783,16 +1783,182 @@ s +++%22)%0A + for dist in doc_annee:%0A end = os.path.split(dist)%5B-1%5D%0A to = os.path.join(docs, end)%0A if not os.path.exists(to):%0A os.mkdir(to)%0A for @@ -1990,16 +1990,20 @@ + + if ipy.e @@ -2032,16 +2032,20 @@ + + if verbo @@ -2048,16 +2048,20 @@ erbose:%0A + @@ -2099,16 +2099,20 @@ + + full = o @@ -2146,16 +2146,20 @@ + shutil.c @@ -2168,20 +2168,18 @@ y(full, -docs +to )%0A%0A #
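Decoded, the diff renames the clobbering assignment to tools (the old code overwrote python with folders["tools"]) and makes the notebook copy run once per folder in doc_annee, creating a per-course target directory; re-indented from the hunks (elided context marked with ...):

    tools = folders["tools"]
    ...
    for dist in doc_annee:
        end = os.path.split(dist)[-1]
        to = os.path.join(docs, end)
        if not os.path.exists(to):
            os.mkdir(to)
        for ipy in os.listdir(dist):
            if ipy.endswith("ipynb"):
                if verbose:
                    fLOG("copy ", ipy)
                full = os.path.join(dist, ipy)
                shutil.copy(full, to)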
0c1b706c2804cdc4324b4b0b7d960d6ba07268ea
Remove debug print from discordbot utils.py
discordbot/titanembeds/utils.py
discordbot/titanembeds/utils.py
import discord
import time
from email import utils as emailutils

def format_datetime(datetimeobj):
    return emailutils.formatdate(time.mktime(datetimeobj.timetuple()))  # https://stackoverflow.com/questions/3453177/convert-python-datetime-to-rfc-2822

def get_formatted_message(message):
    edit_ts = message.edited_at
    if not edit_ts:
        edit_ts = None
    else:
        edit_ts = format_datetime(edit_ts)
    msg = {
        "id": str(message.id),
        "channel_id": str(message.channel.id),
        "content": message.content,
        "author": get_message_author(message),
        "timestamp": format_datetime(message.created_at),
        "edited_timestamp": edit_ts,
    }
    if hasattr(message, "mentions"):
        msg["mentions"] = get_message_mentions(message.mentions)
    if hasattr(message, "attachments"):
        msg["attachments"] = get_attachments_list(message.attachments)
    if hasattr(message, "embeds"):
        msg["embeds"] = get_embeds_list(message.embeds)
    if hasattr(message, "author"):
        nickname = None
        if hasattr(message.author, 'nick') and message.author.nick:
            nickname = message.author.nick
        msg["author"]["nickname"] = nickname
    if hasattr(message, "mentions"):
        for mention in msg["mentions"]:
            mention["nickname"] = None
            member = message.guild.get_member(mention["id"])
            if member:
                mention["nickname"] = member.nick
    return msg

def get_formatted_user(user):
    userobj = {
        "avatar": user.avatar,
        "avatar_url": user.avatar_url,
        "color": str(user.color)[1:],
        "discriminator": user.discriminator,
        "game": None,
        "hoist-role": None,
        "id": str(user.id),
        "status": str(user.status),
        "username": user.name,
        "nick": None,
        "bot": user.bot,
        "roles": []
    }
    if userobj["color"] == "000000":
        userobj["color"] = None
    # if userobj["avatar_url"][len(userobj["avatar_url"])-15:] != ".jpg":
    #     userobj["avatar_url"] = userobj["avatar_url"][:len(userobj["avatar_url"])-14] + ".jpg"
    if user.nick:
        userobj["nick"] = user.nick
    if hasattr(user, "activity") and user.activity:
        userobj["activity"] = {
            "name": user.activity.name
        }
    roles = sorted(user.roles, key=lambda k: k.position, reverse=True)
    for role in roles:
        userobj["roles"].append(str(role.id))
        print(role, role.hoist)
        if role.hoist and userobj["hoist-role"] == None:
            userobj["hoist-role"] = {
                "id": str(role.id),
                "name": role.name,
                "position": role.position,
            }
    return userobj

def get_message_author(message):
    if not hasattr(message, "author"):
        return {}
    author = message.author
    obj = {
        "username": author.name,
        "discriminator": author.discriminator,
        "bot": author.bot,
        "id": str(author.id),
        "avatar": author.avatar
    }
    return obj

def get_formatted_emojis(emojis):
    emotes = []
    for emo in emojis:
        emotes.append({
            "id": str(emo.id),
            "managed": emo.managed,
            "name": emo.name,
            "require_colons": emo.require_colons,
            "roles": get_roles_list(emo.roles),
            "url": emo.url,
        })
    return emotes

def get_formatted_guild(guild):
    guil = {
        "id": str(guild.id),
        "name": guild.name,
        "icon": guild.icon,
        "icon_url": guild.icon_url,
    }
    return guil

def get_formatted_channel(channel):
    chan = {
        "id": str(channel.id),
        "guild_id": str(channel.guild.id),
    }
    return chan

def get_formatted_role(role):
    rol = {
        "id": str(role.id),
        "guild_id": str(role.guild.id),
        "name": role.name,
        "color": role.color.value,
        "hoist": role.hoist,
        "position": role.position,
        "permissions": role.permissions.value,
    }
    return rol

def get_message_mentions(mentions):
    ments = []
    for author in mentions:
        ments.append({
            "username": author.name,
            "discriminator": author.discriminator,
            "bot": author.bot,
            "id": str(author.id),
            "avatar": author.avatar
        })
    return ments

def get_webhooks_list(guild_webhooks):
    webhooks = []
    for webhook in guild_webhooks:
        webhooks.append({
            "id": str(webhook.id),
            "guild_id": str(webhook.guild.id),
            "channel_id": str(webhook.channel.id),
            "name": webhook.name,
            "token": webhook.token,
        })
    return webhooks

def get_emojis_list(guildemojis):
    emojis = []
    for emote in guildemojis:
        emojis.append({
            "id": str(emote.id),
            "name": emote.name,
            "require_colons": emote.require_colons,
            "managed": emote.managed,
            "roles": list_role_ids(emote.roles),
            "url": emote.url,
            "animated": emote.animated
        })
    return emojis

def get_roles_list(guildroles):
    roles = []
    for role in guildroles:
        roles.append({
            "id": str(role.id),
            "name": role.name,
            "color": role.color.value,
            "hoist": role.hoist,
            "position": role.position,
            "permissions": role.permissions.value
        })
    return roles

def get_channels_list(guildchannels):
    channels = []
    for channel in guildchannels:
        if isinstance(channel, discord.channel.TextChannel) or isinstance(channel, discord.channel.CategoryChannel):
            overwrites = []
            isTextChannel = isinstance(channel, discord.channel.TextChannel)
            for target, overwrite in channel.overwrites:
                if isinstance(target, discord.Role):
                    type = "role"
                else:
                    type = "member"
                allow, deny = overwrite.pair()
                allow = allow.value
                deny = deny.value
                overwrites.append({
                    "id": str(target.id),
                    "type": type,
                    "allow": allow,
                    "deny": deny,
                })
            parent = channel.category
            if parent:
                parent = str(parent.id)
            channels.append({
                "id": str(channel.id),
                "name": channel.name,
                "topic": channel.topic if isTextChannel else None,
                "position": channel.position,
                "type": "text" if isTextChannel else "category",
                "permission_overwrites": overwrites,
                "parent_id": parent,
                "nsfw": channel.is_nsfw(),
            })
    return channels

def list_role_ids(usr_roles):
    ids = []
    for role in usr_roles:
        ids.append(str(role.id))
    return ids

def get_attachments_list(attachments):
    attr = []
    for attach in attachments:
        a = {
            "id": str(attach.id),
            "size": attach.size,
            "filename": attach.filename,
            "url": attach.url,
            "proxy_url": attach.proxy_url,
        }
        if attach.height:
            a["height"] = attach.height
        if attach.width:
            a["width"] = attach.width
        attr.append(a)
    return attr

def get_embeds_list(embeds):
    em = []
    for e in embeds:
        em.append(e.to_dict())
    return em
Python
0.000001
@@ -2458,40 +2458,8 @@ d))%0A - print(role, role.hoist)%0A
769b4d6d7fb04e095df54d85ab3dcd577298ece7
Fix CustomObject filtering
distance_scripts/filterlevel.py
distance_scripts/filterlevel.py
"""Filter objects from a level or CustomObject.""" import os import sys import argparse import re from io import BytesIO from distance.level import Level from distance.levelobjects import PROBER as LEVEL_PROBER from distance.bytes import ( DstBytes, MAGIC_2, MAGIC_3, MAGIC_32, MAGIC_9, Section, ) from distance.printing import PrintContext from distance.prober import BytesProber PROBER = BytesProber() @PROBER.func def _detect_other(section): if section.magic == MAGIC_9: return Level return None PROBER.extend(LEVEL_PROBER) MAGICMAP = {2: MAGIC_2, 3: MAGIC_3, 32: MAGIC_32} def parse_section(arg): parts = arg.split(",") magic = MAGICMAP[int(parts[0])] return Section(magic, *(int(p, base=0) for p in parts[1:])) class ObjectMatcher(object): def __init__(self, args): self.all = args.all self.objnum = args.objnum self.type_patterns = [re.compile(r) for r in args.type] self.maxrecurse = args.maxrecurse self.num_matches = 0 self.matches = [] self.sections = {parse_section(arg).to_key() for arg in args.section} def _match_sections(self, obj): for sec in obj.sections: if sec.to_key() in self.sections: return True for child in obj.children: if self._match_sections(child): return True return False def match_props(self, obj): if not self.type_patterns and not self.sections: return True if self.type_patterns: typename = obj.type if any(r.search(typename) for r in self.type_patterns): return True if self.sections: if not obj.is_object_group and self._match_sections(obj): return True return False def match(self, obj): if self.match_props(obj): num = self.num_matches self.num_matches = num + 1 self.matches.append(obj) if self.all: return True if num in self.objnum: return True return False def _filter_objects(self, objs, recurse): result = [] for obj in objs: if self.match(obj): continue result.append(obj) if obj.is_object_group and recurse != 0: obj.children = self._filter_objects(obj.children, recurse - 1) return result def filter_objects(self, objs): return self._filter_objects(objs, self.maxrecurse) def filter_level(self, level): for layer in level.layers: layer.objects = self.filter_objects(layer.objects) return level def count_objects(objs): n_obj = 0 n_grp = 0 for obj in objs: n_obj += 1 if obj.is_object_group: n_grp += 1 res = count_objects(obj.children) n_obj += res[0] n_grp += res[1] return n_obj, n_grp def main(): parser = argparse.ArgumentParser( description=__doc__) parser.add_argument("-f", "--force", action='store_true', help="Allow overwriting OUT file.") parser.add_argument("-n", "--objnum", action='append', type=int, default=[], help="Select by candidate number.") parser.add_argument("-a", "--all", action='store_true', help="Filter out all matching objects.") parser.add_argument("-l", "--maxrecurse", type=int, default=-1, help="Maximum of recursions. 0 only lists layer objects.") parser.add_argument("-t", "--type", action='append', default=[], help="Match object type (regex).") parser.add_argument("-s", "--section", action='append', default=[], help="Match sections.") parser.add_argument("IN", help="Level .bytes filename.") parser.add_argument("OUT", help="output .bytes filename.") args = parser.parse_args() do_write = args.all or bool(args.objnum) if do_write: write_mode = 'xb' if args.force: write_mode = 'wb' if not args.force and os.path.exists(args.OUT): print(f"file {args.OUT} exists. 
pass -f to force.", file=sys.stderr) return 1 with open(args.IN, 'rb') as in_f: content = PROBER.read(DstBytes(in_f)) matcher = ObjectMatcher(args) if isinstance(content, Level): result = matcher.filter_level(content) else: if not content.is_object_group: print(f"CustomObject is a {content.type!r}, but" f" CustomObject filtering is only supported for" f" object Groups.", file=sys.stderr) return 1 content.children = matcher.filter_objects(content.children) if not do_write: from .mkcustomobject import print_candidates print_candidates(matcher.matches) return 1 p = PrintContext(file=sys.stdout, flags=('groups', 'subobjects')) p.print_data_of(result) num_objs, num_groups = count_objects(matcher.matches) p(f"Removed matches: {len(matcher.matches)}") if num_objs != len(matcher.matches): p(f"Removed objects: {num_objs}") p(f"Removed groups: {num_groups}") buf = BytesIO() dbytes = DstBytes(buf) result.write(dbytes) with open(args.OUT, write_mode) as out_f: out_f.write(buf.getbuffer()) print(f"{len(buf.getbuffer())} bytes written") return 0 if __name__ == '__main__': exit(main()) # vim:set sw=4 ts=8 sts=4 et sr ft=python fdm=marker tw=0:
Python
0.000003
@@ -188,30 +188,50 @@ ort -PROBER as LEVEL_PROBER +Group%0Afrom distance.base import BaseObject %0Afro @@ -436,16 +436,49 @@ ber()%0A%0A%0A +PROBER.add_type('Group', Group)%0A%0A @PROBER. @@ -579,42 +579,18 @@ urn -None%0A%0A%0APROBER.extend(LEVEL_PROBER) +BaseObject %0A%0A%0AM @@ -4580,29 +4580,30 @@ -resul +conten t = matcher. @@ -5223,21 +5223,22 @@ data_of( -resul +conten t)%0A @@ -5550,21 +5550,22 @@ -resul +conten t.write(
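Decoded, the diff rewires the prober instead of extending LEVEL_PROBER: Group is registered explicitly, unknown sections fall back to BaseObject, and the result variable is renamed to content throughout main(). The reworked setup, per the hunks:

    from distance.levelobjects import Group
    from distance.base import BaseObject
    ...
    PROBER = BytesProber()

    PROBER.add_type('Group', Group)

    @PROBER.func
    def _detect_other(section):
        if section.magic == MAGIC_9:
            return Level
        return BaseObject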
1dbb1e0f8751f37271178665a727c4eefc49a88c
Remove subclassing of exception, since there is only one.
partner_firstname/exceptions.py
partner_firstname/exceptions.py
# -*- encoding: utf-8 -*-

# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from openerp import _, exceptions


class PartnerNameError(exceptions.ValidationError):
    def __init__(self, record, value=None):
        self.record = record
        self._value = value
        self.name = _("Error(s) with partner %d's name.") % record.id

    @property
    def value(self):
        raise NotImplementedError()


class EmptyNames(PartnerNameError):
    @property
    def value(self):
        return _("No name is set.")
Python
0
@@ -817,32 +817,26 @@ %0A%0Aclass -PartnerNameError +EmptyNames (excepti @@ -898,12 +898,28 @@ lue= -None +_(%22No name is set.%22) ):%0A @@ -1048,185 +1048,4 @@ .id%0A -%0A @property%0A def value(self):%0A raise NotImplementedError()%0A%0A%0Aclass EmptyNames(PartnerNameError):%0A @property%0A def value(self):%0A return _(%22No name is set.%22)%0A
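With only one concrete subclass, the decoded diff folds PartnerNameError into EmptyNames and turns the message into a default argument; the class that remains, per the hunks:

    class EmptyNames(exceptions.ValidationError):
        def __init__(self, record, value=_("No name is set.")):
            self.record = record
            self._value = value
            self.name = _("Error(s) with partner %d's name.") % record.id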
34ede8137f28db1836f2df2141d924f092d7372b
Remove debugging print statement
piecewise/piecewise/__main__.py
piecewise/piecewise/__main__.py
import argparse
from sqlalchemy import create_engine, MetaData
import piecewise.aggregate
import piecewise.config
import piecewise.ingest
import piecewise.query

def refine(config, args):
    modified_aggregations = []
    for agg in config.aggregations:
        if args.only_compute is not None and not agg.name in args.only_compute:
            continue
        modified_bins = []
        for b in agg.bins:
            if args.only_bins is not None and not b.label in args.only_bins:
                continue
            modified_bins.append(b)
        modified_stats = []
        for s in agg.statistics:
            if args.only_statistics is not None and not s.label in args.only_statistics:
                continue
            modified_stats.append(s)
        modified_agg = piecewise.aggregate.Aggregation(
                name = agg.name,
                statistics_table_name = agg.statistics_table_name,
                bins = modified_bins,
                statistics = modified_stats)
        modified_aggregations.append(modified_agg)
    return piecewise.aggregate.Aggregator(
            database_uri = config.database_uri,
            cache_table_name = config.cache_table_name,
            filters = config.filters,
            aggregations = modified_aggregations)

def do_ingest(args):
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    if not args.debug:
        piecewise.ingest.ingest(config)
    else:
        print "Displaying bigquery SQL instead of performing query"
        print config.ingest_bigquery_query()

def do_aggregate(args):
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    if not args.debug:
        piecewise.aggregate.aggregate(config)
    else:
        print "Displaying Postgres SQL instead of performing query"
        piecewise.aggregate.aggregate(config, args.debug)

def do_query(args):
    from piecewise.aggregate import AverageRTT
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    aggregation = None
    for agg in config.aggregations:
        if agg.name == args.aggregation:
            aggregation = agg
    if args.stats is not None:
        statistics = [piecewise.config.known_statistics[s] for s in args.stats]
    else:
        statistics = aggregation.statistics
    if args.bins is not None:
        bins = args.bins
    else:
        bins = dict()
    if args.filters is not None:
        filters = args.filters
    else:
        filters = dict()
    if not args.debug:
        results = piecewise.query.query(config, name, statistics, bins, filters)
        for row in results:
            print row
    else:
        print args
        engine = create_engine(config.database_uri)
        metadata = MetaData()
        engine.metadata = metadata
        selection = aggregation.selection(engine, metadata, bins, filters, statistics)
        print selection.compile(engine)

def do_load(args):
    do_ingest(args)
    do_aggregate(args)

def do_display_config(args):
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    print 'Postgres connection: {}'.format(config.database_uri)
    print 'Results cache table: {}'.format(config.cache_table_name)
    print 'Filters:'
    for filt in config.filters:
        print '\t{}'.format(filt)
    print
    print 'Aggregations:'
    for agg in config.aggregations:
        print '\t{}'.format(agg.name)
        print '\t* Bin dimensions'
        for b in agg.bins:
            print '\t\t{}: {}'.format(b.label, b)
        print '\t* Aggregate statistics'
        for s in agg.statistics:
            print '\t\t{}'.format(s)

def add_ingest_args(parser):
    pass

def add_aggregate_args(parser):
    pass

def split_string(string):
    return string.split(',')

def colon_dict(string):
    pairs = string.split(',')
    def as_pair(s):
        if ':' in s:
            return tuple(s.split(':', 1))
        else:
            return (s, '')
    return dict(as_pair(p) for p in pairs)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="piecewise",
            description="Download and aggregate m-lab internet performance data")
    parser.add_argument("--debug", action='store_true',
            help = 'Display rather than execute queries')
    parser.add_argument("--only-compute", type=split_string,
            help='Use only the named aggregations for this run')
    parser.add_argument("--only-bins", type=split_string,
            help='Use only the named bin dimensions for this run')
    parser.add_argument("--only-statistics", type=split_string,
            help='Use only the named statistics for this run')
    subparsers = parser.add_subparsers(help="Operation")

    ingest_parser = subparsers.add_parser('ingest', help='Pull data from BigQuery into postgres database')
    add_ingest_args(ingest_parser)
    ingest_parser.set_defaults(func=do_ingest)

    aggregate_parser = subparsers.add_parser('aggregate', help='Compute statistics from ingested internet performance data')
    add_aggregate_args(aggregate_parser)
    aggregate_parser.set_defaults(func=do_aggregate)

    display_config_parser = subparsers.add_parser("display-config", help='Display parsed configuration')
    display_config_parser.set_defaults(func=do_display_config)

    query_parser = subparsers.add_parser("query", help='Query statistics tables')
    query_parser.add_argument("-b", "--bins", help="Select and configure bins for query", type=colon_dict)
    query_parser.add_argument("-s", "--stats", help="Select statistics for query", type=split_string)
    query_parser.add_argument("-f", "--filters", help="Select and configure filters for query", type=colon_dict)
    query_parser.add_argument("aggregation", help="Select aggregation for query")
    query_parser.set_defaults(func=do_query)

    load_parser = subparsers.add_parser('load', help='Ingest and aggregate data in one run')
    add_ingest_args(load_parser)
    add_aggregate_args(load_parser)
    load_parser.set_defaults(func=do_load)

    args = parser.parse_args()
    args.func(args)
Python
0.000012
@@ -2696,27 +2696,8 @@ se:%0A - print args%0A
017889913a1dba443022ee032535bdc4cb40ddb6
Make nodepool git repo caching more robust
modules/openstack_project/files/nodepool/scripts/cache_git_repos.py
modules/openstack_project/files/nodepool/scripts/cache_git_repos.py
#!/usr/bin/env python

# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import urllib2

from common import run_local

URL = ('http://git.openstack.org/cgit/openstack-infra/config/plain/'
       'modules/openstack_project/files/review.projects.yaml')
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')


def main():
    # TODO(jeblair): use gerrit rest api when available
    data = urllib2.urlopen(URL).read()
    for line in data.split('\n'):
        # We're regex-parsing YAML so that we don't have to depend on the
        # YAML module which is not in the stdlib.
        m = PROJECT_RE.match(line)
        if m:
            project = 'git://git.openstack.org/%s' % m.group(1)
            print run_local(['git', 'clone', project, m.group(1)],
                            cwd='/opt/git')


if __name__ == '__main__':
    main()
Python
0.000122
@@ -620,18 +620,47 @@ %0Aimport -re +os.path%0Aimport re%0Aimport shutil %0Aimport @@ -882,16 +882,1158 @@ *)$')%0A%0A%0A +def clone_repo(project):%0A remote = 'git://git.openstack.org/%25s.git' %25 project%0A%0A # Clear out any existing target directory first, in case of a retry.%0A try:%0A shutil.rmtree(os.path.join('/opt/git', project))%0A except OSError:%0A pass%0A%0A # Try to clone the requested git repository.%0A (status, out) = run_local(%5B'git', 'clone', remote, project%5D,%0A status=True, cwd='/opt/git')%0A%0A # If it claims to have worked, make sure we can list branches.%0A if status == 0:%0A (status, moreout) = run_local(%5B'git', 'branch', '-a'%5D, status=True,%0A cwd=os.path.join('/opt/git', project))%0A out = '%5Cn'.join((out, moreout))%0A%0A # If that worked, try resetting to HEAD to make sure it's there.%0A if status == 0:%0A (status, moreout) = run_local(%5B'git', 'reset', '--hard', 'HEAD'%5D,%0A status=True,%0A cwd=os.path.join('/opt/git', project))%0A out = '%5Cn'.join((out, moreout))%0A%0A # Status of 0 imples all the above worked, 1 means something failed.%0A return (status, out)%0A%0A%0A def main @@ -2354,114 +2354,198 @@ -project = 'git://git.openstack.org/%25s' %25 m.group(1)%0A print run_local(%5B'git', 'clone', project, +(status, out) = clone_repo(m.group(1))%0A print out%0A if status != 0:%0A print 'Retrying to clone %25s' %25 m.group(1)%0A (status, out) = clone_repo( m.gr @@ -2550,18 +2550,17 @@ group(1) -%5D, +) %0A @@ -2560,32 +2560,42 @@ +print out%0A cwd= @@ -2590,30 +2590,105 @@ -cwd='/opt/git' + if status != 0:%0A raise Exception('Failed to clone %25s' %25 m.group(1) )%0A%0A%0Aif _
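Decoded, the diff factors the clone into a clone_repo() helper that verifies the result (branch listing, hard reset to HEAD) and lets main() retry once before giving up; reconstructed from the hunks, abridged with ..., Python 2 print statements as in the record:

    def clone_repo(project):
        remote = 'git://git.openstack.org/%s.git' % project

        # Clear out any existing target directory first, in case of a retry.
        try:
            shutil.rmtree(os.path.join('/opt/git', project))
        except OSError:
            pass

        # Try to clone the requested git repository.
        (status, out) = run_local(['git', 'clone', remote, project],
                                  status=True, cwd='/opt/git')
        ...
        return (status, out)

    # and inside main():
    (status, out) = clone_repo(m.group(1))
    print out
    if status != 0:
        print 'Retrying to clone %s' % m.group(1)
        (status, out) = clone_repo(m.group(1))
        print out
        if status != 0:
            raise Exception('Failed to clone %s' % m.group(1))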
fc3bd4b8f6ce1f688afa86975619b11f64e3cd02
Initialize BaseStaticSiteRenderer.client to None
django_medusa/renderers/base.py
django_medusa/renderers/base.py
from __future__ import print_function
from django.conf import settings
from django.test.client import Client

__all__ = ['COMMON_MIME_MAPS', 'BaseStaticSiteRenderer']


# Since mimetypes.get_extension() gets the "first known" (alphabetically),
# we get supid behavior like "text/plain" mapping to ".bat". This list
# overrides some file types we will surely use, to eliminate a call to
# mimetypes.get_extension() except in unusual cases.
COMMON_MIME_MAPS = {
    "text/plain": ".txt",
    "text/html": ".html",
    "text/javascript": ".js",
    "application/javascript": ".js",
    "text/json": ".json",
    "application/json": ".json",
    "text/css": ".css",
}


class RenderError(Exception):
    """ Exception thrown during a rendering error. """
    pass


class BaseStaticSiteRenderer(object):
    """
    This default renderer writes the given URLs (defined in get_paths())
    into static files on the filesystem by getting the view's response
    through the Django testclient.
    """

    @classmethod
    def initialize_output(cls):
        """
        Things that should be done only once to the output directory BEFORE
        rendering occurs (i.e. setting up a config file, creating dirs,
        creating an external resource, starting an atomic deploy, etc.)

        Management command calls this once before iterating over all
        renderer instances.
        """
        pass

    @classmethod
    def finalize_output(cls):
        """
        Things that should be done only once to the output directory AFTER
        rendering occurs (i.e. writing end of config file, setting up
        permissions, calling an external "deploy" method, finalizing an
        atomic deploy, etc.)

        Management command calls this once after iterating over all
        renderer instances.
        """
        pass

    def get_paths(self):
        """ Override this in a subclass to define the URLs to process """
        raise NotImplementedError

    @property
    def paths(self):
        """ Property that memoizes get_paths. """
        p = getattr(self, "_paths", None)
        if not p:
            p = self.get_paths()
            self._paths = p
        return p

    def _render(self, path=None, view=None):
        client = self.client
        if not client:
            client = Client()

        response = client.get(path)
        if response.status_code != 200:
            raise RenderError(
                "Path {0} did not return status 200".format(path))
        return response

    @classmethod
    def get_outpath(cls, path, content_type):
        # Get non-absolute path
        path = path[1:] if path.startswith('/') else path

        # Resolves to a file, not a directory
        if not path.endswith('/'):
            return path

        mime = content_type.split(';', 1)[0]
        return os.path.join(path, cls.get_dirsuffix(content_type))

    @classmethod
    def get_dirsuffix(cls, content_type):
        return ('index' +
                (COMMON_MIME_MAPS.get(mime, mimetypes.guess_extension(mime))
                 or '.html'))

    def render_path(self, path=None, view=None):
        raise NotImplementedError

    def generate(self):
        if getattr(settings, "MEDUSA_MULTITHREAD", False):
            from multiprocessing import Pool, cpu_count

            print("Generating with up to %d processes..." % cpu_count())
            pool = Pool(cpu_count())

            generator = PageGenerator(self)
            retval = pool.map(
                generator,
                ((path, None) for path in self.paths),
                chunksize=1
            )
            pool.close()
        else:
            self.client = Client()
            retval = map(self.render_path, self.paths)
        return retval


class PageGenerator(object):
    """
    Helper class to bounce things back into the renderer instance, since
    multiprocessing is unable to transfer a bound method object into a
    pickle.
    """
    def __init__(self, renderer):
        self.renderer = renderer

    def __call__(self, args):
        self.renderer.render_path(*args)
Python
0.000022
@@ -993,16 +993,67 @@ %0A %22%22%22 +%0A def __init__(self):%0A self.client = None %0A%0A @c
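Decoded, the hunk just gives the class a constructor so that self.client is always defined before _render() and generate() read it:

    class BaseStaticSiteRenderer(object):
        """
        ... (docstring unchanged) ...
        """
        def __init__(self):
            self.client = None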
0b273ce13135e157267009631d460835387d9975
Add Django 3.1 + 3.2 (#327)
djangosnippets/settings/base.py
djangosnippets/settings/base.py
import os

import dj_database_url
from django.contrib import messages
from django.urls import reverse


def user_url(user):
    return reverse("cab_author_snippets", kwargs={"username": user.username})


PROJECT_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)

SITE_ID = 1
SITE_NAME = "djangosnippets.org"

ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "djangosnippets.org,www.djangosnippets.org").split(",")

DEBUG = False

TIME_ZONE = "America/Chicago"
LANGUAGE_CODE = "en-us"
USE_I18N = True
USE_TZ = False

DEFAULT_FROM_EMAIL = "no-reply@djangosnippets.org"
SERVER_EMAIL = "no-reply@djangosnippets.org"
EMAIL_SUBJECT_PREFIX = "[djangosnippets] "

ABSOLUTE_URL_OVERRIDES = {
    "auth.user": user_url,
}

FORCE_WWW = False

ROOT_URLCONF = "djangosnippets.urls"

CACHE_KEY_PREFIX = "djangosnippets"
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_KEY_PREFIX
CACHE_MIDDLEWARE_SECONDS = 60

INSTALLED_APPS = (
    "django.contrib.auth",
    "django.contrib.admin",
    "django_comments",
    "django.contrib.contenttypes",
    "django.contrib.flatpages",
    "django.contrib.messages",
    "django.contrib.sessions",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.bitbucket",
    "allauth.socialaccount.providers.github",
    "allauth.socialaccount.providers.twitter",
    "cab",
    "comments_spamfighter",
    "ratings",
    "taggit",
    "captcha",
    "django_extensions",
    "rest_framework",
)

MIDDLEWARE = (
    "django.middleware.security.SecurityMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    # 'django.middleware.cache.UpdateCacheMiddleware',
    "django.middleware.common.CommonMiddleware",
    # 'django.middleware.cache.FetchFromCacheMiddleware',
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
    "ratelimitbackend.middleware.RateLimitMiddleware",
)

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(PROJECT_ROOT, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.request",
            ],
        },
    }
]

STATIC_URL = "/assets/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "..", "assets", "static")
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, "static"),)

SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"

MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"

ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_USERNAME_MIN_LENGTH = 3
ACCOUNT_ADAPTER = "djangosnippets.adapters.DjangoSnippetsAccountAdapter"
SOCIALACCOUNT_ADAPTER = "djangosnippets.adapters.DjangoSnippetsSocialAccountAdapter"
SOCIALACCOUNT_AUTO_SIGNUP = False
LOGIN_REDIRECT_URL = "/"
ACCOUNT_LOGOUT_REDIRECT_URL = "/"

COMMENTS_APP = "cab"

CAB_VERSIONS = (
    ("3.0", "3.0"),
    ("2.2", "2.2"),
    ("2.1", "2.1"),
    ("2.0", "2.0"),
    ("1.11", "1.11"),
    ("1.10", "1.10"),
    ("1.9", "1.9"),
    ("1.8", "1.8"),
    ("1.7", "1.7"),
    ("1.6", "1.6"),
    ("1.5", "1.5"),
    ("1.4", "1.4"),
    ("1.3", "1.3"),
    ("1.2", "1.2"),
    ("1.1", "1.1"),
    ("1.0", "1.0"),
    ("0.96", ".96"),
    ("0.95", "Pre .96"),
    ("0", "Not specified"),
)

# keys for localhost and 127.0.0.1
RECAPTCHA_PUBLIC_KEY = "6LcXj_oSAAAAAPQ3u23Y6MqQqd2yMYtnHqa7Zj61"
RECAPTCHA_PRIVATE_KEY = "6LcXj_oSAAAAAFN31LR-F31lwFSQAcJgsg1pE5WP"
RECAPTCHA_USE_SSL = True

AUTHENTICATION_BACKENDS = (
    "ratelimitbackend.backends.RateLimitModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)

DISQUS_WEBSITE_SHORTNAME = "djangosnippets"
DISQUS_USE_SINGLE_SIGNON = True

MESSAGE_TAGS = {
    messages.DEBUG: "secondary",
    messages.INFO: "info",
    messages.SUCCESS: "success",
    messages.WARNING: "warning",
    messages.ERROR: "alert",
}

DATABASES = {"default": dj_database_url.config(default="postgres:///djangosnippets")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    "DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"]
}

DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
Python
0
@@ -3615,24 +3615,64 @@ ERSIONS = (%0A + (%223.2%22, %223.2%22),%0A (%223.1%22, %223.1%22),%0A (%223.0%22,
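Decoded, the hunk prepends two entries for the new Django releases, so CAB_VERSIONS now starts:

    CAB_VERSIONS = (
        ("3.2", "3.2"),
        ("3.1", "3.1"),
        ("3.0", "3.0"),
        ...
    )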
f963cb4edbfabb54eeff76e45ede391aaf5c482f
Fix bug in install script
install.py
install.py
#!/usr/bin/env python

# Script to automatically set up the environment

# link names are absolute (with '~' allowed, which will be expanded)
# target names are relative with respect to this python script
# target directories should have a trailing '/'
links = {
    # tmux
    '~/.tmux.conf': 'tmux.conf',

    # screen
    '~/.screenrc': 'screenrc',

    # vim
    '~/.vimrc': 'vimrc',
    '~/.vim': 'vim/',

    # zsh
    '~/.zshrc': 'zshrc',
    '~/.zsh': 'zsh/',

    # git
    '~/.gitconfig': 'gitconfig',
    '~/.gitignore_global': 'gitignore_global',

    # sbt
    '~/.sbt': 'sbt/',

    # rbenv
    '~/.rbenv': 'rbenv/',

    # rubygems
    '~/.gemrc': 'gemrc',

    # rtorrent
    '~/.rtorrent.rc': 'rtorrent.rc',
    '~/.rtorrent': 'rtorrent/',

    # axel
    '~/.axelrc': 'axelrc'
}

# shell commands (array of (msg, cmd))

# shell commands to run before linking
precmds = {}

# shell commands to run after linking
postcmds = {
    'git update-submodules': 'Installing/updating submodules'
}

####################

import sys, os, subprocess

# colors
class colors:
    __tty = sys.stdout.isatty()
    NONE = ''
    MAGENTA = '\033[95m' if __tty else ''
    YELLOW = '\033[93m' if __tty else ''
    BLUE = '\033[94m' if __tty else ''
    GREEN = '\033[92m' if __tty else ''
    RED = '\033[91m' if __tty else ''
    RESET = '\033[0m' if __tty else ''

def make_color_printer(color = colors.NONE):
    def color_print(msg, end = '\n'):
        sys.stdout.write(color + msg + colors.RESET + end)
    return color_print

INFO = make_color_printer(colors.NONE)
DETAIL = make_color_printer(colors.YELLOW)
OK = make_color_printer(colors.BLUE)
SUCC = make_color_printer(colors.GREEN)
WARN = make_color_printer(colors.MAGENTA)
FAIL = make_color_printer(colors.RED)
NEWLINE = lambda: sys.stdout.write('\n')

def self_path():
    return os.path.dirname(os.path.realpath(__file__))

def exists(path):
    '''Returns true iff the path exists'''
    path = os.path.expanduser(path)
    return os.path.exists(path)

def islink(path):
    '''Returns true iff the path is a symlink'''
    return os.path.islink(os.path.expanduser(path))

def linkdest(path):
    '''Returns the absolute path to the destination of the symlink'''
    path = os.path.expanduser(path)
    reldest = os.readlink(path)
    return os.path.join(os.path.dirname(path), reldest)

def link(source, link_name):
    '''Returns true iff linking was unsuccessful'''
    unsuccessful = False
    source = os.path.join(self_path(), source)
    if not exists(link_name) and islink(link_name):
        WARN('[!] invalid link %s -> %s' % (link_name, linkdest(link_name)))
        unsuccessful = True
    elif not exists(link_name):
        OK('[*] creating link %s -> %s' % (link_name, source))
        os.symlink(source, os.path.expanduser(link_name))
    elif exists(link_name) and not islink(link_name):
        WARN('[!] %s already exists but is a regular file or directory' % link_name)
        unsuccessful = True
    elif not (linkdest(link_name) == source):
        WARN('[!] incorrect link %s -> %s' % (link_name, linkdest(link_name)))
        unsuccessful = True
    else:
        INFO('[ ] link exists %s -> %s' % (link_name, source))
    return unsuccessful

def process_links():
    unsuccessful = False
    for dest, source in links.items():
        if link(source, dest):
            unsuccessful = True
    NEWLINE()
    if unsuccessful:
        FAIL('FAILURE: some links were not successfully set up')
    else:
        SUCC('SUCCESS: all links have been set up')
    return unsuccessful

def process_shell(cmds):
    if not cmds:
        return False
    unsuccessful = False
    for cmd, msg in cmds.items():
        INFO('%s [' % msg, end = '')
        DETAIL('%s' % cmd, end = '')
        INFO(']... ', end = '')
        sys.stdout.flush() # force printing of above line
        ret = subprocess.call(cmd, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        OK('SUCCESS') if ret == 0 else FAIL('FAILURE')
        if ret != 0:
            unsuccessful = True
    NEWLINE()
    if unsuccessful:
        FAIL('FAILURE: some tasks were not run successfully')
    else:
        SUCC('SUCCESS: all tasks executed')

def main():
    pre_fail = process_shell(precmds)
    if precmds:
        NEWLINE()
    link_fail = process_links()
    if postcmds:
        NEWLINE()
    post_fail = process_shell(postcmds)
    NEWLINE()
    if any((pre_fail, link_fail, post_fail)):
        FAIL('FAILURE: environment has not been set up successfully')
    else:
        SUCC('SUCCESS: environment has been set up')

if __name__ == '__main__':
    main()
Python
0
@@ -888,10 +888,10 @@ s = -%7B%7D +%5B%5D %0A%0A# @@ -937,22 +937,23 @@ tcmds = -%7B +%5B %0A +( 'git upd @@ -967,17 +967,17 @@ modules' -: +, 'Instal @@ -1001,18 +1001,19 @@ modules' -%0A%7D +)%0A%5D %0A%0A###### @@ -3690,24 +3690,16 @@ in cmds -.items() :%0A
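Decoded, the diff turns both command containers into lists of (cmd, msg) tuples — matching the "array of (msg, cmd)" comment and, presumably, giving a deterministic execution order — and drops the .items() call in the loop accordingly:

    # shell commands to run before linking
    precmds = []

    # shell commands to run after linking
    postcmds = [
        ('git update-submodules', 'Installing/updating submodules')
    ]
    ...
    for cmd, msg in cmds: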
ebfd3d465c376f0eb9eb664f93d6656b232c7867
Add 'styp' major brand to compatible brands, and make compatible_brands a set.
isobmff.py
isobmff.py
from base64 import b64encode
import bitstring
from bitstring import BitStream
import json
import struct

def _to_json(o):
    if isinstance(o, bytes):
        try:
            return o.decode("ASCII")
        except:
            return b64encode(o)
    return o.__dict__

class Box(object):
    def __init__(self, type):
        if isinstance(type, str):
            type = type.encode("ASCII")
        self.type = type

    @property
    def size(self):
        return 8

    @property
    def bytes(self):
        return struct.pack("!I4s", self.size, self.type)

    def __repr__(self):
        return json.dumps(self, default=_to_json)

class StypBox(Box):
    def __init__(self, major_brand, minor_version=0, compatible_brands = None):
        super().__init__("styp")
        if isinstance(major_brand, str):
            major_brand = major_brand.encode("ASCII")
        self.major_brand = major_brand
        self.minor_version = minor_version
        self.compatible_brands = compatible_brands if compatible_brands else []

    @property
    def size(self):
        return super().size + 8 + len(self.compatible_brands) * 4

    @property
    def bytes(self):
        binary = super().bytes + struct.pack("!4sII", self.major_brand,
                                             self.minor_version,
                                             len(self.compatible_brands))
        for brand in self.compatible_brands:
            binary += struct.pack("!4s", brand)
        return binary

class FullBox(Box):
    def __init__(self, type, version, flags):
        super().__init__(type)
        self.version = version
        self.flags = flags

    @property
    def size(self):
        return Box.size.fget(self) + 4

    @property
    def bytes(self):
        return Box.bytes.fget(self) + struct.pack("!BBH", self.version,
                                                  self.flags >> 16,
                                                  self.flags & 0xFF)
        return binary

class SidxReference(object):
    class ReferenceType:
        MEDIA = 0
        INDEX = 1

    def __init__(self, reference_type):
        self.reference_type = reference_type
        self.referenced_size = 0
        self.subsegment_duration = 0
        self.starts_with_sap = 0
        self.sap_type = 0
        self.sap_delta_time = 0

    @property
    def size(self):
        return 12

    @property
    def bytes(self):
        return bitstring.pack("bool, uint:31, uint:32, bool, uint:3, uint:28",
                              self.reference_type, self.referenced_size,
                              self.subsegment_duration, self.starts_with_sap,
                              self.sap_type, self.sap_delta_time).bytes

class SidxBox(FullBox):
    def __init__(self, version=0):
        super().__init__("sidx", version, 0)
        self.reference_id = 0
        self.timescale = 90000
        self.earliest_presentation_time = 0
        self.first_offset = 0
        self.references = []

    @property
    def size(self):
        total = super().size + 12
        if self.version == 0:
            total += 8
        else:
            total += 16
        for reference in self.references:
            total += reference.size
        return total

    @property
    def bytes(self):
        binary = super().bytes + struct.pack("!II", self.reference_id,
                                             self.timescale)
        if self.version == 0:
            binary += struct.pack("!QQ", self.earliest_presentation_time,
                                  self.first_offset)
        else:
            binary += struct.pack("!II", self.earliest_presentation_time,
                                  self.first_offset)
        binary += struct.pack("!HH", 0, len(self.references))
        for reference in self.references:
            binary += reference.bytes
        return binary
Python
0
@@ -243,16 +243,66 @@ code(o)%0A + if isinstance(o, set):%0A return list(o)%0A retu @@ -315,16 +315,16 @@ _dict__%0A - %0Aclass B @@ -776,19 +776,17 @@ e_brands - = += None):%0A @@ -987,32 +987,33 @@ = minor_version%0A +%0A self.com @@ -1033,28 +1033,82 @@ s = -compatible_brands if +set()%0A self.compatible_brands.add(major_brand)%0A for brand in com @@ -1126,15 +1126,145 @@ nds -else %5B%5D +or %5B%5D:%0A if isinstance(brand, str):%0A brand = brand.encode(%22ASCII%22)%0A self.compatible_brands.add(brand) %0A%0A
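Decoded, the diff teaches _to_json to serialize sets and rebuilds compatible_brands as a set seeded with the (ASCII-encoded) major brand; the two changed spots, re-indented from the hunks:

    def _to_json(o):
        if isinstance(o, bytes):
            ...
        if isinstance(o, set):
            return list(o)
        return o.__dict__

    # and in StypBox.__init__:
    self.compatible_brands = set()
    self.compatible_brands.add(major_brand)
    for brand in compatible_brands or []:
        if isinstance(brand, str):
            brand = brand.encode("ASCII")
        self.compatible_brands.add(brand)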
86304c4fd78a1ee86caf4f3e181b2d6e48578de1
Fix a bug in extracting phi/psi torsions.
ProteinFeatureAnalyzer/features/BackboneMicroEnvironmentFeature.py
ProteinFeatureAnalyzer/features/BackboneMicroEnvironmentFeature.py
import os

import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d

from .Feature import Feature
from . import Geometry


class BackboneMicroEnvironmentFeature(Feature):
  '''The BackboneMicroEnvironmentFeature analyzes the micro environments
  of backbones of each residue. The micro environemt is formed by the
  residue it self and its nearest non-connected residue.'''

  def __init__(self):
    super().__init__()

  def extract(self, input_path, total_num_threads=1, my_id=0):
    '''Extract features from structures in the input path.'''
    for f in self.list_my_jobs(input_path, total_num_threads, my_id):
      if f.endswith('.pdb'):
        self.extract_from_one_file(os.path.join(input_path, f))

  def extract_from_one_file(self, pdb_file):
    structure = self.structure_from_pdb_file(pdb_file)

    for model in structure:
      nearest_nb_list = Geometry.get_nearest_nonbonded_residues(model)

      for res1, res2 in nearest_nb_list:
        feature_dict ={}

        # Get the torsions
        try:
          feature_dict['phi1'] = Geometry.get_phi(res1.get_parent(), res1)
          feature_dict['psi1'] = Geometry.get_psi(res1.get_parent(), res1)
          feature_dict['phi2'] = Geometry.get_phi(res2.get_parent(), res1)
          feature_dict['psi2'] = Geometry.get_psi(res2.get_parent(), res1)
        except:
          continue

        # Get the relative position of the second residue
        s_matrix, origin = Geometry.get_residue_stub_matrix(res1)
        shift_global = res2['CA'].get_coord() - res1['CA'].get_coord()
        feature_dict['shift'] = np.matmul(np.array(s_matrix.T), np.array(shift_global))

        # Get the relative orientation of the second residue
        s_matrix2, origin2 = Geometry.get_residue_stub_matrix(res2)
        rot_matrix = np.dot(s_matrix.T, s_matrix2)  # Rotation matrix in the frame of the first residue
        feature_dict['theta_x'], feature_dict['theta_y'], feature_dict['theta_z'] = \
            Geometry.rotation_matrix_to_euler_angles(np.array(rot_matrix))

        self.feature_list.append(feature_dict)

    #print(self.feature_list)

  def save(self, data_path):
    '''Save the data into a csv file.'''
    data = [(d['phi1'], d['psi1'], d['phi2'], d['psi2'],
             d['shift'][0], d['shift'][1], d['shift'][2],
             d['theta_x'], d['theta_y'], d['theta_z'])
            for d in self.feature_list]

    df = pd.DataFrame(data)
    self.append_to_csv(df, os.path.join(data_path, 'bb_micro_env_features.csv'))

  def load(self, data_path):
    '''Load data from a csv file.'''
    df = pd.read_csv(os.path.join(data_path, 'bb_micro_env_features.csv'), header=None)

    for index, row in df.iterrows():
      self.feature_list.append({'phi1': row[0], 'psi1': row[1],
                                'phi2': row[2], 'psi2': row[3],
                                'shift': np.array([row[4], row[5], row[6]]),
                                'theta_x': row[7], 'theta_y': row[8], 'theta_z': row[9]})

  def visualize(self):
    pass

  def plot_nearst_nonbonded_list(self, nearest_nb_list):
    '''A debugging function to print the nearest nonbonded residue list.'''
    X = [pair[0]['CA'].get_coord()[0] for pair in nearest_nb_list]
    Y = [pair[0]['CA'].get_coord()[1] for pair in nearest_nb_list]
    Z = [pair[0]['CA'].get_coord()[2] for pair in nearest_nb_list]
    U = [pair[1]['CA'].get_coord()[0] - pair[0]['CA'].get_coord()[0] for pair in nearest_nb_list]
    V = [pair[1]['CA'].get_coord()[1] - pair[0]['CA'].get_coord()[1] for pair in nearest_nb_list]
    W = [pair[1]['CA'].get_coord()[2] - pair[0]['CA'].get_coord()[2] for pair in nearest_nb_list]

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.quiver(X, Y, Z, U, V, W)
    plt.show()

  def plot_shifts(self):
    '''Plot the distribution of the translational shifts from the CA atom
    of the first residue to the CA atom of the second residue.
    '''
    # Data points
    X = [d['shift'][0] for d in self.feature_list]
    Y = [d['shift'][1] for d in self.feature_list]
    Z = [d['shift'][2] for d in self.feature_list]

    # Postions of N and C
    n_ca_c_angle = 110.86 * np.pi / 180

    fig =plt.figure()
    ax = fig.gca(projection='3d')
    ax.scatter(X, Y, Z, c='green', s=5)
    ax.quiver([0], [0], [0], [1.32869], [0], [0], color='blue')
    ax.quiver([0], [0], [0], [1.52326 * np.cos(n_ca_c_angle)],
              [1.52326 * np.sin(n_ca_c_angle)], [0], color='red')
    plt.show()

  def plot_shift_length_histogram(self):
    '''Plot a histogram of the lengths of translational shifts.'''
    lengths = [np.linalg.norm(d['shift']) for d in self.feature_list]
    hist, bin_edges = np.histogram(lengths, bins=0.5 * np.arange(20))

    plt.bar(bin_edges[0:-1] - 0.25, hist, width=0.5, edgecolor='black')
    plt.show()

  def scatter_plot_two_features(self, feature1_l, feature2_l, axis=None):
    '''Make a scatter plot of two features. feature1_l and feature2_l
    are lambda expressions for pick a feature.
    '''
    f1 = [feature1_l(d) for d in self.feature_list]
    f2 = [feature2_l(d) for d in self.feature_list]

    plt.scatter(f1, f2, s=5)
    if axis:
      plt.axis(axis)
    plt.show()

  def scatter_plot_three_features(self, feature1_l, feature2_l, feature3_l, axis=None):
    '''Make a scatter plot of three features, given their lambda expressions.'''
    f1 = [feature1_l(d) for d in self.feature_list]
    f2 = [feature2_l(d) for d in self.feature_list]
    f3 = [feature3_l(d) for d in self.feature_list]

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.scatter(f1, f2, f3, s=5)
    if axis:
      ax.set_xlim(axis[0], axis[1])
      ax.set_ylim(axis[2], axis[3])
      ax.set_zlim(axis[4], axis[5])
    plt.show()
Python
0
@@ -1350,33 +1350,33 @@ et_parent(), res -1 +2 ) %0A fea @@ -1426,33 +1426,33 @@ et_parent(), res -1 +2 ) %0A excep
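Decoded, the fix is two characters: phi2/psi2 describe the second residue but were computed with res1 as the residue argument; after the patch both calls use res2:

          feature_dict['phi2'] = Geometry.get_phi(res2.get_parent(), res2)
          feature_dict['psi2'] = Geometry.get_psi(res2.get_parent(), res2)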
b0133c948555c821a9dcae1df4119a2bfcc19304
fix building
packages/dependencies/librubberband.py
packages/dependencies/librubberband.py
{
	'repo_type' : 'git',
	'url' : 'https://github.com/breakfastquay/rubberband.git',
	'download_header' : [
		'https://raw.githubusercontent.com/DeadSix27/python_cross_compile_script/master/additional_headers/ladspa.h',
	],
	'env_exports' : {
		'AR' : '{cross_prefix_bare}ar',
		'CC' : '{cross_prefix_bare}gcc',
		'PREFIX' : '{target_prefix}',
		'RANLIB' : '{cross_prefix_bare}ranlib',
		'LD' : '{cross_prefix_bare}ld',
		'STRIP' : '{cross_prefix_bare}strip',
		'CXX' : '{cross_prefix_bare}g++',
	},
	'configure_options' : '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static',
	'build_options' : '{make_prefix_options}',
	'needs_make_install' : False,
	'run_post_build' : [
		'cp -fv lib/* "{target_prefix}/lib"',
		'cp -frv rubberband "{target_prefix}/include"',
		'cp -fv rubberband.pc.in "{pkg_config_path}/rubberband.pc"',
		'sed -i.bak "s|%PREFIX%|{target_prefix_sed_escaped}|" "{pkg_config_path}/rubberband.pc"',
		'sed -i.bak \'s/-lrubberband *$/-lrubberband -lfftw3 -lsamplerate -lstdc++/\' "{pkg_config_path}/rubberband.pc"',
	],
	'depends_on' : [
		'libsndfile',
	],
	'_info' : { 'version' : '1.8.1', 'fancy_name' : 'librubberband' },
}
Python
0.000003
@@ -241,17 +241,16 @@ %7B%0A%09%09'AR' - : '%7Bcros @@ -274,17 +274,16 @@ ,%0A%09%09'CC' - : '%7Bcros @@ -312,17 +312,16 @@ 'PREFIX' - : '%7Btarg @@ -343,17 +343,16 @@ 'RANLIB' - : '%7Bcros @@ -384,13 +384,8 @@ 'LD' - : '%7B @@ -416,18 +416,16 @@ %09'STRIP' - : '%7Bcros @@ -457,12 +457,8 @@ CXX' - : '%7B @@ -481,16 +481,137 @@ e%7Dg++',%0A +%09%09# 'PKG_CONFIG': 'pkg-config --static',%0A%09%09'SNDFILE_LIBS': '-lsndfile -lopus -lFLAC -lvorbis -lvorbisenc -logg -lspeex',%0A %09%7D,%0A%09'co @@ -679,41 +679,8 @@ fix%7D - --disable-shared --enable-static ',%0A%09
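Decoded, the diff normalizes the spacing of the env_exports keys ('AR' : becomes 'AR':, etc.), leaves a commented-out PKG_CONFIG hint plus explicit sndfile link libraries, and drops the static/shared switches from configure_options; the changed entries, per the hunks (tab indentation as in the record):

	'CXX': '{cross_prefix_bare}g++',
	# 'PKG_CONFIG': 'pkg-config --static',
	'SNDFILE_LIBS': '-lsndfile -lopus -lFLAC -lvorbis -lvorbisenc -logg -lspeex',
	...
	'configure_options' : '--host={target_host} --prefix={target_prefix}',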
a3cfc0b7fbda0d85b3c483793c258b4d0bf35c94
Return to symmetric724 for shore test
dipy/reconst/tests/test_shore_metrics.py
dipy/reconst/tests/test_shore_metrics.py
import numpy as np
from scipy.special import genlaguerre
from numpy.testing import (assert_almost_equal,
                           assert_equal,
                           run_module_suite)
from dipy.data import get_gtab_taiwan_dsi, default_sphere
from dipy.reconst.shore import (ShoreModel, shore_matrix, shore_indices,
                                shore_order)
from dipy.sims.voxel import (multi_tensor, all_tensor_evecs, multi_tensor_odf,
                             multi_tensor_rtop, multi_tensor_msd,
                             multi_tensor_pdf)


def test_shore_metrics():
    gtab = get_gtab_taiwan_dsi()
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angl = [(0, 0), (60, 0)]
    S, _ = multi_tensor(gtab, mevals, S0=100.0, angles=angl,
                        fractions=[50, 50], snr=None)

    # test shore_indices
    n = 7
    l = 6
    m = -4
    radial_order, c = shore_order(n, l, m)
    n2, l2, m2 = shore_indices(radial_order, c)
    assert_equal(n, n2)
    assert_equal(l, l2)
    assert_equal(m, m2)

    radial_order = 6
    c = 41
    n, l, m = shore_indices(radial_order, c)
    radial_order2, c2 = shore_order(n, l, m)
    assert_equal(radial_order, radial_order2)
    assert_equal(c, c2)

    # since we are testing without noise we can use higher order and lower
    # lambdas, with respect to the default.
    radial_order = 8
    zeta = 700
    lambdaN = 1e-12
    lambdaL = 1e-12
    asm = ShoreModel(gtab, radial_order=radial_order,
                     zeta=zeta, lambdaN=lambdaN, lambdaL=lambdaL)
    asmfit = asm.fit(S)
    c_shore = asmfit.shore_coeff

    cmat = shore_matrix(radial_order, zeta, gtab)
    S_reconst = np.dot(cmat, c_shore)

    # test the signal reconstruction
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 4)

    # test if the analytical integral of the pdf is equal to one
    integral = 0
    for n in range(int((radial_order)/2 + 1)):
        integral += c_shore[n] * (np.pi**(-1.5) * zeta ** (-1.5) * genlaguerre(n, 0.5)(0)) ** 0.5

    assert_almost_equal(integral, 1.0, 10)

    # test if the integral of the pdf calculated on a discrete grid is equal to
    # one
    pdf_discrete = asmfit.pdf_grid(17, 40e-3)
    integral = pdf_discrete.sum()
    assert_almost_equal(integral, 1.0, 1)

    # compare the shore pdf with the ground truth multi_tensor pdf
    v = default_sphere.vertices
    radius = 10e-3
    pdf_shore = asmfit.pdf(v * radius)
    pdf_mt = multi_tensor_pdf(v * radius, mevals=mevals,
                              angles=angl, fractions=[50, 50])
    nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_shore) ** 2)) / (pdf_mt.sum())
    assert_almost_equal(nmse_pdf, 0.0, 2)

    # compare the shore rtop with the ground truth multi_tensor rtop
    rtop_shore_signal = asmfit.rtop_signal()
    rtop_shore_pdf = asmfit.rtop_pdf()
    assert_almost_equal(rtop_shore_signal, rtop_shore_pdf, 9)
    rtop_mt = multi_tensor_rtop([.5, .5], mevals=mevals)
    assert_equal(rtop_mt / rtop_shore_signal < 1.10 and rtop_mt / rtop_shore_signal > 0.95, True)

    # compare the shore msd with the ground truth multi_tensor msd
    msd_mt = multi_tensor_msd([.5, .5], mevals=mevals)
    msd_shore = asmfit.msd()
    assert_equal(msd_mt / msd_shore < 1.05 and msd_mt / msd_shore > 0.95, True)


if __name__ == '__main__':
    run_module_suite()
Python
0.000028
@@ -229,22 +229,18 @@ an_dsi, -defaul +ge t_sphere @@ -2557,20 +2557,52 @@ -v = default_ +sphere = get_sphere('symmetric724')%0A v = sphe
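Read back from the hunks above, the change swaps the module-level default_sphere for an explicit lookup of the symmetric724 sphere; this is a sketch of just the touched lines (the annotation comment is ours, and the rest of test_shore_metrics is unchanged):

from dipy.data import get_gtab_taiwan_dsi, get_sphere

    # compare the shore pdf with the ground truth multi_tensor pdf
    sphere = get_sphere('symmetric724')  # was: v = default_sphere.vertices
    v = sphere.vertices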
a96cb89524f2fa17a015011d972d396e509a1079
Add code for getting and releasing a database connection
journal.py
journal.py
# -*- coding: utf-8 -*- from flask import Flask import os import psycopg2 from contextlib import closing DB_SCHEMA = """ DROP TABLE IF EXISTS entries; CREATE TABLE entries ( id serial PRIMARY KEY, title VARCHAR (127) NOT NULL, text TEXT NOT NULL, created TIMESTAMP NOT NULL ) """ app = Flask(__name__) @app.route('/') def hello(): return u'Hello world!' app.config['DATABASE'] = os.environ.get( 'DATABASE_URL', 'dbname=learning_journal user=elizabethrives' ) def connect_db(): """Return a connection to the configured database""" return psycopg2.connect(app.config['DATABASE']) def init_db(): """Initialize the database using DB_SCHEMA WARNING: executing this function will drop existing tables. """ with closing(connect_db()) as db: db.cursor().execute(DB_SCHEMA) db.commit() if __name__ == '__main__': app.run(debug=True)
Python
0
@@ -100,16 +100,36 @@ closing%0A +from flask import g%0A %0A%0ADB_SCH @@ -865,16 +865,404 @@ mmit()%0A%0A +def get_database_connection():%0A db = getattr(g, 'db', None)%0A if db is None:%0A g.db = db = connect_db()%0A return db%0A%0A@app.teardown_request%0Adef teardown_request(exception):%0A db = getattr(g, 'db', None)%0A if db is not None:%0A if exception and isinstance(exception, psycopg2.Error):%0A db.rollback()%0A else:%0A db.commit()%0A db.close()%0A%0A %0Aif __na
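Reconstructed from the hunks, the commit adds `from flask import g` beside the existing imports and appends the two functions below after init_db(): the connection is created lazily once per request and cached on Flask's g, then committed (or rolled back on a psycopg2 error) and closed at teardown.

from flask import g


def get_database_connection():
    db = getattr(g, 'db', None)
    if db is None:
        g.db = db = connect_db()
    return db


@app.teardown_request
def teardown_request(exception):
    db = getattr(g, 'db', None)
    if db is not None:
        if exception and isinstance(exception, psycopg2.Error):
            db.rollback()
        else:
            db.commit()
        db.close()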
6fa8cfa6f02971f773724a212e3024c4a4e31e4f
Add uniqueness constraints to Checksum and SlocCount
web/models.py
web/models.py
# Copyright (C) 2013 Matthieu Caneill <matthieu.caneill@gmail.com> # Stefano Zacchiroli <zack@upsilon.cc> # # This file is part of Debsources. # # Debsources is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sqlalchemy import Column, ForeignKey, Integer, String, Index, Enum from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base # this list should be kept in sync with # http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-VCS-fields VCS_TYPES = ("arch", "bzr", "cvs", "darcs", "git", "hg", "mtn", "svn") # this list should be kept in sync with languages supported by sloccount. A # good start is http://www.dwheeler.com/sloccount/sloccount.html (section # "Basic concepts"). Others have been added to the Debian package via patches. LANGUAGES = ( # sloccount 2.26 languages "ada", "asm", "awk", "sh", "ansic", "cpp", "cs", "csh", "cobol", "exp", "fortran", "f90", "haskell", "java", "lex", "lisp", "makefile", "ml", "modula3", "objc", "pascal", "perl", "php", "python", "ruby", "sed", "sql", "tcl", "yacc", # enhancements from Debian patches, version 2.26-5 "erlang", "jsp", "vhdl", "xml", ) Base = declarative_base() class Package(Base): """ a source package """ __tablename__ = 'packages' id = Column(Integer, primary_key=True) name = Column(String, index=True, unique=True) versions = relationship("Version", backref="package", cascade="all, delete-orphan", passive_deletes=True) def __init__(self, name): self.name = name def __repr__(self): return self.name class Version(Base): """ a version of a source package """ __tablename__ = 'versions' id = Column(Integer, primary_key=True) vnumber = Column(String) package_id = Column(Integer, ForeignKey('packages.id', ondelete="CASCADE"), nullable=False) area = Column(String(8), index=True) # main, contrib, non-free vcs_type = Column(Enum(*VCS_TYPES, name="vcs_types")) vcs_url = Column(String) vcs_browser = Column(String) def __init__(self, version, package): self.vnumber = version self.package_id = package.id def __repr__(self): return self.vnumber Index('ix_versions_package_id_vnumber', Version.package_id, Version.vnumber) class SuitesMapping(Base): """ Debian suites (squeeze, wheezy, etc) mapping with source package versions """ __tablename__ = 'suitesmapping' id = Column(Integer, primary_key=True) sourceversion_id = Column(Integer, ForeignKey('versions.id', ondelete="CASCADE"), nullable=False) suite = Column(String, index=True) class Checksum(Base): __tablename__ = 'checksums' id = Column(Integer, primary_key=True) version_id = Column(Integer, ForeignKey('versions.id', ondelete="CASCADE"), nullable=False) path = Column(String, nullable=False) # path/whitin/source/pkg sha256 = Column(String(64), nullable=False, index=True) def __init__(self, version, path, sha256): self.version_id = version.id self.path = path self.sha256 = sha256 class BinaryPackage(Base): __tablename__ = 'binarypackages' id = Column(Integer, primary_key=True) 
name = Column(String, index=True, unique=True) versions = relationship("BinaryVersion", backref="binarypackage", cascade="all, delete-orphan", passive_deletes=True) def __init__(self, name): self.name = name def __repr__(self): return self.name class BinaryVersion(Base): __tablename__ = 'binaryversions' id = Column(Integer, primary_key=True) vnumber = Column(String) binarypackage_id = Column(Integer, ForeignKey('binarypackages.id', ondelete="CASCADE"), nullable=False) sourceversion_id = Column(Integer, ForeignKey('versions.id', ondelete="CASCADE"), nullable=False) def __init__(self, vnumber, area="main"): self.vnumber = vnumber def __repr__(self): return self.vnumber class SlocCount(Base): __tablename__ = 'sloccounts' id = Column(Integer, primary_key=True) sourceversion_id = Column(Integer, ForeignKey('versions.id', ondelete="CASCADE"), nullable=False) language = Column(Enum(*LANGUAGES, name="language_names"), nullable=False) count = Column(Integer, nullable=False) def __init__(self, version, lang, locs): self.sourceversion_id = version.id self.language = lang self.count = locs
Python
0
@@ -856,16 +856,56 @@ eignKey, + UniqueConstraint%0Afrom sqlalchemy import Integer @@ -3520,16 +3520,79 @@ ecksums' +%0A __table_args__ = (UniqueConstraint('version_id', 'path'),) %0A%0A id @@ -5181,16 +5181,89 @@ counts'%0A + __table_args__ = (UniqueConstraint('sourceversion_id', 'language'),)%0A %0A
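Per the hunks, the sqlalchemy import gains UniqueConstraint (now split over two import lines) and each model declares a composite uniqueness constraint via __table_args__; fields elided with ... are unchanged:

from sqlalchemy import Column, ForeignKey, UniqueConstraint
from sqlalchemy import Integer, String, Index, Enum

class Checksum(Base):
    __tablename__ = 'checksums'
    __table_args__ = (UniqueConstraint('version_id', 'path'),)
    ...

class SlocCount(Base):
    __tablename__ = 'sloccounts'
    __table_args__ = (UniqueConstraint('sourceversion_id', 'language'),)
    ...

A given source version thus stores at most one checksum per path and one line count per language.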
69705079398391cdc392b18dcd440fbc3b7404fd
Set celery to ignore results
celery_cgi.py
celery_cgi.py
import os import logging from celery import Celery from temp_config.set_environment import DeployEnv runtime_env = DeployEnv() runtime_env.load_deployment_environment() redis_server = os.environ.get('REDIS_HOSTNAME') redis_port = os.environ.get('REDIS_PORT') celery_tasks = [ 'hms_flask.modules.hms_controller', 'pram_flask.tasks' ] redis = 'redis://' + redis_server + ':' + redis_port + '/0' logging.info("Celery connecting to redis server: " + redis) celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks) celery.conf.update( CELERY_ACCEPT_CONTENT=['json'], CELERY_TASK_SERIALIZER='json', CELERY_RESULT_SERIALIZER='json', CELERY_IGNORE_RESULT=False, CELERY_TRACK_STARTED=True, )
Python
0
@@ -697,12 +697,11 @@ ULT= -Fals +Tru e,%0A
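The single hunk flips one value; after it, the update block reads as below (the inline annotation is ours), and with CELERY_IGNORE_RESULT=True Celery stops writing task return values to the Redis result backend:

celery.conf.update(
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_TASK_SERIALIZER='json',
    CELERY_RESULT_SERIALIZER='json',
    CELERY_IGNORE_RESULT=True,  # was False
    CELERY_TRACK_STARTED=True,
)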
9bdc1dbc37a67d726f808e724b862e7de84fa06a
Change function name
changedate.py
changedate.py
""" Calcular Data a partir de uma quantidade de minutos """ def alterar_data(dataEnt, op, minutosEnt): """ Calcular nova data """ dataEnt, horaEnt = dataEnt.split(" ", 2) diaIni, mesIni, anoIni = dataEnt.split("/", 3) horaIni, minuIni = horaEnt.split(":", 2) # transformar tudo em minutos # converter horas em minutos totais minutosTotais = (int(horaIni) * 60) + int(minuIni) + minutosEnt print("Total de Minutos: ", minutosTotais) # 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15 horas_minutos_conv = minutosTotais / 60 print(int(horas_minutos_conv)) # 90h e 15 min i, d = divmod(horas_minutos_conv, 1) resto_minutos = d * 60 print(int(resto_minutos)) # 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24 total_dias = horas_minutos_conv / 24 print(total_dias) i, d = divmod(total_dias, 1) xtotal_dias = i xtotal_minutos = d print("Total Dias", int(xtotal_dias)) # 3d 3.75 (0.75 * 24) = 18 h minutosHora = xtotal_minutos * 24 print(int(xtotal_dias), " Dias", int(minutosHora), " horas", int(resto_minutos), " minutos") # data_alterada = '01/01/2012 12:00' essa data sera calculada # print(data_alterada) if __name__ == ("__main__"): alterar_data("31/12/2016 23:35", "+", 25)
Python
0.000092
@@ -59,28 +59,27 @@ %22%0A%0A%0Adef -alterar +change _dat -a +e (dataEnt @@ -1300,20 +1300,19 @@ -alterar +change _dat -a +e (%2231 @@ -1334,12 +1334,13 @@ , %22+%22, 2 -5 +00 )%0A%0A
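The hunks rename the function and raise the test argument from 25 to 200 minutes; only the touched lines are sketched here:

def change_date(dataEnt, op, minutosEnt):
    ...

if __name__ == ("__main__"):
    change_date("31/12/2016 23:35", "+", 200)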
ec29f12fb69ff115cc29702f7230355547de2271
Fix getting owners
src/python/expedient/clearinghouse/project/models.py
src/python/expedient/clearinghouse/project/models.py
''' @author jnaous ''' from django.db import models from expedient.common.permissions.models import Permittee, ObjectPermission from expedient.common.permissions.utils import permissions_save_override,\ permissions_delete_override from expedient.clearinghouse.aggregate.models import Aggregate from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User class ProjectManager(models.Manager): """Manager for L{Project} instances. Add methods to retrieve project querysets. """ def get_for_user(self, user): """Return projects for which C{user} has some permission. @param user: The user whose projects we are looking for. @type user: C{User}. """ if user.is_superuser: return self.all() permittee = Permittee.objects.get_as_permittee(user) proj_ids = ObjectPermission.objects.filter_for_class( klass=Project, permittees=permittee).values_list( "object_id", flat=True) return self.filter(id__in=list(proj_ids)) class Project(models.Model): ''' A project is a collection of users working on the same set of slices. @cvar objects: A L{ProjectManager} instance. @ivar name: The name of the project @type name: L{str} @ivar description: Short description of the project @type description: L{str} @ivar aggregates: Read-only property returning all aggregates that can be used by the project (i.e. for which the project has the "can_use_aggregate" permission). @type aggregates: C{QuerySet} of L{Aggregate}s @ivar researchers: Read-only property returning all users that have the 'researcher' role for the project. @type researchers: C{QuerySet} of C{User}s. @ivar owners: Read-only property returning all users that have the 'owner' role for the project. @type owners: C{QuerySet} of C{User}s. @ivar members: Read-only property returning all users that have some permission in the project. @type members: C{QuerySet} of C{User}s. @ivar members_as_permittees: Read-only property returning all users that have some permission in the project as Permittee instances. @type members_as_permittees: C{QuerySet} of L{Permittee}s. ''' objects = ProjectManager() name = models.CharField(max_length=200, unique=True) description = models.TextField() save = permissions_save_override( permittee_kw="user", model_func=lambda: Project, create_perm="can_create_project", edit_perm="can_edit_project", delete_perm="can_delete_project", ) delete = permissions_delete_override( permittee_kw="user", model_func=lambda: Project, delete_perm="can_delete_project", ) def _get_aggregates(self): """Get all aggregates that can be used by the project (i.e. for which the project has the "can_use_aggregate" permission). 
""" return ObjectPermission.objects.get_permitted_objects( klass=Aggregate, perm_names=["can_use_aggregate"], permittee=self, ) aggregates=property(_get_aggregates) def _get_researchers(self): """Get all users who have the 'researcher' role for the project""" from expedient.clearinghouse.roles.models import ProjectRole return ProjectRole.objects.get_users_with_role('researcher', self) researchers=property(_get_researchers) def _get_owners(self): """Get all users who have the 'owner' role for the project""" from expedient.clearinghouse.roles.models import ProjectRole return ProjectRole.objects.get_users_with_role('owner', self) owners=property(_get_owners) def _get_members(self): """Get all users who have some permission in the project.""" user_ids = self._get_permittees().values_list("object_id", flat=True) return User.objects.filter(pk__in=list(user_ids)) members=property(_get_members) def _get_permittees(self): """Get all permittees that have some permission in the project.""" return Permittee.objects.filter_for_class(User).filter( objectpermission__object_type= ContentType.objects.get_for_model(Project), objectpermission__object_id=self.id, ).distinct() members_as_permittees=property(_get_permittees) def __unicode__(self): s = u"Project %s" % self.name return s
Python
0
@@ -393,16 +393,90 @@ ort User +%0Afrom expedient.clearinghouse.aggregate.utils import get_aggregate_classes %0A%0Aclass @@ -3142,31 +3142,148 @@ -return ObjectPermission +# Permissions are given to the leaf classes%0A agg_ids = %5B%5D%0A agg_classes = get_aggregate_classes()%0A permittee = Permittee .obj @@ -3291,24 +3291,27 @@ cts.get_ +as_ permitte d_object @@ -3306,48 +3306,180 @@ itte -d_objects(%0A klass=Aggregate,%0A +e(self)%0A for agg_class in agg_classes:%0A agg_ids.extend(%0A ObjectPermission.objects.filter_for_class(%0A agg_class,%0A @@ -3494,16 +3494,21 @@ perm +ission_ _name -s=%5B += %22can @@ -3522,17 +3522,16 @@ gregate%22 -%5D ,%0A @@ -3528,32 +3528,40 @@ e%22,%0A + + permittee=self,%0A @@ -3557,23 +3557,143 @@ ttee -=self,%0A +s=permittee,%0A ).values_list(%22object_id%22, flat=True)%0A )%0A return Aggregate.objects.filter(pk__in=agg_ids )%0A
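Stitching the hunks together (keyword argument names as read off the diff, so treat them as a reconstruction), the commit imports get_aggregate_classes and rewrites _get_aggregates to query each leaf aggregate class separately, since permissions are granted on the leaf classes rather than on Aggregate itself:

from expedient.clearinghouse.aggregate.utils import get_aggregate_classes

    def _get_aggregates(self):
        # Permissions are given to the leaf classes
        agg_ids = []
        agg_classes = get_aggregate_classes()
        permittee = Permittee.objects.get_as_permittee(self)
        for agg_class in agg_classes:
            agg_ids.extend(
                ObjectPermission.objects.filter_for_class(
                    agg_class,
                    permission_name="can_use_aggregate",
                    permittees=permittee,
                ).values_list("object_id", flat=True)
            )
        return Aggregate.objects.filter(pk__in=agg_ids)
    aggregates=property(_get_aggregates)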
a10729414971ee454276960fcc1a736c08b3aef7
Fix syntax error
corehq/tests/noseplugins/uniformresult.py
corehq/tests/noseplugins/uniformresult.py
"""A plugin to format test names uniformly for easy comparison Usage: # collect django tests COLLECT_ONLY=1 ./manage.py test -v2 --settings=settings 2> tests-django.txt # collect nose tests ./manage.py test -v2 --collect-only 2> tests-nose.txt # clean up django test output: s/skipped\ \'.*\'$/ok/ # sort each output file # diff tests-django.txt tests-nose.txt """ from inspect import isfunction from types import ModuleType from nose.case import FunctionTestCase from nose.plugins import Plugin def uniform_description(test): if type(test).__name__ == "DocTestCase": return test._dt_test.name if isinstance(test, ModuleType): return test.__name__ if isinstance(test, type) or isfunction(test): return "%s:%s" % (test.__module__, test.__name__) if isinstance(test, FunctionTestCase): descriptor = test.descriptor or test.test return "%s:%s %s" % ( descriptor.__module__, descriptor.__name__, test.arg, ) name = "%s:%s.%s" % ( test.__module__, type(test).__name__, test._testMethodName ) return name #return sys.modules[test.__module__].__file__ class UniformTestResultPlugin(Plugin): """Format test descriptions for easy comparison """ name = "uniform-results" enabled = True def configure(self, options, conf): """Do not call super (always enabled)""" def describeTest(self, test): return uniform_description(test.test)
Python
0.999991
@@ -1,8 +1,9 @@ +r %22%22%22A plu
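The whole fix is one leading character: the module docstring becomes a raw string, presumably because its sed recipes contain backslash sequences such as `\ ` that are not valid escapes in an ordinary string literal:

r"""A plugin to format test names uniformly for easy comparison
...
# clean up django test output: s/skipped\ \'.*\'$/ok/
...
"""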
50805c2da2889c13485096f53de27af27a06391a
Implement tree preorder traversal
all-domains/data-structures/trees/tree-order-traversal/solution.py
all-domains/data-structures/trees/tree-order-traversal/solution.py
# https://www.hackerrank.com/challenges/tree-preorder-traversal # Python 2 """ Node is defined as self.left (the left child of the node) self.right (the right child of the node) self.data (the value of the node) """ def preOrder(tree): if tree is None: return print(tree.data) return preOrder(tree.left) or preOrder(tree.right) class Node: def __init__(self, left=None, right=None, data=None): self.left = left self.right = right self.data = data one = Node(data=1) four = Node(data=4) six = Node(data=6) five = Node(left=one, right=four, data=5) two = Node(left=six, data=2) three = Node(left=five, right=two, data=3) preOrder(three)
Python
0.000002
@@ -210,16 +210,17 @@ de)%0A%22%22%22%0A +%0A def preO @@ -279,16 +279,17 @@ ee.data) +, %0A ret @@ -337,16 +337,20 @@ right)%0A%0A +%22%22%22%0A class No @@ -680,8 +680,12 @@ (three)%0A +%22%22%22%0A
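Applied, the hunks put a trailing comma on the Python 2 print statement, so successive node values come out space-separated on one line as HackerRank expects, and wrap the local Node class and test calls in a string so only preOrder is submitted (the inline comment is ours):

def preOrder(tree):
    if tree is None:
        return
    print(tree.data),  # Python 2: trailing comma suppresses the newline
    return preOrder(tree.left) or preOrder(tree.right)

"""
class Node:
    ...
preOrder(three)
"""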
1097889524cf7deb4b87722d3aedd27c071117c1
Simplify exception logging in template render method.
app/soc/views/template.py
app/soc/views/template.py
#!/usr/bin/env python2.5 # # Copyright 2011 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing the boiler plate required to construct templates """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] import logging import traceback from django.template import loader from soc.views.helper import context as context_helper class Template(object): """Template class that facilitates the rendering of templates. """ def __init__(self, data): self.data = data def render(self): """Renders the template to a string. Uses the context method to retrieve the appropriate context, uses the self.templatePath() method to retrieve the template that should be used. """ try: context = context_helper.default(self.data) context.update(self.context()) rendered = loader.render_to_string(self.templatePath(), dictionary=context) except Exception, e: logging.error(traceback.format_exc(e)) raise e return rendered def context(self): """Returns the context for the current template. """ return {} def templatePath(self): """Returns the path to the template that should be used in render(). Subclasses should override this method. """ raise NotImplementedError()
Python
0
@@ -764,25 +764,8 @@ ging -%0Aimport traceback %0A%0Afr @@ -1444,37 +1444,19 @@ ng.e -rror(traceback.format_exc +xception (e) -) %0A
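After both hunks the traceback import is gone and the handler shrinks to the lines below; logging.exception records the active traceback on its own, so the manual traceback.format_exc call was redundant:

        except Exception, e:
            logging.exception(e)
            raise e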
5c84f6b733da72f1bab67631b6e7a8638a42e298
Remove help_text from the "How did you hear about ..." question
applications/questions.py
applications/questions.py
from collections import OrderedDict from django import forms from django.core.urlresolvers import reverse def get_organiser_menu(city): """ Get menu entries for organiser-visible pages """ menu = [ { 'title': 'Applications', 'url': reverse('applications:applications', args=[city]) }, { 'title': 'Messaging', 'url': reverse('applications:communication', args=[city]) }, ] return menu def generate_form_from_questions(questions): fields = OrderedDict() for question in questions: options = { 'label': question.title, 'help_text': question.help_text or None, 'required': question.is_required, } name = 'question_{}'.format(question.pk) if question.question_type == 'text': options['widget'] = forms.Textarea if question.question_type == 'choices': choices = ((x, x) for x in question.choices.split(';')) options['choices'] = choices if question.question_type in ['paragraph', 'text']: fields[name] = forms.CharField(**options) elif question.question_type == 'choices': if question.is_multiple_choice: options['widget'] = forms.CheckboxSelectMultiple fields[name] = forms.MultipleChoiceField(**options) else: options['widget'] = forms.RadioSelect fields[name] = forms.ChoiceField(**options) if question.question_type == 'email': fields[name] = forms.EmailField(**options) fields['newsletter_optin'] = forms.ChoiceField( widget=forms.RadioSelect, label='Do you want to receive news from the Django Girls team?', help_text='No spam, pinky swear! Only helpful programming tips and ' 'latest news from the Django Girls world. We send it once every two weeks.', required=True, choices=(('yes', 'Yes, please!'), ('no', 'No, thank you.')) ) return fields def get_applications_for_event(event, state=None, rsvp_status=None, order=None): """ Return a QuerySet of Application objects for a given event. Raises Form.DoesNotExist if Form for event does not yet exist. """ from applications.models import Application # circular import applications = ( Application.objects .filter(form__event=event) .order_by('id') .select_related('form') .prefetch_related('answer_set', 'scores', 'scores__user', 'form__event', 'scores__application') ) if rsvp_status: applications = applications.filter( state='accepted', rsvp_status__in=rsvp_status ) elif state: applications = applications.filter(state__in=state) if order: is_reversed = True if order[0] == '-' else False order = order[1:] if order[0] == '-' else order if order == 'average_score': # here is an exception for the average_score, because we also want to get # the standard deviation into account in this sorting applications = sorted( applications, key=lambda app: (getattr(app, order), -app.stdev()), reverse=is_reversed) else: applications = sorted( applications, key=lambda app: getattr(app, order), reverse=is_reversed) return applications def random_application(request, event, prev_application): """ Get a new random application for a particular event, that hasn't been scored by the request user. 
""" from applications.models import Application # circular import return Application.objects.filter( form__event=event ).exclude( pk=prev_application.id ).exclude( scores__user=request.user ).order_by('?').first() DEFAULT_QUESTIONS = [ { "title": "What's your name?", "question_type": "paragraph", }, { "title": "Your e-mail address:", "question_type": "email", }, { "title": "Your phone number:", "help_text": "Include your country prefix", "question_type": "paragraph", }, { "title": "Where are you from?", "help_text": "City, Country", "question_type": "paragraph", }, { "title": "How old are you?", "question_type": "paragraph", "is_required": False, }, { "title": "Which operating system do you use?", "question_type": "choices", "choices": "Mac OS X; Windows; Linux", "is_multiple_choice": True, }, { "title": "What is your current level of experience with programming?", "question_type": "choices", "choices": "I'm a total beginner, I don't know anything about it; " "I've tried some HTML or CSS before; I've tried some JavaScript " "before; I've done a few lessons of Python; I've built a website " "before; I work as a programmer", "is_multiple_choice": True, }, { "title": "If you checked anything other than beginner, could you " "tell us a bit more about your programming knowledge?", "question_type": "text", "is_required": False, }, { "title": "What is your current occupation?", "help_text": "What is your current job? Are you a student?", "question_type": "text", }, { "title": "Why do you want to attend the workshop?", "help_text": "Tell us about your motivations and aspirations.", "question_type": "text", }, { "title": "How are you planning to share what you've learnt with " "others?", "help_text": "Django Girls is a volunteer-run organisation and we " "look for people who are active and can help us help more women get " "into the field. We want you to share what you learn at the workshop " "with others in different ways: by organising a Django Girls event " "in your city, talking about Django Girls on your local meetups, " "writing a blog or simply teaching your friends.", "question_type": "text", "is_required": False }, { "title": "How did you hear about Django Girls?", "help_text": "Django Girls is a volunteer-run organisation and we " "look for people who are active and can help us help more women get " "into the field. We want you to share what you learn at the workshop " "with others in different ways: by organising a Django Girls event " "in your city, talking about Django Girls on your local meetups, " "writing a blog or simply teaching your friends.", "question_type": "choices", "choices": "Facebook; Twitter; From a friend; PyLadies", "is_required": False, "is_multiple_choice": True, }, { "title": "I acknowledge that some of my data will be used on Third Party Sites and Service.", "help_text": "Data collected through this form is used only for the " "purpose of Django Girls events. We're using Third Party Sites " "and Services to make it happen: for example, we're using " "Mandrill to send you emails. Don't worry: We don't share your data with spammers, " "and we don't sell it! 
More info on our Privacy policy " "<a href='/privacy-cookies/'>here</a>.", "question_type": "choices", "choices": "Yes", "is_required": True, "is_multiple_choice": True, }, { "title": "It is important that all attendees comply with the " "<a href='/coc/'>Django Girls Code of Conduct</a>", "question_type": "choices", "choices": "I've read and understood the Django Girls Code of Conduct", "is_required": True, "is_multiple_choice": True, } ]
Python
0.000001
@@ -6393,452 +6393,8 @@ ?%22,%0A - %22help_text%22: %22Django Girls is a volunteer-run organisation and we %22%0A %22look for people who are active and can help us help more women get %22%0A %22into the field. We want you to share what you learn at the workshop %22%0A %22with others in different ways: by organising a Django Girls event %22%0A %22in your city, talking about Django Girls on your local meetups, %22%0A %22writing a blog or simply teaching your friends.%22,%0A
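With the hunk applied the question keeps only its choices, as below. The dropped help_text was a verbatim copy of the help text on the preceding "How are you planning to share..." question, so nothing meaningful is lost:

    {
        "title": "How did you hear about Django Girls?",
        "question_type": "choices",
        "choices": "Facebook; Twitter; From a friend; PyLadies",
        "is_required": False,
        "is_multiple_choice": True,
    },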
a6ed56b37bba3f5abff73c297a8a20271d73cab2
Add configure call to random_agent
example/random-agent/random-agent.py
example/random-agent/random-agent.py
#!/usr/bin/env python import argparse import logging import sys import gym import universe # register the universe environments from universe import wrappers logger = logging.getLogger() def main(): parser = argparse.ArgumentParser(description=None) parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.') args = parser.parse_args() if args.verbosity == 0: logger.setLevel(logging.INFO) elif args.verbosity >= 1: logger.setLevel(logging.DEBUG) env = gym.make('flashgames.NeonRace-v0') # Restrict the valid random actions. (Try removing this and see # what happens when the agent is given full control of the # keyboard/mouse.) env = wrappers.SafeActionSpace(env) observation_n = env.reset() while True: # your agent here # # Try sending this instead of a random action: ('KeyEvent', 'ArrowUp', True) action_n = [env.action_space.sample() for ob in observation_n] observation_n, reward_n, done_n, info = env.step(action_n) env.render() return 0 if __name__ == '__main__': sys.exit(main())
Python
0.000001
@@ -576,16 +576,100 @@ ce-v0')%0A + env.configure(remotes=1) # automatically creates a local docker container%0A %0A # Re
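Post-diff, the environment is configured immediately after creation; per the comment added in the hunk, remotes=1 automatically creates a local docker container to back the remote:

env = gym.make('flashgames.NeonRace-v0')
env.configure(remotes=1)  # automatically creates a local docker container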
cd23780fdc39003f2affe7352bc3253f958faaa5
Change assertion so it works with pytest (don't know what its problem is...)
src/zeit/content/cp/browser/tests/test_centerpage.py
src/zeit/content/cp/browser/tests/test_centerpage.py
import mock import zeit.cms.testing import zeit.content.cp import zope.testbrowser.testing class PermissionsTest(zeit.cms.testing.BrowserTestCase): layer = zeit.content.cp.testing.layer def setUp(self): super(PermissionsTest, self).setUp() zeit.content.cp.browser.testing.create_cp(self.browser) self.browser.getLink('Checkin').click() self.producing = zope.testbrowser.testing.Browser() self.producing.addHeader('Authorization', 'Basic producer:producerpw') def test_normal_user_may_not_delete(self): b = self.browser b.open( 'http://localhost/++skin++vivi/repository/online/2007/01/island') self.assertNotEllipsis('...<a...island/@@delete.html...', b.contents) def test_producing_may_delete(self): b = self.producing b.open( 'http://localhost/++skin++vivi/repository/online/2007/01/island') self.assertEllipsis('...<a...island/@@delete.html...', b.contents) def test_normal_user_may_not_retract(self): b = self.browser with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi: pi().published = True b.open( 'http://localhost/++skin++vivi/repository/online/2007/01/' 'island') self.assertNotEllipsis('...<a...island/@@retract...', b.contents) def test_producing_may_retract(self): b = self.producing with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi: pi().published = True b.open( 'http://localhost/++skin++vivi/repository/online/2007/01/' 'island') self.assertEllipsis('...<a...island/@@retract...', b.contents)
Python
0
@@ -694,34 +694,20 @@ ssertNot -Ellipsis('...%3Ca... +In(' island/@ @@ -710,35 +710,32 @@ nd/@@delete.html -... ', b.contents)%0A%0A @@ -1302,34 +1302,20 @@ ssertNot -Ellipsis('...%3Ca... +In(' island/@ @@ -1314,35 +1314,32 @@ island/@@retract -... ', b.contents)%0A%0A
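The hunks rewrite only the two negative assertions: assertNotEllipsis with its '...' wildcards becomes a plain substring check that works under pytest, while the positive assertEllipsis calls are left alone:

        self.assertNotIn('island/@@delete.html', b.contents)
        ...
        self.assertNotIn('island/@@retract', b.contents)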