column        dtype          min      max
commit        stringlengths  40       40
subject       stringlengths  1        3.25k
old_file      stringlengths  4        311
new_file      stringlengths  4        311
old_contents  stringlengths  0        26.3k
lang          stringclasses  3 values
proba         float64        0        1
diff          stringlengths  0        7.82k
8b9a67d92377e44d6f53ab32661160543e4c3912
Return 0 rather than None from get_curr_stat
ichnaea/content/tasks.py
ichnaea/content/tasks.py
from datetime import timedelta, datetime

from sqlalchemy import func
from sqlalchemy.exc import IntegrityError

from ichnaea.content.models import (
    Stat,
    STAT_TYPE,
)
from ichnaea.models import (
    Cell,
    CellMeasure,
    Wifi,
    WifiMeasure,
)
from ichnaea.tasks import (
    DatabaseTask,
    daily_task_days,
)
from ichnaea.worker import celery


def histogram_query(session, model, min_day, max_day):
    query = session.query(
        func.count(model.id)).filter(
        model.created < max_day).filter(
        model.created >= min_day)
    return query.first()[0]


def add_stat(session, name, day, value):
    stat_key = STAT_TYPE[name]
    query = session.query(Stat.value).filter(
        Stat.key == stat_key).filter(
        Stat.time == day - timedelta(days=1))
    result = query.first()
    before = 0
    if result is not None:
        before = int(result[0])

    # on duplicate key, do a no-op change
    stmt = Stat.__table__.insert(
        on_duplicate='time=time').values(
        key=stat_key, time=day, value=before + int(value))
    session.execute(stmt)


def incr_stat(session, name, incr, date=datetime.utcnow().date()):
    stat_key = STAT_TYPE[name]
    result = get_curr_stat(session, name)
    cumulative = incr
    if result is not None:
        cumulative += result

    # on duplicate key, update existing
    stmt = Stat.__table__.insert(
        on_duplicate='value=%s' % cumulative).values(
        key=stat_key, time=date, value=cumulative)
    session.execute(stmt)


def get_curr_stat(session, name, date=datetime.utcnow().date()):
    stat_key = STAT_TYPE[name]
    query = session.query(Stat.value).filter(
        Stat.key == stat_key).filter(
        Stat.time <= date).order_by(
        Stat.time.desc())
    result = query.first()
    if result is not None:
        return int(result[0])
    else:
        return None


@celery.task(base=DatabaseTask, bind=True)
def cell_histogram(self, ago=1):
    day, max_day = daily_task_days(ago)
    try:
        with self.db_session() as session:
            value = histogram_query(session, CellMeasure, day, max_day)
            add_stat(session, 'cell', day, value)
            session.commit()
            return 1
    except IntegrityError as exc:
        self.heka_client.raven('error')
        return 0
    except Exception as exc:  # pragma: no cover
        raise self.retry(exc=exc)


@celery.task(base=DatabaseTask, bind=True)
def wifi_histogram(self, ago=1):
    day, max_day = daily_task_days(ago)
    try:
        with self.db_session() as session:
            value = histogram_query(session, WifiMeasure, day, max_day)
            add_stat(session, 'wifi', day, value)
            session.commit()
            return 1
    except IntegrityError as exc:
        self.heka_client.raven('error')
        return 0
    except Exception as exc:  # pragma: no cover
        raise self.retry(exc=exc)


@celery.task(base=DatabaseTask, bind=True)
def unique_cell_histogram(self, ago=1):
    day, max_day = daily_task_days(ago)
    try:
        with self.db_session() as session:
            value = histogram_query(session, Cell, day, max_day)
            add_stat(session, 'unique_cell', day, value)
            session.commit()
            return 1
    except IntegrityError as exc:
        self.heka_client.raven('error')
        return 0
    except Exception as exc:  # pragma: no cover
        raise self.retry(exc=exc)


@celery.task(base=DatabaseTask, bind=True)
def unique_wifi_histogram(self, ago=1):
    day, max_day = daily_task_days(ago)
    try:
        with self.db_session() as session:
            value = histogram_query(session, Wifi, day, max_day)
            add_stat(session, 'unique_wifi', day, value)
            session.commit()
            return 1
    except IntegrityError as exc:
        self.heka_client.raven('error')
        return 0
    except Exception as exc:  # pragma: no cover
        raise self.retry(exc=exc)
Python
0.007913
@@ -1195,22 +1195,26 @@ me%5D%0A -result +cumulative = get_c @@ -1240,86 +1240,15 @@ ame) -%0A cumulative = incr%0A if result is not None:%0A cumulative += result + + incr %0A%0A @@ -1807,20 +1807,17 @@ return -None +0 %0A%0A@celer
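Decoded, the escaped hunks above simplify `incr_stat` and make `get_curr_stat` return 0 instead of None. A reconstruction of the affected lines after the commit (whitespace and elided context are inferred, not verbatim from the repository):

```python
def incr_stat(session, name, incr, date=datetime.utcnow().date()):
    stat_key = STAT_TYPE[name]
    # get_curr_stat now always returns an int, so the None check is gone
    cumulative = get_curr_stat(session, name) + incr
    ...

def get_curr_stat(session, name, date=datetime.utcnow().date()):
    ...
    result = query.first()
    if result is not None:
        return int(result[0])
    else:
        return 0  # was: return None
```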
84bd860ec54a1adfcce0584a23263080af635372
Remove debug code
nats_helper/__init__.py
nats_helper/__init__.py
import asyncio
import signal
import time

from nats.aio.client import Client as NatsClient


def require_connect_async(func):
    async def wrapper(self, *args, **kwargs):
        try:
            if not self.connected and self._connect_params is not None:
                await self.connect_async(**self._connect_params)
            return await func(self, *args, **kwargs)
        except:
            print('ERRA')
            import traceback
            traceback.print_exc()
            raise
    return wrapper


def require_connect_threadsafe(func):
    def wrapper(self, *args, **kwargs):
        if not self.connected and self._connect_params is not None:
            asyncio.run_coroutine_threadsafe(self.connect_async(**self._connect_params), self._loop).result()
        return func(self, *args, **kwargs)
    return wrapper


class NatsHelper(object):
    __slots__ = ['_nc', '_loop', '_log', '_name', '_connect_params', '_run_exclusive',
                 '_reconnect_count', '_reconnect_timeout', '_subscriptions', '_threaded']

    def __init__(self, loop, logger, name=None, connect_params=None, reconnect_count=10, reconnect_timeout=5):
        """
        :param loop: Asyncio event loop
        :param logger: logger instance, logging.getLogger(...)
        :param name: Client name
        """
        self._name = name
        self._nc = NatsClient()
        self._loop = loop
        self._log = logger
        self._connect_params = connect_params
        self._reconnect_count = reconnect_count
        self._reconnect_timeout = reconnect_timeout
        self._run_exclusive = None
        self._threaded = False
        self._subscriptions = {}

    @property
    def connected(self):
        return self._nc.is_connected

    @property
    def threaded(self):
        return self._threaded

    @threaded.setter
    def threaded(self, value):
        self._threaded = value

    async def connect_async(self, *args, **kwargs):
        # callbacks
        async def error_cb(e):
            self._log.error("NATS error: %s" % str(e))

        async def close_cb():
            self._log.warning("connection to NATS closed.")
            if self._nc.is_closed:
                return
            if self._reconnect_count != 0 and self._connect_params is not None:
                for i in range(1, self._reconnect_count):
                    try:
                        self._log.info("Try to reconnecting, %d of %d..." % (i, self._reconnect_count))
                        await self.connect_async(**self._connect_params)
                    except Exception as e:
                        self._log.info("Cannot auto-reconnect: %r, sleeping for %ds" % (e, self._reconnect_timeout))
                        await asyncio.sleep(self._reconnect_timeout)
                    else:
                        self._log.info("Reconnected over %d try!" % i)
                        for sub_name, sub_params in self._subscriptions.items():
                            self._log.info("Resubscribe for %s" % sub_name)
                            await self._subscribe(*sub_params[0], **sub_params[1])
                        return
                else:
                    self.shutdown('')

        async def disconnected_cb():
            self._log.error("NATS disconnected!")

        async def reconnected_cb():
            self._log.info("NATS reconnected!")

        options = {
            'name': self._name,
            'servers': ['nats://{username}:{password}@{host}:{port}'.format(**kwargs)],
            'io_loop': self._loop,
            'closed_cb': close_cb,
            'reconnected_cb': reconnected_cb,
            'error_cb': error_cb,
            'disconnected_cb': disconnected_cb
        }
        self._connect_params = kwargs
        await self._nc.connect(**options)

    def connect(self, *args, **kwargs):
        """
        :param args: not used
        :param kwargs:
            username - NATS username
            password - NATS password
            host - NATS hostname (domain or IP)
            port - NATS port (without default, must be specified)
        """
        self._loop.run_until_complete(self.connect_async(*args, **kwargs))

    @require_connect_async
    async def _subscribe(self, *args, **kwargs):
        self._subscriptions[args[0]] = (args, kwargs)
        await self._nc.subscribe(*args, **kwargs, is_async=True)

    def subscribe(self, *args, **kwargs):
        orig_sub = kwargs['cb']

        async def subscriber(msg):
            await orig_sub(msg, self)

        kwargs['cb'] = subscriber
        self._loop.run_until_complete(self._subscribe(*args, **kwargs))

    def start(self, exclusive=True, use_sig=True):
        if use_sig:
            for sig in ("SIGHUP", "SIGTERM", "SIGINT"):
                self._loop.add_signal_handler(getattr(signal, sig), self.shutdown, sig)
        self._run_exclusive = exclusive
        if exclusive:
            self._loop.run_forever()

    # Signal handler
    def shutdown(self, sig):
        if self._run_exclusive is None:
            self._log.info("nats-helper isn't started!")
            return
        if not self._nc.is_closed:
            if self._threaded:
                asyncio.run_coroutine_threadsafe(self._nc.close(), self._loop)
            else:
                self._reconnect_count = 0
                try:
                    asyncio.get_event_loop().run_until_complete(self._nc.close())
                except:
                    pass
        else:
            self._log.info("nats-helper was already closed!")
        if self._loop.is_running():
            self._log.info("nats-helper closing...")
            time.sleep(1)
            self._loop.call_soon_threadsafe(self._loop.stop)
            self._log.info("loop stopped, wait 1s for shutdown...")
            time.sleep(1)
        self._run_exclusive = None
        self._log.info("bye!")

    # timed_request
    @require_connect_threadsafe
    def timed_request_threadsafe(self, *args, **kwargs):
        return asyncio.run_coroutine_threadsafe(self._nc.timed_request(*args, **kwargs), self._loop).result()

    @require_connect_async
    async def timed_request_async(self, *args, **kwargs):
        return await self._nc.timed_request(*args, **kwargs)

    # publish
    @require_connect_threadsafe
    def publish_threadsafe(self, *args, **kwargs):
        return asyncio.run_coroutine_threadsafe(self._nc.publish(*args, **kwargs), self._loop).result()

    @require_connect_async
    async def publish_async(self, *args, **kwargs):
        return await self._nc.publish(*args, **kwargs)

    # publish_request
    @require_connect_threadsafe
    def publish_request_threadsafe(self, *args, **kwargs):
        return asyncio.run_coroutine_threadsafe(self._nc.publish_request(*args, **kwargs), self._loop).result()

    @require_connect_async
    async def publish_request_async(self, *args, **kwargs):
        return await self._nc.publish_request(*args, **kwargs)
Python
0.000299
@@ -165,33 +165,16 @@ wargs):%0A - try:%0A @@ -237,36 +237,32 @@ ne:%0A - - await self.conne @@ -298,28 +298,24 @@ ms)%0A - return await @@ -346,131 +346,8 @@ rgs) -%0A except:%0A print('ERRA')%0A import traceback%0A traceback.print_exc()%0A raise %0A%0A
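The decoded hunks strip the debug try/except (with its `print('ERRA')` and traceback dump) from `require_connect_async`. A reconstruction of the wrapper after the commit, with layout inferred:

```python
def require_connect_async(func):
    async def wrapper(self, *args, **kwargs):
        # debug try/except with print('ERRA') and traceback removed
        if not self.connected and self._connect_params is not None:
            await self.connect_async(**self._connect_params)
        return await func(self, *args, **kwargs)
    return wrapper
```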
4fa4645b7802cc358a99888391b47d8ce82bbcae
fix custom.intrahealth.tests.test_fluffs:TestFluffs.test_taux_de_satifisfaction_fluff
custom/intrahealth/tests/test_fluffs.py
custom/intrahealth/tests/test_fluffs.py
from __future__ import absolute_import
from __future__ import unicode_literals
import os

from django.core import management

from corehq.apps.receiverwrapper.auth import AuthContext
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.util.test_utils import softer_assert
import xml.etree.ElementTree as ElementTree

import sqlalchemy

from custom.intrahealth.tests.test_utils import IntraHealthTestCase, TEST_DOMAIN
from testapps.test_pillowtop.utils import real_pillow_settings
from io import open

DATAPATH = os.path.join(os.path.dirname(__file__), 'data')


class TestFluffs(IntraHealthTestCase):

    @classmethod
    @softer_assert()
    def setUpClass(cls):
        super(TestFluffs, cls).setUpClass()
        cls.table = cls.taux_sat_table
        cls.couverture = cls.couverture_table
        with open(os.path.join(DATAPATH, 'taux.xml'), encoding='utf-8') as f:
            xml = f.read()
        xml_obj = ElementTree.fromstring(xml)
        xml_obj[2][4].text = cls.mobile_worker.get_id
        xml = ElementTree.tostring(xml_obj)
        cls.taux = submit_form_locally(
            xml, TEST_DOMAIN, auth_context=AuthContext(
                user_id=cls.mobile_worker.get_id, domain=TEST_DOMAIN, authenticated=True
            )
        ).xform
        with open(os.path.join(DATAPATH, 'operateur.xml'), encoding='utf-8') as f:
            xml = f.read()
        cls.couverture_form = submit_form_locally(
            xml, TEST_DOMAIN, auth_context=AuthContext(
                user_id=cls.mobile_worker.get_id, domain=TEST_DOMAIN, authenticated=True
            )
        ).xform

    def test_taux_de_satifisfaction_fluff(self):
        with real_pillow_settings():
            management.call_command(
                'ptop_reindexer_fluff',
                'IntraHealthFormFluffPillow',
            )
        query = sqlalchemy.select(
            [
                self.table.c.region_id,
                self.table.c.district_id,
                self.table.c.product_id,
                self.table.c.product_name,
                self.table.c.commandes_total,
                self.table.c.recus_total
            ],
            from_obj=self.table,
            order_by=[self.table.c.doc_id]
        )
        with self.engine.begin() as connection:
            results = list(connection.execute(query).fetchall())
        self.assertEqual(len(results), 2)
        self.assertListEqual(
            [
                self.region.get_id,
                self.district.get_id,
                self.product2.get_id,
                self.product2.name,
                26,
                23
            ],
            list(results[0])
        )
        self.assertListEqual(
            [
                self.region.get_id,
                self.district.get_id,
                self.product.get_id,
                self.product.name,
                25,
                25
            ],
            list(results[1])
        )

    def test_couverture_fluff(self):
        with real_pillow_settings():
            management.call_command(
                'ptop_reindexer_fluff',
                'IntraHealthFormFluffPillow',
            )
        query = sqlalchemy.select(
            [
                self.couverture.c.pps_name,
                self.couverture.c.registered_total_for_region,
                self.couverture.c.registered_total_for_district
            ],
            from_obj=self.couverture,
            order_by=[self.couverture.c.doc_id]
        )
        with self.engine.begin() as connection:
            results = list(connection.execute(query).fetchall())
        self.assertEqual(len(results), 1)
        self.assertListEqual(
            [
                self.pps.name,
                1,
                1
            ],
            list(results[0])
        )
Python
0.000001
@@ -2397,32 +2397,33 @@ ry).fetchall())%0A +%0A self.ass @@ -2429,62 +2429,38 @@ sert +Items Equal( -len(results), 2)%0A%0A self.assertListEqual(%0A +%0A %5B%0A @@ -2477,32 +2477,36 @@ + self.region.get_ @@ -2501,32 +2501,36 @@ .region.get_id,%0A + @@ -2559,32 +2559,36 @@ + self.product2.ge @@ -2583,17 +2583,16 @@ .product -2 .get_id, @@ -2600,32 +2600,36 @@ + self.product2.na @@ -2624,17 +2624,16 @@ .product -2 .name,%0A @@ -2651,24 +2651,24 @@ -26,%0A +25,%0A @@ -2667,15 +2667,8 @@ - 23%0A @@ -2667,35 +2667,39 @@ -%5D,%0A +25%0A list @@ -2694,32 +2694,18 @@ -list(results%5B0%5D) +%5D, %0A @@ -2705,27 +2705,24 @@ -)%0A%0A self.ass @@ -2713,37 +2713,17 @@ -self.assertListEqual( +%5B %0A @@ -2718,35 +2718,32 @@ %5B%0A - %5B%0A @@ -2727,39 +2727,32 @@ - self.region.get_ @@ -2747,32 +2747,36 @@ .region.get_id,%0A + @@ -2805,32 +2805,36 @@ + + self.product.get @@ -2821,32 +2821,33 @@ self.product +2 .get_id,%0A @@ -2831,32 +2831,36 @@ roduct2.get_id,%0A + @@ -2863,32 +2863,33 @@ self.product +2 .name,%0A @@ -2887,34 +2887,38 @@ -25 + 26 ,%0A @@ -2911,34 +2911,57 @@ -25 + 23%0A %5D, %0A %5D,%0A @@ -2964,32 +2964,33 @@ %5D,%0A +%5B list(results%5B1%5D) @@ -2988,13 +2988,32 @@ sult -s%5B1%5D) +) for result in results%5D %0A
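Decoded, the hunks above replace the two order-dependent assertions in `test_taux_de_satifisfaction_fluff` with a single order-independent one. Roughly, as best the escaped diff can be reconstructed:

```python
# before: assertEqual(len(results), 2) plus two assertListEqual calls
# after: one assertItemsEqual over both expected rows
self.assertItemsEqual(
    [
        [self.region.get_id, self.district.get_id,
         self.product.get_id, self.product.name, 25, 25],
        [self.region.get_id, self.district.get_id,
         self.product2.get_id, self.product2.name, 26, 23],
    ],
    [list(result) for result in results]
)
```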
cd06aa9d2625472237b0b9b96f6efa46494e82a5
Clean up leftover code
advgoogle/advgoogle.py
advgoogle/advgoogle.py
from discord.ext import commands
from random import choice
import aiohttp
import re
import urllib


class AdvancedGoogle:
    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    @commands.cooldown(5, 60, commands.BucketType.user)
    async def google(self, ctx, text):
        """Its google, you search with it.
        Example: google A magical pug

        Special search options are available; Image, Images, Maps
        Example: google image You know, for kids! > Returns first image
        Another example: google maps New York
        Another example: google images cats > Returns a random image based on the query

        LEGACY EDITION! SEE HERE!
        https://twentysix26.github.io/Red-Docs/red_cog_approved_repos/#refactored-cogs
        Originally made by Kowlin https://github.com/Kowlin/refactored-cogs
        edited by Aioxas"""
        result = await self.get_response(ctx)
        await self.bot.say(result)

    async def images(self, ctx, regex, option, images: bool=False):
        print(ctx)
        uri = "https://www.google.com/search?hl=en&tbm=isch&tbs=isz:m&q="
        num = 7
        if images:
            num = 8
        if isinstance(ctx, str):
            quary = str(ctx[num-1:].lower())
        else:
            quary = str(ctx.message.content
                        [len(ctx.prefix+ctx.command.name)+num:].lower())
        print(quary)
        encode = urllib.parse.quote_plus(quary, encoding='utf-8', errors='replace')
        uir = uri+encode
        url = None
        async with aiohttp.request('GET', uir, headers=option) as resp:
            test = await resp.content.read()
            unicoded = test.decode("unicode_escape")
            query_find = regex[0].findall(unicoded)
            try:
                if images:
                    url = choice(query_find)
                elif not images:
                    url = query_find[0]
                error = False
            except IndexError:
                error = True
        return url, error

    def parsed(self, find, regex, found: bool=True):
        find = find[:5]
        for r in find:
            if regex[3].search(r):
                m = regex[3].search(r)
                r = r[:m.start()] + r[m.end():]
            r = self.unescape(r)
        for i in range(len(find)):
            if i == 0:
                find[i] = find[i] + "\n\n**You might also want to check these out:**"
            else:
                find[i] = "<{}>".format(find[i])
        return find

    def unescape(self, msg):
        regex = ["<br \/>", "(?:\\\\[rn])", "(?:\\\\['])", "%25", "\(", "\)"]
        subs = ["\n", "", "'", "%", "%28", "%29"]
        for i in range(len(regex)):
            sub = re.sub(regex[i], subs[i], msg)
            msg = sub
        return msg

    async def get_response(self, ctx):
        if isinstance(ctx, str):
            search_type = ctx.lower().split(" ")
            search_valid = str(ctx.lower())
        else:
            search_type = ctx.message.content[len(ctx.prefix + ctx.command.name) + 1:].lower().split(" ")
            search_valid = str(ctx.message.content
                               [len(ctx.prefix + ctx.command.name) + 1:].lower())
        option = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'
        }
        regex = [
            re.compile(",\"ou\":\"([^`]*?)\""),
            re.compile("<h3 class=\"r\"><a href=\"\/url\?url=([^`]*?)&amp;"),
            re.compile("<h3 class=\"r\"><a href=\"([^`]*?)\""),
            re.compile("\/url?url=")
        ]
        # Start of Image
        if search_type[0] == "image" or search_type[0] == "images":
            msg = "Your search yielded no results."
            if search_valid == "image" or search_valid == "images":
                msg = "Please actually search something"
                return msg
            else:
                if search_type[0] == "image":
                    url, error = await self.images(ctx, regex, option)
                elif search_type[0] == "images":
                    url, error = await self.images(ctx, regex, option, images=True)
                if url and not error:
                    return url
                elif error:
                    return msg
        # End of Image
        # Start of Maps
        elif search_type[0] == "maps":
            if search_valid == "maps":
                msg = "Please actually search something"
                return msg
            else:
                uri = "https://www.google.com/maps/search/"
                if isinstance(ctx, str):
                    quary = str(ctx[5:].lower())
                else:
                    quary = str(ctx.message.content
                                [len(ctx.prefix + ctx.command.name) + 6:].lower())
                encode = urllib.parse.quote_plus(quary, encoding='utf-8', errors='replace')
                uir = uri + encode
                return uir
        # End of Maps
        # Start of generic search
        else:
            uri = "https://www.google.com/search?hl=en&q="
            if isinstance(ctx, str):
                quary = str(ctx)
            else:
                quary = str(ctx.message.content
                            [len(ctx.prefix + ctx.command.name) + 1:])
            encode = urllib.parse.quote_plus(quary, encoding='utf-8', errors='replace')
            uir = uri + encode
            async with aiohttp.request('GET', uir, headers=option) as resp:
                test = str(await resp.content.read())
                query_find = regex[1].findall(test)
                if not query_find:
                    query_find = regex[2].findall(test)
                    try:
                        query_find = self.parsed(query_find, regex)
                    except IndexError:
                        return IndexError
                elif regex[3].search(query_find[0]):
                    query_find = self.parsed(query_find, regex)
                else:
                    query_find = self.parsed(query_find, regex, found=False)
                query_find = "\n".join(query_find)
            return query_find
        # End of generic search

    async def on_message(self, message):
        channel = message.channel
        str2find = "ok google "
        text = message.clean_content
        if not text.startswith(str2find):
            return
        text = text.replace(str2find, "", 1)
        await self.bot.send_typing(channel)
        try:
            result = await self.get_response(text)
            await self.bot.send_message(channel, result)
        except IndexError:
            await self.bot.send_message(channel, "Your search yielded no results.")


def setup(bot):
    n = AdvancedGoogle(bot)
    bot.add_cog(n)
Python
0
@@ -1047,27 +1047,8 @@ e):%0A - print(ctx)%0A @@ -1385,29 +1385,8 @@ ())%0A - print(quary)%0A
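The decoded hunks simply delete the two leftover debug prints in `images()`:

```python
async def images(self, ctx, regex, option, images: bool=False):
    # print(ctx) removed here
    uri = "https://www.google.com/search?hl=en&tbm=isch&tbs=isz:m&q="
    ...
    # print(quary) removed here, just before the URL encoding step
```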
ce96495cddee0503511df6015b2a0b1c3181a802
Revert "missing s"
advgoogle/advgoogle.py
advgoogle/advgoogle.py
import discord
import aiohttp
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
from random import choice
import re
from cogs.utils.chat_formatting import *
import urllib


class AdvancedGoogle:
    def __init__(self, bot):
        self.bot = bot
        self.option = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
        self.break_regex = re.compile("<br \/>")
        self.CR_LF_removal_regex = re.compile("(?:\\\\[rn])")
        self.single_quote_regex = re.compile("(?:\\\\['])")

    @commands.command(name = "advgoogle", pass_context=True, no_pm=True)
    async def _advgoogle(self, ctx, text):
        """Its google, you search with it.
        Example: google A french pug

        Special search options are available; Image, Images, Maps
        Example: google image You know, for kids! > Returns first image
        Another example: google maps New York
        Another example: google images cats > Returns a random image based on the query

        LEGACY EDITION! SEE HERE!
        https://twentysix26.github.io/Red-Docs/red_cog_approved_repos/#refactored-cogs
        Originally made by Kowlin https://github.com/Kowlin/refactored-cogs
        edited by Axioxas"""
        search_type = ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower().split(" ")
        #Start of Image
        if search_type[0] == "image":
            search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())
            if search_valid == "image":
                await self.bot.say("Please actually search something")
            else:
                uri = "https://www.google.com/search?tbm=isch&tbs=isz:m&q="
                quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+7:].lower())
                encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')
                uir = uri+encode
                async with aiohttp.get(uir, headers = self.option) as resp:
                    test = await resp.content.read()
                    imageregex = re.compile(",\"ou\":\"([^`]*?)\"")
                    unicoded = test.decode("unicode_escape")
                    query_find = imageregex.findall(unicoded)
                    try:
                        url = query_find[0]
                        await self.bot.say(url)
                    except IndexError:
                        await self.bot.say("Your search yielded no results.")
        #End of Image
        #Start of Image random
        elif search_type[0] == "images":
            search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())
            if search_valid == "image":
                await self.bot.say("Please actually search something")
            else:
                uri = "https://www.google.com/search?tbm=isch&tbs=isz:m&q="
                quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+7:].lower())
                encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')
                uir = uri+encode
                async with aiohttp.get(uir, headers = self.option) as resp:
                    test = await resp.content.read()
                    imageregex = re.compile(",\"ou\":\"([^`]*?)\"")
                    unicoded = test.decode("unicode_escape")
                    query_find = imageregex.findall(unicoded)
                    try:
                        url = choice(query_find)
                        await self.bot.say(url)
                    except IndexError:
                        await self.bot.say("Your search yielded no results.")
        #End of Image random
        #Start of Maps
        elif search_type[0] == "maps":
            search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())
            if search_valid == "maps":
                await self.bot.say("Please actually search something")
            else:
                uri = "https://www.google.com/maps/search/"
                quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+6:].lower())
                encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')
                uir = uri+encode
                await self.bot.say(uir, headers = self.option)
        #End of Maps
        #Start of generic search
        else:
            uri = "https://www.google.com/search?q="
            quary = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:])
            encode = urllib.parse.quote_plus(quary,encoding='utf-8',errors='replace')
            uir = uri+encode
            async with aiohttp.get(uir, headers = self.option) as resp:
                test = await resp.content.read()
                searchregex = re.compile("<h3 class=\"r\"><a href=\"\/url\?url=([^`]*?)&amp;")
                searchregex2 = re.compile("<h3 class=\"r\"><a href=\"([^`]*?)\"")
                searchregex3 = re.compile("<h3 class=\"r\"><a href=\"http:\/\/www.google.com\/url\?url=([^`]*?)&amp;")
                query_find = searchregex.findall("{}".format(test))
                if query_find == []:
                    query_find = searchregex2.findall("{}".format(test))
                    try:
                        if re.search("\/url?url=", query_find[0]) == True:
                            query_find = query_find[0]
                            m = re.search("\/url?url=", query_find)
                            query_find = query_find[:m.start()] + query_find[m.end():]
                            decode = self.unescape(query_find)
                            await self.bot.say("Here is your link: {}".format(decode))
                        else:
                            decode = self.unescape(query_find[0])
                            await self.bot.say("Here is your link: {}".format(decode))
                    except IndexError:
                        await self.bot.say("Your search yielded no results.")
                elif re.search("\/url?url=", query_find[0]) == True:
                    query_find = query_find[0]
                    m = re.search("\/url?url=", query_find)
                    query_find = query_find[:m.start()] + query_find[m.end():]
                    decode = self.unescape(query_find)
                    await self.bot.say("Here is your link: {}".format(decode))
                else:
                    query_find = query_find[0]
                    decode = self.unescape(query_find)
                    await self.bot.say("Here is your link: {} ".format(decode))
        #End of generic search

    def unescape(self, query):
        break_sub = self.break_regex.sub("\n", query)
        CR_LF_sub = self.CR_LF_removal_regex.sub("", break_sub)
        single_quote_sub = self.single_quote_regex.sub("'", CR_LF_sub)
        percent_sub = re.sub("%25", "%", single_quote_sub)
        left_parentheses_sub = re.sub("\(", "%28", percent_sub)
        right_parentheses_sub = re.sub("\)", "%29", left_parentheses_sub)
        return right_parentheses_sub


def setup(bot):
    n = AdvancedGoogle(bot)
    bot.add_cog(n)
Python
0
@@ -4319,33 +4319,32 @@ .say(uir, header -s = self.option)%0A
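The net change in this hunk is a single deleted character in the maps branch. Reading it together with the subject (Revert "missing s"), it appears to turn `headers` back into `header` in the `bot.say` call; a best-effort decode:

```python
# before the revert
await self.bot.say(uir, headers = self.option)
# after the revert (the 's' added by the earlier "missing s" commit is dropped again)
await self.bot.say(uir, header = self.option)
```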
82660913a5f3902e3536b2bc557a019560a88a1a
Add column for Item Name
erpnext/stock/report/item_price_stock/item_price_stock.py
erpnext/stock/report/item_price_stock/item_price_stock.py
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe import _

def execute(filters=None):
    columns, data = [], []
    columns = get_columns()
    data = get_data(filters, columns)
    return columns, data

def get_columns():
    return [
        {
            "label": _("Item Name"),
            "fieldname": "item_name",
            "fieldtype": "Link",
            "options": "Item",
            "width": 120
        },
        {
            "label": _("Brand"),
            "fieldname": "brand",
            "fieldtype": "Data",
            "width": 100
        },
        {
            "label": _("Warehouse"),
            "fieldname": "warehouse",
            "fieldtype": "Link",
            "options": "Warehouse",
            "width": 120
        },
        {
            "label": _("Stock Available"),
            "fieldname": "stock_available",
            "fieldtype": "Float",
            "width": 120
        },
        {
            "label": _("Buying Price List"),
            "fieldname": "buying_price_list",
            "fieldtype": "Link",
            "options": "Price List",
            "width": 120
        },
        {
            "label": _("Buying Rate"),
            "fieldname": "buying_rate",
            "fieldtype": "Currency",
            "width": 120
        },
        {
            "label": _("Selling Price List"),
            "fieldname": "selling_price_list",
            "fieldtype": "Link",
            "options": "Price List",
            "width": 120
        },
        {
            "label": _("Selling Rate"),
            "fieldname": "selling_rate",
            "fieldtype": "Currency",
            "width": 120
        }
    ]

def get_data(filters, columns):
    item_price_qty_data = []
    item_price_qty_data = get_item_price_qty_data(filters)
    return item_price_qty_data

def get_item_price_qty_data(filters):
    conditions = ""
    if filters.get("item_code"):
        conditions += "where a.item_code=%(item_code)s"

    item_results = frappe.db.sql("""select a.item_code as item_name,
        a.name as price_list_name, a.brand as brand, b.warehouse as warehouse,
        b.actual_qty as actual_qty
        from `tabItem Price` a left join `tabBin` b
        ON a.item_code = b.item_code
        {conditions}"""
        .format(conditions=conditions), filters, as_dict=1)

    price_list_names = list(set([frappe.db.escape(item.price_list_name)
        for item in item_results]))

    buying_price_map = get_price_map(price_list_names, buying=1)
    selling_price_map = get_price_map(price_list_names, selling=1)

    result = []
    if item_results:
        for item_dict in item_results:
            data = {
                'item_name': item_dict.item_name,
                'brand': item_dict.brand,
                'warehouse': item_dict.warehouse,
                'stock_available': item_dict.actual_qty or 0,
                'buying_price_list': "",
                'buying_rate': 0.0,
                'selling_price_list': "",
                'selling_rate': 0.0
            }
            price_list = item_dict["price_list_name"]
            if buying_price_map.get(price_list):
                data["buying_price_list"] = buying_price_map.get(price_list)["Buying Price List"] or ""
                data["buying_rate"] = buying_price_map.get(price_list)["Buying Rate"] or 0
            if selling_price_map.get(price_list):
                data["selling_price_list"] = selling_price_map.get(price_list)["Selling Price List"] or ""
                data["selling_rate"] = selling_price_map.get(price_list)["Selling Rate"] or 0
            result.append(data)
    return result

def get_price_map(price_list_names, buying=0, selling=0):
    price_map = {}
    if not price_list_names:
        return price_map

    rate_key = "Buying Rate" if buying else "Selling Rate"
    price_list_key = "Buying Price List" if buying else "Selling Price List"
    price_list_condition = " and buying=1" if buying else " and selling=1"

    pricing_details = frappe.db.sql("""
        select name, price_list, price_list_rate from `tabItem Price`
        where name in ({price_list_names}) {price_list_condition}
        """.format(
            price_list_names=', '.join(['%s'] * len(price_list_names)),
            price_list_condition=price_list_condition
        ), price_list_names, as_dict=1)

    for d in pricing_details:
        name = d["name"]
        price_map[name] = {
            price_list_key: d["price_list"],
            rate_key: d["price_list_rate"]
        }

    return price_map
Python
0.000001
@@ -374,11 +374,11 @@ tem -Nam +Cod e%22), @@ -400,19 +400,19 @@ : %22item_ -nam +cod e%22,%0A%09%09%09%22 @@ -470,32 +470,138 @@ %22: 120%0A%09%09%7D,%0A%09%09%7B%0A +%09%09%09%22label%22: _(%22Item Name%22),%0A%09%09%09%22fieldname%22: %22item_name%22,%0A%09%09%09%22fieldtype%22: %22Data%22,%0A%09%09%09%22width%22: 120%0A%09%09%7D,%0A%09%09%7B%0A %09%09%09%22label%22: _(%22B @@ -1795,20 +1795,20 @@ tem_code +, a -s +. item_nam @@ -2361,16 +2361,54 @@ ata = %7B%0A +%09%09%09%09'item_code': item_dict.item_code,%0A %09%09%09%09'ite
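Decoded, the hunks rename the existing first column to "Item Code" and add a separate "Item Name" column, then extend the SQL select and the row dict to carry both fields. A reconstruction of the new column definitions (indentation inferred; the original uses tabs):

```python
{
    "label": _("Item Code"),      # renamed from "Item Name"
    "fieldname": "item_code",
    "fieldtype": "Link",
    "options": "Item",
    "width": 120
},
{
    "label": _("Item Name"),      # new column
    "fieldname": "item_name",
    "fieldtype": "Data",
    "width": 120
},
```

The query correspondingly becomes `select a.item_code, a.item_name, ...` (dropping the `as item_name` alias), and each row dict gains `'item_code': item_dict.item_code`.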
1bfab9dd43fc52bfdea0943703ee530e3b0f98de
remove SpecsParser
neurodocker/__init__.py
neurodocker/__init__.py
# Author: Jakub Kaczmarzyk <jakubk@mit.edu>

from __future__ import absolute_import

import logging
import sys

LOG_FORMAT = '[NEURODOCKER %(asctime)s %(levelname)s]: %(message)s'
logging.basicConfig(stream=sys.stdout, datefmt='%H:%M:%S', level=logging.INFO,
                    format=LOG_FORMAT)


from neurodocker import interfaces

SUPPORTED_SOFTWARE = {'afni': interfaces.AFNI,
                      'ants': interfaces.ANTs,
                      'freesurfer': interfaces.FreeSurfer,
                      'fsl': interfaces.FSL,
                      'miniconda': interfaces.Miniconda,
                      'mrtrix3': interfaces.MRtrix3,
                      'neurodebian': interfaces.NeuroDebian,
                      'spm': interfaces.SPM,
                      }

from neurodocker.docker import DockerContainer, DockerImage
from neurodocker.dockerfile import Dockerfile
from neurodocker.parser import SpecsParser


def _get_version():
    """Return version string."""
    import os
    BASE_PATH = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(BASE_PATH, "VERSION"), 'r') as fp:
        return fp.read().strip()

__version__ = _get_version()
Python
0.000001
@@ -296,484 +296,8 @@ T)%0A%0A -%0Afrom neurodocker import interfaces%0A%0ASUPPORTED_SOFTWARE = %7B'afni': interfaces.AFNI,%0A 'ants': interfaces.ANTs,%0A 'freesurfer': interfaces.FreeSurfer,%0A 'fsl': interfaces.FSL,%0A 'miniconda': interfaces.Miniconda,%0A 'mrtrix3': interfaces.MRtrix3,%0A 'neurodebian': interfaces.NeuroDebian,%0A 'spm': interfaces.SPM,%0A %7D%0A%0A from @@ -402,51 +402,8 @@ ile%0A -from neurodocker.parser import SpecsParser%0A %0A%0Ade
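The decoded hunks delete the `from neurodocker import interfaces` import together with the whole `SUPPORTED_SOFTWARE` mapping, and drop `from neurodocker.parser import SpecsParser`. The module body after the commit reduces to roughly:

```python
LOG_FORMAT = '[NEURODOCKER %(asctime)s %(levelname)s]: %(message)s'
logging.basicConfig(stream=sys.stdout, datefmt='%H:%M:%S', level=logging.INFO,
                    format=LOG_FORMAT)

from neurodocker.docker import DockerContainer, DockerImage
from neurodocker.dockerfile import Dockerfile

def _get_version():
    ...
```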
44d74984bd4168eddb4cc5f9c0e77aad4e498a02
fix broken plots
moca/plotter/__init__.py
moca/plotter/__init__.py
from .seqstats import perform_t_test
from .seqstats import get_pearson_corr
from .plotter import create_plot
Python
0.000006
@@ -1,80 +1,4 @@ -from .seqstats import perform_t_test%0Afrom .seqstats import get_pearson_corr%0A from
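Decoded, the hunk removes both `seqstats` imports, leaving the module as a single line:

```python
from .plotter import create_plot
```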
d2f1595fbb9e8d29e2126aa9453f4159e9b85a0d
add event to receive panel on focus
guicomm/events.py
guicomm/events.py
import wx.lib.newevent

# plot data
(NewPlotEvent, EVT_NEW_PLOT) = wx.lib.newevent.NewEvent()
# print the messages on statusbar
(StatusEvent, EVT_STATUS) = wx.lib.newevent.NewEvent()
# create a panel slicer
(SlicerPanelEvent, EVT_SLICER_PANEL) = wx.lib.newevent.NewEvent()
# print updated parameters for panel slicer
(SlicerParamUpdateEvent, EVT_SLICER_PARS_UPDATE) = wx.lib.newevent.NewEvent()
# update the slicer from the panel
(SlicerParameterEvent, EVT_SLICER_PARS) = wx.lib.newevent.NewEvent()
# slicer event
(SlicerEvent, EVT_SLICER) = wx.lib.newevent.NewEvent()
# event containing a dictionary of names and errors of selected data
(ErrorDataEvent, ERR_DATA) = wx.lib.newevent.NewEvent()
# event that destroys a page associated with Data1D removed from the graph
(RemoveDataEvent, EVT_REMOVE_DATA) = wx.lib.newevent.NewEvent()
Python
0
@@ -834,28 +834,152 @@ = wx.lib.newevent.NewEvent() +%0D%0A##event that allow to add more that to the same plot%0D%0A(AddManyDataEvent, EVT_ADD_MANY_DATA) = wx.lib.newevent.NewEvent()
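The decoded hunk appends one more event pair at the end of the file (the added lines use CRLF endings, `%0D%0A`, in the patch):

```python
##event that allow to add more that to the same plot
(AddManyDataEvent, EVT_ADD_MANY_DATA) = wx.lib.newevent.NewEvent()
```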
1c51ca868a3a1a2b3110b76ec7b563aa7d9d9c58
update tests
hs_core/tests/api/native/test_folder_download_zip.py
hs_core/tests/api/native/test_folder_download_zip.py
import os

from django.contrib.auth.models import Group
from django.test import TestCase

from hs_core.hydroshare.users import create_account
from hs_core.hydroshare.resource import add_resource_files, create_resource
from hs_core.models import GenericResource
from hs_core.tasks import create_temp_zip
from django_irods.storage import IrodsStorage
from hs_core.models import ResourceFile


class TestFolderDownloadZip(TestCase):
    def setUp(self):
        super(TestFolderDownloadZip, self).setUp()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        self.user = create_account(
            'shauntheta@gmail.com',
            username='shaun',
            first_name='Shaun',
            last_name='Livingston',
            superuser=False,
            groups=[]
        )

        self.res = create_resource(resource_type='CompositeResource',
                                   owner=self.user,
                                   title='Test Resource',
                                   metadata=[])

        ResourceFile.create_folder(self.res, 'foo')

        # create files
        self.n1 = "test1.txt"
        test_file = open(self.n1, 'w')
        test_file.write("Test text file in test1.txt")
        test_file.close()

        self.test_file = open(self.n1, "r")
        add_resource_files(self.res.short_id, self.test_file, folder='foo')

        # copy refts file into new file to be added to the resource as an aggregation
        reft_data_file = open('hs_core/tests/data/multi_sites_formatted_version1.0.refts.json', 'r')
        refts_file = open('multi_sites_formatted_version1.0.refts.json', 'w')
        refts_file.writelines(reft_data_file.readlines())
        refts_file.close()
        self.refts_file = open('multi_sites_formatted_version1.0.refts.json', 'r')
        add_resource_files(self.res.short_id, self.refts_file)
        self.res.create_aggregation_xml_documents()

    def tearDown(self):
        super(TestFolderDownloadZip, self).tearDown()
        if self.res:
            self.res.delete()
        if self.test_file:
            os.remove(self.test_file.name)
        if self.refts_file:
            os.remove(self.refts_file.name)
        GenericResource.objects.all().delete()
        istorage = IrodsStorage()
        if istorage.exists("zips"):
            istorage.delete("zips")

    def test_create_temp_zip(self):
        input_path = "{}/data/contents/foo".format(self.res.short_id)
        output_path = "zips/rand/foo.zip"

        self.assertTrue(create_temp_zip(self.res.short_id, input_path,
                                        output_path, False))
        self.assertTrue(IrodsStorage().exists(output_path))

        # test aggregation
        input_path = "{}/data/contents/multi_sites_formatted_version1.0.refts.json"\
            .format(self.res.short_id)
        output_path = "zips/rand/multi_sites_formatted_version1.0.refts.json.zip"

        self.assertTrue(create_temp_zip(self.res.short_id, input_path,
                                        output_path, True))
        self.assertTrue(IrodsStorage().exists(output_path))
Python
0
@@ -3066,16 +3066,29 @@ t_path, +True, sf_zip= True))%0A
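Decoded, the hunk extends the second `create_temp_zip` call (the aggregation case) with an extra positional `True` and a `sf_zip=True` keyword:

```python
self.assertTrue(create_temp_zip(self.res.short_id, input_path,
                                output_path, True, sf_zip=True))
```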
53f13515511e6851d59228e8cc9df24a74149eee
add test
corehq/apps/domain/tests/test_dbaccessors.py
corehq/apps/domain/tests/test_dbaccessors.py
import uuid

from django.test import TestCase

from corehq.apps.commtrack.models import CommtrackConfig
from corehq.apps.domain.dbaccessors import get_doc_ids_in_domain_by_class, get_domain_ids_by_names, \
    get_docs_in_domain_by_class
from corehq.apps.domain.models import Domain
from corehq.apps.groups.models import Group
from corehq.apps.users.models import UserRole
from couchforms.models import XFormInstance
from corehq.apps.domain.dbaccessors import get_doc_ids_in_domain_by_type
from dimagi.utils.couch.database import get_db


class DBAccessorsTest(TestCase):

    @classmethod
    def setUpClass(cls):
        cls.domain = 'domain-domain'
        cls.db = get_db()

    def test_get_doc_ids_in_domain_by_class(self):
        user_role = UserRole(domain=self.domain)
        group = Group(domain=self.domain)
        xform = XFormInstance(domain=self.domain)
        user_role.save()
        group.save()
        xform.save()
        self.addCleanup(user_role.delete)
        self.addCleanup(group.delete)
        self.addCleanup(xform.delete)
        [doc_id] = get_doc_ids_in_domain_by_class(self.domain, UserRole)
        self.assertEqual(doc_id, user_role.get_id)

    def test_get_docs_in_domain_by_class(self):
        commtrack_config = CommtrackConfig(domain=self.domain)
        group = Group(domain=self.domain)
        xform = XFormInstance(domain=self.domain)
        commtrack_config.save()
        group.save()
        xform.save()
        self.addCleanup(commtrack_config.delete)
        self.addCleanup(group.delete)
        self.addCleanup(xform.delete)
        [commtrack_config_2] = get_docs_in_domain_by_class(self.domain, CommtrackConfig)
        self.assertEqual(commtrack_config_2.to_json(), commtrack_config.to_json())

    def test_get_doc_ids_in_domain_by_type_initial_empty(self):
        self.assertEqual(0, len(get_doc_ids_in_domain_by_type('some-domain', 'some-doc-type', self.db)))

    def test_get_doc_ids_in_domain_by_type_match(self):
        id = uuid.uuid4().hex
        doc = {
            '_id': id,
            'domain': 'match-domain',
            'doc_type': 'match-type',
        }
        self.db.save_doc(doc)
        ids = get_doc_ids_in_domain_by_type('match-domain', 'match-type', self.db)
        self.assertEqual(1, len(ids))
        self.assertEqual(id, ids[0])
        self.db.delete_doc(doc)

    def test_get_doc_ids_in_domain_by_type_nomatch(self):
        id = uuid.uuid4().hex
        doc = {
            '_id': id,
            'domain': 'match-domain',
            'doc_type': 'nomatch-type',
        }
        self.db.save_doc(doc)
        ids = get_doc_ids_in_domain_by_type('match-domain', 'match-type', self.db)
        self.assertEqual(0, len(ids))
        self.db.delete_doc(doc)

    def test_get_domain_ids_by_names(self):
        def _create_domain(name):
            domain = Domain(name=name)
            domain.save()
            self.addCleanup(domain.delete)
            return domain._id

        names = ['b', 'a', 'c']
        expected_ids = [_create_domain(name) for name in names]

        ids = get_domain_ids_by_names(names)
        self.assertEqual(ids, expected_ids)

        ids = get_domain_ids_by_names(names[:-1])
        self.assertEqual(ids, expected_ids[:-1])
Python
0.000002
@@ -134,32 +134,38 @@ sors import +(%0A get_doc_ ids_in_domai @@ -144,35 +144,37 @@ t (%0A get_doc_ -ids +count _in_domain_by_cl @@ -181,35 +181,44 @@ ass, +%0A get_do -main +c _ids_ -by_names, %5C +in_domain_by_class, %0A @@ -245,16 +245,48 @@ by_class +,%0A get_domain_ids_by_names,%0A) %0Afrom co @@ -692,16 +692,16 @@ domain'%0A - @@ -719,16 +719,374 @@ t_db()%0A%0A + def get_doc_count_in_domain_by_class(self):%0A group = Group(domain=self.domain)%0A group.save()%0A self.addCleanup(group.delete)%0A group2 = Group(domain=self.domain)%0A group2.save()%0A self.addCleanup(group2.delete)%0A count = get_doc_count_in_domain_by_class(self.domain, Group)%0A self.assertEqual(count, 2)%0A%0A def
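Decoded, the hunks reshape the dbaccessors import into a parenthesized block that also pulls in `get_doc_count_in_domain_by_class`, and add a new test method (named without the `test_` prefix, as committed):

```python
def get_doc_count_in_domain_by_class(self):
    group = Group(domain=self.domain)
    group.save()
    self.addCleanup(group.delete)
    group2 = Group(domain=self.domain)
    group2.save()
    self.addCleanup(group2.delete)
    count = get_doc_count_in_domain_by_class(self.domain, Group)
    self.assertEqual(count, 2)
```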
a0e07c3ecf84219b79889509e29da0b800e36a97
fix angle normalization in get_draw_angles()
src/ezdxf/addons/drawing/utils.py
src/ezdxf/addons/drawing/utils.py
# Created: 06.2020
# Copyright (c) 2020, Matthew Broadway
# License: MIT License
import enum
import math
from math import tau
from typing import Union, List

from ezdxf.addons.drawing.type_hints import Radians
from ezdxf.entities import Face3d, Solid, Trace
from ezdxf.math import Vector, Z_AXIS, OCS


def normalize_angle(theta: Radians) -> Radians:
    # have to mod tau twice to obtain [0, tau), because some angles once normalised become exactly equal to tau
    # e.g. (-1e-16 % tau) == tau
    # so (-1e-16 % tau) % tau == 0.0
    return (theta % tau) % tau


def get_draw_angles(start: float, end: float, extrusion: Vector):
    if extrusion.isclose(Z_AXIS):
        return start, end
    else:
        ocs = OCS(extrusion)
        s = ocs.to_wcs(Vector.from_angle(start))
        e = ocs.to_wcs(Vector.from_angle(end))
        return e.angle % math.tau, s.angle % math.tau


def get_tri_or_quad_points(shape: Union[Face3d, Solid, Trace]) -> List[Vector]:
    d = shape.dxf
    vertices: List[Vector] = [d.vtx0, d.vtx1, d.vtx2]
    if d.vtx3 != d.vtx2:
        # when the face is a triangle, vtx2 == vtx3
        vertices.append(d.vtx3)
    if not vertices[0].isclose(vertices[-1]):
        vertices.append(vertices[0])
    return vertices
Python
0.000001
@@ -839,46 +839,58 @@ urn -e.angle %25 math.tau, s.angle %25 math.tau +normalize_angle(e.angle), normalize_angle(s.angle) %0A%0A%0Ad
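Decoded, the hunk routes the OCS branch of `get_draw_angles` through `normalize_angle` instead of a plain modulo, which (per the module's own comment) avoids a tiny negative angle normalizing to exactly tau:

```python
# before
return e.angle % math.tau, s.angle % math.tau
# after
return normalize_angle(e.angle), normalize_angle(s.angle)
```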
272bb8da7a44e5a0ccb7953e0ccdfd7ab473e9f1
Fix test_postgresql dependency analysis.
test/units/module_utils/test_postgresql.py
test/units/module_utils/test_postgresql.py
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

import sys

from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
from ansible.module_utils._text import to_native

import pprint

realimport = builtins.__import__


class TestPostgres(unittest.TestCase):
    def clear_modules(self, mods):
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_missing_ensure_libs(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'psycopg2':
                raise ImportError
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')

        self.assertFalse(mod.module_utils.postgres.HAS_PSYCOPG2)

        with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
            mod.module_utils.postgres.ensure_libs(sslrootcert=None)
        self.assertIn('psycopg2 is not installed', to_native(context.exception))

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_found_ensure_libs(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if 'psycopg2' in name:
                return MagicMock()
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')

        self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)

        ensure_ret = mod.module_utils.postgres.ensure_libs(sslrootcert=None)
        self.assertFalse(ensure_ret)
        pprint.pprint(ensure_ret)

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_found_ensure_libs_old_version(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if 'psycopg2' in name:
                m = MagicMock()
                m.__version__ = '2.4.1'
                return m
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')

        self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)

        with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
            mod.module_utils.postgres.ensure_libs(sslrootcert='yes')
        self.assertIn('psycopg2 must be at least 2.4.3 in order to use', to_native(context.exception))
Python
0
@@ -543,16 +543,252 @@ s%5Bmod%5D%0A%0A + def test_import(self):%0A # this import makes dependency analysis work so the tests will run when the module_utils change%0A from ansible.module_utils.postgres import HAS_PSYCOPG2%0A%0A assert HAS_PSYCOPG2 is not None%0A%0A @pat
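The decoded hunk inserts one new test near the top of the class; its in-code comment explains the purpose (a direct import so the test runner's dependency analysis links this file to the module_utils):

```python
def test_import(self):
    # this import makes dependency analysis work so the tests will run when the module_utils change
    from ansible.module_utils.postgres import HAS_PSYCOPG2

    assert HAS_PSYCOPG2 is not None
```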
15c68d6f9232b84c35ac29f6ff8e9ba25c86ab17
Improve check for single-survey redirect even more
indico/modules/events/surveys/controllers/display.py
indico/modules/events/surveys/controllers/display.py
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from flask import redirect, flash, session, request
from werkzeug.exceptions import Forbidden

from indico.core.db import db
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.surveys.fields.simple import StaticTextField
from indico.modules.events.surveys.models.submissions import SurveyAnswer, SurveySubmission
from indico.modules.events.surveys.models.surveys import Survey, SurveyState
from indico.modules.events.surveys.util import make_survey_form, was_survey_submitted, save_submitted_survey_to_session
from indico.modules.events.surveys.views import WPDisplaySurvey
from indico.util.i18n import _
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.conferenceDisplay import RHConferenceBaseDisplay


class RHSurveyBaseDisplay(RHConferenceBaseDisplay):
    def _checkParams(self, params):
        RHConferenceBaseDisplay._checkParams(self, params)
        self.event = self._conf


class RHShowSurveyMainInformation(RHSurveyBaseDisplay):
    def _can_redirect_to_survey_form(self, survey):
        return survey.is_active and not was_survey_submitted(survey)

    def _process(self):
        surveys = Survey.find_all(event_id=int(self.event.id), is_deleted=False)
        if len(surveys) == 1 and self._can_redirect_to_survey_form(surveys[0]):
            return redirect(url_for('.display_survey_form', surveys[0]))
        return WPDisplaySurvey.render_template('surveys_list.html', self.event, surveys=surveys,
                                               states=SurveyState, was_survey_submitted=was_survey_submitted)


class RHSubmitSurveyForm(RHSurveyBaseDisplay):
    CSRF_ENABLED = True
    normalize_url_spec = {
        'locators': {
            lambda self: self.survey
        }
    }

    def _checkProtection(self):
        RHSurveyBaseDisplay._checkProtection(self)
        if self.survey.require_user and not session.user:
            raise Forbidden(response=redirect_to_login(reason=_('You are trying to answer a survey '
                                                                'that requires you to be logged in')))

    def _checkParams(self, params):
        RHSurveyBaseDisplay._checkParams(self, params)
        self.survey = Survey.find_one(id=request.view_args['survey_id'], is_deleted=False)
        if not self.survey.is_active:
            flash(_('This survey is not active'), 'error')
            return redirect(url_for('.display_survey_list', self.event))
        elif was_survey_submitted(self.survey):
            flash(_('You have already answered this survey'), 'error')
            return redirect(url_for('.display_survey_list', self.event))

    def _process(self):
        form = make_survey_form(self.survey.questions)()
        if form.validate_on_submit():
            submission = self._save_answers(form)
            save_submitted_survey_to_session(submission)
            flash(_('Your answers has been saved'), 'success')
            return redirect(url_for('.display_survey_list', self.event))
        single_survey = Survey.find(event_id=int(self.event.id), is_deleted=False).count()
        return WPDisplaySurvey.render_template('survey_submission.html', self.event, form=form, event=self.event,
                                               survey=self.survey, show_back_button=single_survey)

    def _save_answers(self, form):
        survey = self.survey
        submission = SurveySubmission(survey=survey)
        if survey.anonymous:
            submission.is_anonymous = True
        else:
            submission.user = session.user
        for question in survey.questions:
            if isinstance(question.field, StaticTextField):
                continue
            answer = SurveyAnswer(question=question,
                                  data=getattr(form, 'question_{}'.format(question.id)).data)
            submission.answers.append(answer)
        db.session.flush()
        return submission
Python
0
@@ -1367,16 +1367,58 @@ ySurvey%0A +from indico.util.date_time import now_utc%0A from ind @@ -1565,510 +1565,764 @@ y%0A%0A%0A -class RHSurveyBaseDisplay(RHConferenceBaseDisplay):%0A def _checkParams(self, params):%0A RHConferenceBaseDisplay._checkParams(self, params)%0A self.event = self._conf%0A%0A%0Aclass RHShowSurveyMainInformation(RHSurveyBaseDisplay):%0A def _can_redirect_to_survey_form(self, survey):%0A return survey.is_active and not was_survey_submitted(survey)%0A%0A def _process(self):%0A surveys = Survey.find_all(event_id=int(self.event.id), is_deleted=False)%0A if len(surveys) == 1 and self. +def _get_active_surveys(event):%0A query = Survey.find(Survey.event_id == int(event.id),%0A Survey.start_dt != None, # noqa%0A Survey.start_dt %3C= now_utc(),%0A ~Survey.is_deleted)%0A return %5Bs for s in query if s.is_active%5D%0A%0A%0Adef _can_redirect_to_single_survey(surveys):%0A return len(surveys) == 1 and surveys%5B0%5D.is_active and not was_survey_submitted(surveys%5B0%5D)%0A%0A%0Aclass RHSurveyBaseDisplay(RHConferenceBaseDisplay):%0A def _checkParams(self, params):%0A RHConferenceBaseDisplay._checkParams(self, params)%0A self.event = self._conf%0A%0A%0Aclass RHShowSurveyMainInformation(RHSurveyBaseDisplay):%0A def _process(self):%0A surveys = _get_active_surveys(self.event)%0A if _can @@ -2339,26 +2339,28 @@ to_s +ingle_s urvey -_form (surveys %5B0%5D) @@ -2355,19 +2355,16 @@ (surveys -%5B0%5D ):%0A @@ -4084,88 +4084,92 @@ s -ingle_survey = Survey.find(event_id=int(self.event.id), is_deleted=False).count( +how_back_button = not _can_redirect_to_single_survey(_get_active_surveys(self.event) )%0A @@ -4365,28 +4365,31 @@ button=s -ingle_survey +how_back_button )%0A%0A d
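Decoded, the hunks hoist the single-survey redirect logic into two module-level helpers (importing `now_utc` for the start-date check) and rewrite `_process` on top of them. A reconstruction of the new helpers, with whitespace inferred:

```python
def _get_active_surveys(event):
    query = Survey.find(Survey.event_id == int(event.id),
                        Survey.start_dt != None,  # noqa
                        Survey.start_dt <= now_utc(),
                        ~Survey.is_deleted)
    return [s for s in query if s.is_active]


def _can_redirect_to_single_survey(surveys):
    return len(surveys) == 1 and surveys[0].is_active and not was_survey_submitted(surveys[0])
```

`RHShowSurveyMainInformation._process` then calls `_can_redirect_to_single_survey(_get_active_surveys(self.event))`, and the back-button flag in `RHSubmitSurveyForm._process` becomes `show_back_button = not _can_redirect_to_single_survey(_get_active_surveys(self.event))`.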
9bf5096ee78c9b71db37c7d6b3e1a806e65bc493
Fix render_to_response deprecation warning for django >= 1.9
annoying/decorators.py
annoying/decorators.py
from django.shortcuts import render_to_response
from django import forms
from django import VERSION as DJANGO_VERSION
from django.template import RequestContext
from django.db.models import signals as signalmodule
from django.http import HttpResponse
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder

# Try to be compatible with Django 1.5+.
try:
    import json
except ImportError:
    from django.utils import simplejson as json

# Basestring no longer exists in Python 3
try:
    basestring
except:
    basestring = str

import os


__all__ = ['render_to', 'signals', 'ajax_request', 'autostrip']


try:
    from functools import wraps
except ImportError:
    def wraps(wrapped, assigned=('__module__', '__name__', '__doc__'),
              updated=('__dict__',)):
        def inner(wrapper):
            for attr in assigned:
                setattr(wrapper, attr, getattr(wrapped, attr))
            for attr in updated:
                getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
            return wrapper
        return inner


def render_to(template=None, content_type=None, mimetype=None):
    """
    Decorator for Django views that sends returned dict to render_to_response
    function.

    Template name can be decorator parameter or TEMPLATE item in returned
    dictionary. RequestContext always added as context instance.
    If view doesn't return dict then decorator simply returns output.

    Parameters:
     - template: template name to use
     - content_type: content type to send in response headers
     - mimetype: content type to send in response headers (deprecated)

    Examples:
    # 1. Template name in decorator parameters

    @render_to('template.html')
    def foo(request):
        bar = Bar.object.all()
        return {'bar': bar}

    # equals to
    def foo(request):
        bar = Bar.object.all()
        return render_to_response('template.html',
                                  {'bar': bar},
                                  context_instance=RequestContext(request))

    # 2. Template name as TEMPLATE item value in return dictionary.
    if TEMPLATE is given then its value will have higher priority than
    render_to argument.

    @render_to()
    def foo(request, category):
        template_name = '%s.html' % category
        return {'bar': bar, 'TEMPLATE': template_name}

    #equals to
    def foo(request, category):
        template_name = '%s.html' % category
        return render_to_response(template_name,
                                  {'bar': bar},
                                  context_instance=RequestContext(request))
    """
    def renderer(function):
        @wraps(function)
        def wrapper(request, *args, **kwargs):
            output = function(request, *args, **kwargs)
            if not isinstance(output, dict):
                return output
            tmpl = output.pop('TEMPLATE', template)
            if tmpl is None:
                template_dir = os.path.join(*function.__module__.split('.')[:-1])
                tmpl = os.path.join(template_dir, function.func_name + ".html")
            # Explicit version check to avoid swallowing other exceptions
            if DJANGO_VERSION[0] >= 1 and DJANGO_VERSION[1] >= 5:
                return render_to_response(tmpl, output,
                                          context_instance=RequestContext(request),
                                          content_type=content_type or mimetype)
            else:
                return render_to_response(tmpl, output,
                                          context_instance=RequestContext(request),
                                          mimetype=content_type or mimetype)
        return wrapper
    return renderer


class Signals(object):
    '''
    Convenient wrapper for working with Django's signals (or any other
    implementation using same API).

    Example of usage::

        # connect to registered signal
        @signals.post_save(sender=YourModel)
        def sighandler(instance, **kwargs):
            pass

        # connect to any signal
        signals.register_signal(siginstance, signame)  # and then as in example above

        or

        @signals(siginstance, sender=YourModel)
        def sighandler(instance, **kwargs):
            pass

    In any case defined function will remain as is, without any changes.

    (c) 2008 Alexander Solovyov, new BSD License
    '''
    def __init__(self):
        self._signals = {}

        # register all Django's default signals
        for k, v in signalmodule.__dict__.items():
            # that's hardcode, but IMHO it's better than isinstance
            if not k.startswith('__') and k != 'Signal':
                self.register_signal(v, k)

    def __getattr__(self, name):
        return self._connect(self._signals[name])

    def __call__(self, signal, **kwargs):
        def inner(func):
            signal.connect(func, **kwargs)
            return func
        return inner

    def _connect(self, signal):
        def wrapper(**kwargs):
            return self(signal, **kwargs)
        return wrapper

    def register_signal(self, signal, name):
        self._signals[name] = signal

signals = Signals()


FORMAT_TYPES = {
    'application/json': lambda response: json.dumps(response, cls=DjangoJSONEncoder),
    'text/json': lambda response: json.dumps(response, cls=DjangoJSONEncoder),
}

try:
    import yaml
except ImportError:
    pass
else:
    FORMAT_TYPES.update({
        'application/yaml': yaml.dump,
        'text/yaml': yaml.dump,
    })


def ajax_request(func):
    """
    If view returned serializable dict, returns response in a format requested
    by HTTP_ACCEPT header. Defaults to JSON if none requested or match.

    Currently supports JSON or YAML (if installed), but can easily be extended.

    example:

        @ajax_request
        def my_view(request):
            news = News.objects.all()
            news_titles = [entry.title for entry in news]
            return {'news_titles': news_titles}
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        for accepted_type in request.META.get('HTTP_ACCEPT', '').split(','):
            if accepted_type in FORMAT_TYPES.keys():
                format_type = accepted_type
                break
        else:
            format_type = 'application/json'
        response = func(request, *args, **kwargs)
        if not isinstance(response, HttpResponse):
            if hasattr(settings, 'FORMAT_TYPES'):
                format_type_handler = settings.FORMAT_TYPES[format_type]
                if hasattr(format_type_handler, '__call__'):
                    data = format_type_handler(response)
                elif isinstance(format_type_handler, basestring):
                    mod_name, func_name = format_type_handler.rsplit('.', 1)
                    module = __import__(mod_name, fromlist=[func_name])
                    function = getattr(module, func_name)
                    data = function(response)
            else:
                data = FORMAT_TYPES[format_type](response)
            response = HttpResponse(data, content_type=format_type)
            response['content-length'] = len(data)
        return response
    return wrapper


def autostrip(cls):
    """
    strip text fields before validation

    example:
    class PersonForm(forms.Form):
        name = forms.CharField(min_length=2, max_length=10)
        email = forms.EmailField()

    PersonForm = autostrip(PersonForm)

    #or you can use @autostrip in python >= 2.6

    Author: nail.xx
    """
    fields = [(key, value) for key, value in cls.base_fields.items()
              if isinstance(value, forms.CharField)]
    for field_name, field_object in fields:
        def get_clean_func(original_clean):
            return lambda value: original_clean(value and value.strip())
        clean_func = get_clean_func(getattr(field_object, 'clean'))
        setattr(field_object, 'clean', clean_func)
    return cls
Python
0.000394
@@ -21,16 +21,24 @@ s import + render, render_ @@ -3258,20 +3258,158 @@ SION -%5B0%5D %3E= -1 and +(1, 9):%0A return render(request, tmpl, output,%0A content_type=content_type or mimetype)%0A elif DJA @@ -3423,16 +3423,18 @@ SION -%5B1%5D %3E= -5 +(1, 5) :%0A
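Decoded, the hunks add `render` to the shortcuts import and replace the fragile index-based version check with tuple comparisons, using `render()` on Django 1.9+. A reconstruction of the new branch inside `render_to`'s wrapper:

```python
from django.shortcuts import render, render_to_response
...
if DJANGO_VERSION >= (1, 9):
    return render(request, tmpl, output,
                  content_type=content_type or mimetype)
elif DJANGO_VERSION >= (1, 5):
    return render_to_response(tmpl, output,
                              context_instance=RequestContext(request),
                              content_type=content_type or mimetype)
else:
    return render_to_response(tmpl, output,
                              context_instance=RequestContext(request),
                              mimetype=content_type or mimetype)
```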
ad42d5df34074bfb21229a962d4b2a548a796e9a
Update data_validation/jellyfish_distance.py
data_validation/jellyfish_distance.py
data_validation/jellyfish_distance.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jellyfish


def extractClosestMatch(search_key, target_list, score_cutoff=0):
    """Return str value from target list with highest score using Jaro for string distance.

    search_key (str): A string used to search for the closest match.
    target_list (list): A list of strings for comparison.
    score_cutoff (float): A score cutoff (between 0 and 1) to be met.
    """
    highest_score = score_cutoff
    highest_value_key = None

    for target_key in target_list:
        score = jellyfish.jaro_distance(search_key, target_key)
        if score >= highest_score:
            highest_score = score
            highest_value_key = target_key

    return highest_value_key
Python
0
@@ -602,16 +602,18 @@ ract -C +_c losest -M +_m atch
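The decoded hunk is a pure rename to PEP 8 snake_case; the body is untouched:

```python
# before
def extractClosestMatch(search_key, target_list, score_cutoff=0):
# after
def extract_closest_match(search_key, target_list, score_cutoff=0):
```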
7c2f915b0ca89db2c44a73af8db3f803687f068b
reimplement load_library and backup_library
unskipper.py
unskipper.py
#! /usr/bin/env python3

# unskipper.py will prune all skipcounts from your Quod Libet library;
# the resulting lack of '~#skipcount' in your per-song entries will all
# be interpreted by QL as being skipcount 0.

import os
import sys
import shutil
import pickle

HOME = os.getenv("HOME")
QLDIR = ".quodlibet"
PATH_TO_SONGS = os.path.join(
    HOME,
    QLDIR,
    "songs",
)
PATH_TO_BKUP = os.path.join(
    HOME,
    QLDIR,
    "unpruned",
)


def load_library():
    raise NotImplementedError
    sfh = open(PATH_TO_SONGS, 'r')
    songs = pickle.load(sfh)
    sfh.close()
    return songs


def backup_library():
    raise NotImplementedError
    sfh = open(PATH_TO_SONGS, "rb")
    bfh = open(PATH_TO_BKUP, "wb")
    shutil.copyfileobj(sfh, bfh)
    sfh.close()
    bfh.close()
    return 0


def prune_skips(song_pickle):
    """Main function for pruning skips from a pickle."""
    raise NotImplementedError
    found_skips = False
    skipfmt = "prune {:d} skips on ``{:s}.''"
    for song in song_pickle:
        try:
            skipmsg = skipfmt.format(song.pop("~#skipcount"), song["title"])
            found_skips = True
            print(skipmsg)
        except KeyError:
            continue
    # write the finished pickle down
    try:
        pickle.dump(song_pickle, open(PATH_TO_SONGS, "w"))
    except pickle.PicklingError:
        print ("NANISORE?")
        return 1
    return 0


def query_library_by_tag(lib, val, tag="artist", corr="~#playcount", rkey="title"):
    """
    query the library "lib" by the tag "tag," searching for entries with tag
    value "val."
    returns a dictionary of the results.
    """
    raise NotImplementedError
    retv = {}
    for song in lib:
        if tag in song and val in song[tag] and corr in song:
            try:
                lkey = song[rkey]
            except KeyError:
                lkey = None
            if lkey in retv:
                if not isinstance(retv[lkey], list):
                    retv[lkey] = [retv[lkey],]
                retv[lkey].append(song[corr])
            else:
                retv[lkey] = song[corr]
    return retv


def main():
    """The main entry point."""
    raise NotImplementedError
    songs = load_library()
    backup_library()
    return prune_skips(songs)


##### EXECUTION BEGINS HEEEERREEEEE #####
if __name__ == "__main__":
    ret = main()
    sys.exit(ret)
Python
0.000001
@@ -250,22 +250,33 @@ %0Aimport -pickle +quodlibet.library %0A%0AHOME = @@ -315,29 +315,26 @@ dlibet%22%0A -PATH_TO_ SONGS +_PATH = os.pa @@ -378,28 +378,25 @@ ngs%22,%0A)%0A -PATH_TO_BKUP +BKUP_PATH = os.pa @@ -434,16 +434,16 @@ %22 -unpruned +songs.bk %22,%0A) @@ -473,112 +473,66 @@ -raise NotImplementedError%0A sfh = open(PATH_TO_SONGS, 'r')%0A songs = pickle.load(sfh)%0A sfh.close( +songs = quodlibet.library.init()%0A songs.load(SONGS_PATH )%0A @@ -547,16 +547,17 @@ songs%0A%0A +%0A def back @@ -578,105 +578,78 @@ -raise NotImplementedError%0A sfh = open(PATH_TO_SONGS, %22rb%22)%0A bfh = open(PATH_TO_BKUP, %22wb%22)%0A +with open(SONGS_PATH, %22rb%22) as spt, open(BKUP_PATH, %22wb%22) as bpt:%0A @@ -672,61 +672,17 @@ bj(s -fh, bfh)%0A sfh.close()%0A bfh.close()%0A return 0 +pt, bpt)%0A %0A%0Ade @@ -1177,21 +1177,18 @@ pen( -PATH_TO_ SONGS +_PATH , %22w @@ -1288,728 +1288,8 @@ 0%0A%0A%0A -def query_library_by_tag(lib, val, tag=%22artist%22, corr=%22~#playcount%22, rkey=%22title%22):%0A %22%22%22%0A query the library %22lib%22 by the tag %22tag,%22 searching for entries with tag%0A value %22val.%22%0A returns a dictionary of the results.%0A %22%22%22%0A raise NotImplementedError%0A retv = %7B%7D%0A for song in lib:%0A if tag in song and val in song%5Btag%5D and corr in song:%0A try:%0A lkey = song%5Brkey%5D%0A except KeyError:%0A lkey = None%0A if lkey in retv:%0A if not isinstance(retv%5Blkey%5D, list):%0A retv%5Blkey%5D = %5Bretv%5Blkey%5D,%5D%0A retv%5Blkey%5D.append(song%5Bcorr%5D)%0A else:%0A retv%5Blkey%5D = song%5Bcorr%5D%0A return retv%0A%0A def
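The diff above rewrites backup_library around shutil.copyfileobj with both files managed by a single with statement. A standalone sketch of that pattern (the paths are hypothetical):

import shutil

def backup(src_path, dst_path):
    # both handles are closed even if the copy raises
    with open(src_path, "rb") as src, open(dst_path, "wb") as dst:
        shutil.copyfileobj(src, dst)

# backup("songs", "songs.bk")  # hypothetical paths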
ba8e7f03469b55e8517361ade605804cb87757e3
Update res_partner.py
l10n_ro_fiscal_validation/models/res_partner.py
l10n_ro_fiscal_validation/models/res_partner.py
# Copyright (C) 2018 Forest and Biomass Romania
# Copyright (C) 2020 NextERP Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

import time

import requests

from odoo import api, fields, models

CEDILLATRANS = bytes.maketrans(
    u"\u015f\u0163\u015e\u0162".encode("utf8"),
    u"\u0219\u021b\u0218\u021a".encode("utf8"),
)

headers = {
    "User-Agent": "Mozilla/5.0 (compatible; MSIE 7.01; Windows NT 5.0)",
    "Content-Type": "application/json;",
}

ANAF_BULK_URL = "https://webservicesp.anaf.ro/AsynchWebService/api/v5/ws/tva"
ANAF_CORR = "https://webservicesp.anaf.ro/AsynchWebService/api/v5/ws/tva?id=%s"


class ResPartner(models.Model):
    _inherit = "res.partner"

    @api.model
    def update_vat_subjected(self):
        anaf_dict = []
        check_date = fields.Date.to_string(fields.Date.today())
        # Build list of vat numbers to be checked on ANAF
        for partner in self:
            anaf_dict.append(partner.vat_number)
        chunk = []
        chunks = []
        # Process 500 vat numbers once
        max_no = 499
        for position in range(0, len(anaf_dict), max_no):
            chunk = anaf_dict[position : position + max_no]
            chunks.append(chunk)
        for chunk in chunks:
            anaf_ask = []
            for item in chunk:
                anaf_ask.append({"cui": int(item), "data": check_date})
            res = requests.post(ANAF_BULK_URL, json=anaf_ask, headers=headers)
            if res.status_code == 200:
                res = res.json()
                if res["correlationId"]:
                    time.sleep(3)
                    resp = requests.get(ANAF_CORR % res["correlationId"])
                    if resp.status_code == 200:
                        resp = resp.json()
                        for res in resp["found"] + resp["notfound"]:
                            partner = self.search(
                                [
                                    ("vat_number", "=", res["cui"]),
                                    ("is_company", "=", True),
                                ]
                            )
                            if partner:
                                data = partner._Anaf_to_Odoo(res)
                                partner.update(data)

    @api.model
    def update_vat_subjected_all(self):
        partners = self.search(
            [
                ("vat", "!=", False),
                ("country_id", "=", self.env.ref("base.ro").id),
                ("is_company", "=", True),
            ]
        )
        partners.update_vat_subjected()

    @api.model
    def _update_vat_subjected_all(self):
        self.update_vat_subjected_all()
Python
0
@@ -540,25 +540,25 @@ ervice/api/v -5 +6 /ws/tva%22%0AANA @@ -618,17 +618,17 @@ ce/api/v -5 +6 /ws/tva?
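For context, the batching logic in update_vat_subjected reduces to this generic sketch: split the list of VAT numbers into chunks of at most max_no items before posting each chunk. The 499-item limit mirrors the code above; the sample data is invented.

def chunked(items, max_no=499):
    # one slice per batch, last batch holds the remainder
    return [items[pos:pos + max_no] for pos in range(0, len(items), max_no)]

vat_numbers = [str(n) for n in range(1200)]
print([len(c) for c in chunked(vat_numbers)])  # [499, 499, 202]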
a889d4726189d1a7c9a9fbd074ca2c1d6eca9d98
delete unnecessary constraint
chainercv/links/model/extraction_chain.py
chainercv/links/model/extraction_chain.py
import chainer

import collections


class ExtractionChain(chainer.Chain):

    def __init__(self, layers, layer_names=None):
        super(ExtractionChain, self).__init__()

        if not isinstance(layers, collections.OrderedDict):
            if layer_names is not None:
                raise ValueError('`layer_names` needs to be `None` unless '
                                 '`layers` is OrderedDict.')
            layers = collections.OrderedDict(
                [(str(i), function) for i, function in enumerate(layers)])
        self._layers = layers

        if layer_names is None:
            layer_names = self._layers.keys()[-1]

        if (not isinstance(layer_names, str) and
                all([isinstance(name, str) for name in layer_names])):
            return_tuple = True
        else:
            return_tuple = False
            layer_names = [layer_names]
        self._return_tuple = return_tuple
        self._layer_names = list(layer_names)

        with self.init_scope():
            for name, function in self._layers.items():
                if isinstance(function, chainer.Link):
                    setattr(self, name, function)

    def __call__(self, x):
        features = {}
        h = x

        for name, function in self._layers.items():
            h = function(h)
            if name in self._layer_names:
                features[name] = h

        if self._return_tuple:
            features = tuple(
                [features[name] for name in self._layer_names])
        else:
            features = list(features.values())[0]
        return features

    def copy(self):
        ret = super(ExtractionChain, self).copy()
        layers = []
        for name, function in self._layers.items():
            if name in self._children:
                function = ret[name]
            layers.append((name, function))
        ret.layers = collections.OrderedDict(layers)
        return ret
Python
0.000024
@@ -231,185 +231,8 @@ t):%0A - if layer_names is not None:%0A raise ValueError('%60layer_names%60 needs to be %60None%60 unless '%0A '%60layers%60 is OrderedDict.')%0A
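ExtractionChain applies an OrderedDict of callables in sequence and keeps the intermediate results whose names were requested. A dependency-free sketch of that flow using plain functions (the chainer.Link registration is omitted):

import collections

layers = collections.OrderedDict([
    ('double', lambda x: x * 2),
    ('inc', lambda x: x + 1),
    ('square', lambda x: x * x),
])

def extract(x, layer_names):
    # run every layer, remember only the requested intermediates
    features = {}
    for name, function in layers.items():
        x = function(x)
        if name in layer_names:
            features[name] = x
    return tuple(features[name] for name in layer_names)

print(extract(3, ('double', 'square')))  # (6, 49)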
ce143f40f3131bbd04e40cacec50cae3e725b598
use new package module
updatecmd.py
updatecmd.py
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import package
import files
import shutil
import pwd
import grp
import files

def doUpdate(cfg, root, pkgName, binaries = 1, sources = 0):
    if root == "/":
        print "using srs to update to your actual system is dumb."
        import sys
        sys.exit(0)

    if pkgName[0] != "/":
        pkgName = cfg.packagenamespace + "/" + pkgName

    pkgSet = package.PackageSet(cfg.reppath, pkgName)

    if (not len(pkgSet.versionList())):
        raise KeyError, "no versions exist of %s" % pkgName

    (version, pkg) = pkgSet.getLatest()

    fileList = []
    packageFiles = []
    if binaries:
        packageFiles = packageFiles + pkg.fileList()
    if sources:
        packageFiles = packageFiles + pkg.sourceList()

    for (fileName, version) in packageFiles:
        infoFile = files.FileDB(cfg.reppath, cfg.reppath + fileName)
        fileList.append(infoFile)

    for infoFile in fileList:
        f = infoFile.getVersion(version)
        f.restore(cfg.reppath, cfg.sourcepath, root)
Python
0
@@ -531,23 +531,12 @@ %0A - (version, pkg -) = p @@ -550,17 +550,41 @@ etLatest -( +Package(cfg.defaultbranch )%0A%0A f
6ebf6e6f2e8c4e2be5e4778089a8d4a66432c88b
update ProgressHook
chainercv/utils/iterator/progress_hook.py
chainercv/utils/iterator/progress_hook.py
from __future__ import division

import sys
import time


class ProgressHook(object):
    """A hook class reporting the progress of iteration.

    This is a hook class designed for
    :func:`~chainercv.utils.apply_prediction_to_iterator`.

    Args:
        n_total (int): The number of images. This argument is optional.
    """

    def __init__(self, n_total=None):
        self.n_total = n_total
        self.start = time.time()
        self.n_processed = 0

    def __call__(self, imgs, pred_values, gt_values):
        self.n_processed += len(imgs)
        fps = self.n_processed / (time.time() - self.start)
        if self.n_total is not None:
            sys.stdout.write(
                '\r{:d} of {:d} images, {:.2f} FPS'.format(
                    self.n_processed, self.n_total, fps))
        else:
            sys.stdout.write(
                '\r{:d} images, {:.2f} FPS'.format(
                    self.n_processed, fps))
        sys.stdout.flush()
Python
0
@@ -486,17 +486,21 @@ f, i -mgs, pred +n_values, out _val @@ -504,17 +504,19 @@ values, -g +res t_values @@ -555,11 +555,19 @@ en(i -mgs +n_values%5B0%5D )%0A @@ -719,28 +719,29 @@ :d%7D of %7B:d%7D -imag +sampl es, %7B:.2f%7D F @@ -731,35 +731,43 @@ samples, %7B:.2f%7D -FPS +samples/sec '.format(%0A @@ -886,20 +886,21 @@ '%5Cr%7B:d%7D -imag +sampl es, %7B:.2 @@ -906,11 +906,19 @@ 2f%7D -FPS +samples/sec '.fo
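The diff renames the hook arguments to (in_values, out_values, rest_values) and counts samples via len(in_values[0]). A toy driver showing how a hook with that signature might be fed; the batches and the ToyHook class are invented for illustration.

import time

class ToyHook:
    def __init__(self):
        self.start = time.time()
        self.n_processed = 0

    def __call__(self, in_values, out_values, rest_values):
        # in_values is a tuple of arrays; the first one sets the sample count
        self.n_processed += len(in_values[0])
        rate = self.n_processed / (time.time() - self.start)
        print('{:d} samples, {:.2f} samples/sec'.format(self.n_processed, rate))

hook = ToyHook()
for batch in ([1, 2, 3], [4, 5]):
    hook((batch,), (batch,), ())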
75f8cad103cc3d78864358d7940465ea4caf9964
correct MissingStartTimeError Exception error message
corehq/apps/auditcare/utils/migration.py
corehq/apps/auditcare/utils/migration.py
import logging
from datetime import datetime, timedelta

from django.core.cache import cache

from dimagi.utils.dates import force_to_datetime

from corehq.apps.auditcare.models import AuditcareMigrationMeta
from corehq.apps.auditcare.utils.export import get_sql_start_date

CUTOFF_TIME = datetime(2013, 1, 1)
CACHE_TTL = 14 * 24 * 60 * 60  # 14 days

logger = logging.getLogger(__name__)


class AuditCareMigrationUtil():

    def __init__(self):
        self.start_key = "auditcare_migration_2021_next_batch_time"
        self.start_lock_key = "auditcare_migration_batch_lock"

    def get_next_batch_start(self):
        return cache.get(self.start_key)

    def generate_batches(self, worker_count, batch_by):
        batches = []
        with cache.lock(self.start_lock_key, timeout=10):
            start_datetime = self.get_next_batch_start()
            if not start_datetime:
                if AuditcareMigrationMeta.objects.count() != 0:
                    raise MissingStartTimeError()
                # For first run set the start_datetime to the event_time of the first record
                # in the SQL. If there are no records in SQL, start_time would be set as
                # current time
                start_datetime = get_sql_start_date()
                if not start_datetime:
                    start_datetime = datetime.now()

            if start_datetime < CUTOFF_TIME:
                logger.info("Migration Successful")
                return

            start_time = start_datetime
            end_time = None

            for index in range(worker_count):
                end_time = _get_end_time(start_time, batch_by)
                if end_time < CUTOFF_TIME:
                    break
                batches.append([start_time, end_time])
                start_time = end_time
            self.set_next_batch_start(end_time)
        return batches

    def set_next_batch_start(self, value):
        cache.set(self.start_key, value, CACHE_TTL)

    def get_errored_keys(self, limit):
        errored_keys = (AuditcareMigrationMeta.objects
                        .filter(state=AuditcareMigrationMeta.ERRORED)
                        .values_list('key', flat=True)[:limit])

        return [get_datetimes_from_key(key) for key in errored_keys]

    def log_batch_start(self, key):
        if AuditcareMigrationMeta.objects.filter(key=key):
            return
        AuditcareMigrationMeta.objects.create(
            key=key,
            state=AuditcareMigrationMeta.STARTED,
            created_at=datetime.now()
        )

    def set_batch_as_finished(self, key, count, other_doc_type_count=0):
        AuditcareMigrationMeta.objects.filter(key=key).update(
            state=AuditcareMigrationMeta.FINISHED,
            record_count=count,
            other_doc_type_count=other_doc_type_count,
            finished_at=datetime.now()
        )

    def set_batch_as_errored(self, key, last_doc=None, other_doc_type_count=0):
        AuditcareMigrationMeta.objects.filter(key=key).update(
            state=AuditcareMigrationMeta.ERRORED,
            last_doc_processed=last_doc,
            other_doc_type_count=other_doc_type_count
        )

    def get_existing_count(self, key):
        counts = AuditcareMigrationMeta.objects.filter(key=key).values_list(
            'record_count',
            'other_doc_type_count',
        ).first()
        return list(counts) if counts else [0, 0]


def get_formatted_datetime_string(datetime_obj):
    return datetime_obj.strftime("%Y-%m-%d %H:%M:%S")


def get_datetimes_from_key(key):
    start, end = key.split("_")
    return [force_to_datetime(start), force_to_datetime(end)]


def _get_end_time(start_time, batch_by):
    delta = timedelta(hours=1) if batch_by == 'h' else timedelta(days=1)
    end_time = start_time - delta
    if batch_by == 'h':
        return end_time.replace(minute=0, second=0, microsecond=0)
    else:
        return end_time.replace(hour=0, minute=0, second=0, microsecond=0)


class MissingStartTimeError(Exception):
    message = """The migration process has already been started before,
    but we are unable to determine the start key.
    You can manually set the start key using
    start_key = "2021-06-02_2021-06-01"
    AuditCareMigraionUtil.set_next_batch_start(start_key)"""

    def __init__(self, message=message):
        super().__init__(message)
Python
0.000009
@@ -4194,43 +4194,166 @@ -start_key = %222021-06-02_2021-06-01%22 +%0A from datetime import datetime%0A from corehq.apps.auditcare.utils.migration import AuditCareMigrationUtil%0A start_key = datetime(2021,6,1) %0A @@ -4371,23 +4371,26 @@ areMigra +t ionUtil +() .set_nex
f26afa6ed467ba475a7d2d7572d93a41f84b794a
Create automatic form for the option of the TableGraph object
dataedit/forms.py
dataedit/forms.py
from django import forms
from django.db import models
from django.forms import ModelForm

from dataedit.models import Tag

# This structure maps postgresql data types to django forms
typemap = [
    (["smallint"], models.SmallIntegerField),
    (["integer", "serial"], forms.IntegerField),
    (["bigint", "bigserial"], models.BigIntegerField),
    (["decimal", "numeric", "real", "double precision", "money"],
     models.DecimalField),
    (
        ["character varying(", "varying(", "varchar(", "character(", "char(",
         "text"],
        forms.CharField,
    ),
    (["timestamp", "date", "time"], forms.DateTimeField),
    (["bytea"], forms.CharField),
    (["interval"], models.DurationField),
    (["boolean"], models.BooleanField),
    (["point", "line", "lseg", "box", "path", "polygon", "circle"],
     forms.CharField),
    (["cidr", "inet", "macaddr"], forms.CharField),
    (["bit(", "bit varying("], forms.CharField),
    (["uuid"], models.UUIDField),
    (["xml"], forms.CharField),
]

# TODO: add missing types: Textsearch, Enumeration, \
# Composite types, Object Identifier Types, Pseudo-Types


def type2field(typename: str):
    additionals = {}
    resField = None
    typename = typename.lower()
    for (keyList, field) in typemap:
        if any(typename.startswith(key) for key in keyList):
            resField = field

    if "[" in typename:
        return resField

    if not resField:
        raise Exception(
            "type '{0}' does not \
            translate to a django field".format(
                typename
            )
        )

    for _ in range(typename.count("[") - 1):
        resField = ArrayField(resField())

    if "[" in typename:
        resField = lambda **x: ArrayField(resField(), **x)

    return resField


class InputForm(forms.Form):
    def __init__(self, *args, **kwargs):
        fields = kwargs.pop("fields", [])
        values = kwargs.pop("values", [])
        super(forms.Form, self).__init__(*args, **kwargs)
        for (name, typename) in fields:
            self.fields[name] = type2field(typename)(label=name)
            if name in values:
                self.fields[name].initial = values[name]


class UploadFileForm(forms.Form):
    title = forms.CharField(max_length=50)
    file = forms.FileField()


class UploadMapForm(forms.Form):
    """calculates the longest common substring of two strings"""

    def _lcs(self, s1, s2):
        m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
        longest, x_longest = 0, 0
        for x in range(1, 1 + len(s1)):
            for y in range(1, 1 + len(s2)):
                if s1[x - 1] == s2[y - 1]:
                    m[x][y] = m[x - 1][y - 1] + 1
                    if m[x][y] > longest:
                        longest = m[x][y]
                        x_longest = x
                else:
                    m[x][y] = 0
        return longest

    def __init__(self, *args, **kwargs):
        fields = kwargs.pop("fields")
        headers = kwargs.pop("headers")
        super(forms.Form, self).__init__(*args, **kwargs)
        for (name, typename) in fields:
            self.fields[name] = forms.ChoiceField(
                label=name, choices=((x, x) for x in ["---"] + headers)
            )
            if any(self._lcs(name, x) / min(len(name), len(x)) > 0.7
                   for x in headers):
                self.fields[name].initial = max(
                    headers, key=lambda x: self._lcs(name, x)
                )
            else:
                self.fields[name].initial = "---"


class TagForm(ModelForm):
    class Meta:
        model = Tag
        fields = ["label"]
Python
0
@@ -114,16 +114,28 @@ port Tag +, TableGraph %0A%0A# This @@ -3510,16 +3510,489 @@ %22---%22%0A%0A%0A +class TableGraphForm(ModelForm):%0A%0A class Meta:%0A model = TableGraph%0A fields = '__all__'%0A exclude = ('table', 'schema')%0A%0A def __init__(self, *args, **kwargs):%0A columns = kwargs.pop('columns', None)%0A super(TableGraphForm, self).__init__(*args, **kwargs)%0A%0A if columns is not None:%0A self.fields%5B'column_x'%5D = forms.ChoiceField(choices=columns)%0A self.fields%5B'column_y'%5D = forms.ChoiceField(choices=columns)%0A%0A%0A class Ta
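The TableGraphForm added by the diff builds its ChoiceFields at runtime from a columns argument popped out of kwargs. A minimal sketch of that pattern (it needs a configured Django project to run; the form name and column list are hypothetical):

from django import forms

class ColumnPickerForm(forms.Form):
    def __init__(self, *args, **kwargs):
        # pop the extra argument before the base class sees kwargs
        columns = kwargs.pop('columns', [])
        super().__init__(*args, **kwargs)
        choices = [(c, c) for c in columns]
        self.fields['column_x'] = forms.ChoiceField(choices=choices)
        self.fields['column_y'] = forms.ChoiceField(choices=choices)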
5f38b5423846255e2265166f926a2e072e833d89
Fix typo in message
datajoint/jobs.py
datajoint/jobs.py
import hashlib
import os
import pymysql
from .base_relation import BaseRelation

ERROR_MESSAGE_LENGTH = 2047
TRUNCATION_APPENDIX = '...truncated'


def key_hash(key):
    """
    32-byte hash used for lookup of primary keys of jobs
    """
    hashed = hashlib.md5()
    for k, v in sorted(key.items()):
        hashed.update(str(v).encode())
    return hashed.hexdigest()


class JobTable(BaseRelation):
    """
    A base relation with no definition. Allows reserving jobs
    """

    def __init__(self, arg, database=None):
        super().__init__()
        if isinstance(arg, JobTable):
            # copy constructor
            self.database = arg.database
            self._connection = arg._connection
            self._definition = arg._definition
            self._user = arg._user
            return
        self.database = database
        self._connection = arg
        self._definition = """    # job reservation table for `{database}`
        table_name  :varchar(255)  # className of the table
        key_hash  :char(32)  # key hash
        ---
        status  :enum('reserved','error','ignore')  # if tuple is missing, the job is available
        key=null  :blob  # structure containing the key
        error_message=""  :varchar({error_messsage_length})  # error message returned if failed
        error_stack=null  :blob  # error stack if failed
        user=""  :varchar(255)  # database user
        host=""  :varchar(255)  # system hostname
        pid=0  :int unsigned  # system process id
        timestamp=CURRENT_TIMESTAMP  :timestamp  # automatic timestamp
        """.format(database=database, error_messsage_length=ERROR_MESSAGE_LENGTH)
        if not self.is_declared:
            self.declare()
        self._user = self.connection.get_user()

    @property
    def definition(self):
        return self._definition

    @property
    def table_name(self):
        return '~jobs'

    def delete(self):
        """bypass interactive prompts and dependencies"""
        self.delete_quick()

    def drop(self):
        """bypass interactive prompts and dependencies"""
        self.drop_quick()

    def reserve(self, table_name, key):
        """
        Reserve a job for computation. When a job is reserved, the job table
        contains an entry for the job key, identified by its hash. When jobs
        are completed, the entry is removed.
        :param table_name: `database`.`table_name`
        :param key: the dict of the job's primary key
        :return: True if reserved job successfully. False = the jobs is already taken
        """
        job = dict(
            table_name=table_name,
            key_hash=key_hash(key),
            status='reserved',
            host=os.uname().nodename,
            pid=os.getpid(),
            user=self._user)
        try:
            self.insert1(job)
        except pymysql.err.IntegrityError:
            return False
        return True

    def complete(self, table_name, key):
        """
        Log a completed job. When a job is completed, its reservation entry
        is deleted.
        :param table_name: `database`.`table_name`
        :param key: the dict of the job's primary key
        """
        job_key = dict(table_name=table_name, key_hash=key_hash(key))
        (self & job_key).delete_quick()

    def error(self, table_name, key, error_message):
        """
        Log an error message. The job reservation is replaced with an error entry.
        if an error occurs, leave an entry describing the problem
        :param table_name: `database`.`table_name`
        :param key: the dict of the job's primary key
        :param error_message: string error message
        """
        if len(error_message) > ERROR_MESSAGE_LENGTH:
            error_message = error_message[:ERROR_MESSAGE_LENGTH-len(TRUNCATION_APPENDIX)] + TRUNCATION_APPENDIX
        job_key = dict(table_name=table_name, key_hash=key_hash(key))
        self.insert1(
            dict(job_key,
                 status="error",
                 host=os.uname().nodename,
                 pid=os.getpid(),
                 user=self._user,
                 error_message=error_message),
            replace=True, ignore_extra_fields=True)


class JobManager:
    """
    A container for all job tables (one job table per schema).
    """

    def __init__(self, connection):
        self.connection = connection
        self._jobs = {}

    def __getitem__(self, database):
        if database not in self._jobs:
            self._jobs[database] = JobTable(self.connection, database)
        return self._jobs[database]
Python
0.998587
@@ -1247,33 +1247,32 @@ char(%7Berror_mess -s age_length%7D) # @@ -1627,17 +1627,16 @@ ror_mess -s age_leng
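Why the misspelled placeholder mattered: str.format resolves named placeholders by exact spelling, so the template above only worked because the placeholder and the .format() keyword shared the same typo. Fixing one side without the other raises KeyError:

template = 'error_message="" :varchar({error_messsage_length})'
print(template.format(error_messsage_length=2047))  # works: typo matches typo
try:
    template.format(error_message_length=2047)  # corrected kwarg, stale template
except KeyError as exc:
    print('KeyError:', exc)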
fd6463dfa31802c1b35f5c91db591d9b0ce47c7e
fix config with empty dict
dataset/config.py
dataset/config.py
""" Config class""" class Config: """ Class for configs that can be represented as nested dicts with easy indexing by slashes. """ def __init__(self, config=None, **kwargs): if config is None: self.config = dict() elif isinstance(config, dict): self.config = self.parse(config) else: self.config = config.config self.update(**self.parse(kwargs)) def pop(self, variables, config=None, **kwargs): """ Return variables and remove them from config. Parameters ---------- variables : str or list of strs names of variables. '/' is used to get value from nested dict. config : dict, Config or None if None variables will be getted from self.config. Returns ------- single value or a tuple """ if isinstance(config, Config): return config.pop(variables, None, **kwargs) else: return self._get(variables, config, pop=True, **kwargs) def get(self, variables, config=None, default=None): """ Return variables from config. Parameters ---------- variables : str or list of str names of variables. '/' is used to get value from nested dict. config : dict, Config or None if None variables will be getted from self.config. Returns ------- single value or a tuple """ if isinstance(config, Config): val = config.get(variables, default=default) else: val = self._get(variables, config, default=default, pop=False) return val def _get(self, variables, config=None, **kwargs): if config is None: config = self.config pop = kwargs.get('pop', False) has_default = 'default' in kwargs default = kwargs.get('default') unpack = False if not isinstance(variables, (list, tuple)): variables = list([variables]) unpack = True ret_vars = [] for variable in variables: _config = config if '/' in variable: var = variable.split('/') prefix = var[:-1] var_name = var[-1] else: prefix = [] var_name = variable for p in prefix: if p in _config: _config = _config[p] else: _config = None break if _config: if pop: if has_default: val = _config.pop(var_name, default) else: val = _config.pop(var_name) else: if has_default: val = _config.get(var_name, default) else: val = _config[var_name] else: if has_default: val = default else: raise KeyError("Key '%s' not found" % variable) ret_vars.append(val) if unpack: ret_vars = ret_vars[0] else: ret_vars = tuple(ret_vars) return ret_vars def put(self, variable, value, config=None): """ Put a new variable into config. Parameters ---------- variable : str variable to add. '/' is used to put value into nested dict. value : masc config : dict, Config or None if None value will be putted into self.config. """ if config is None: config = self.config elif isinstance(config, Config): config = config.config variable = variable.strip('/') if '/' in variable: var = variable.split('/') prefix = var[:-1] var_name = var[-1] else: prefix = [] var_name = variable for p in prefix: if p not in config: config[p] = dict() config = config[p] if var_name in config and isinstance(config[var_name], dict) and isinstance(value, dict): config[var_name].update(value) else: config[var_name] = value def parse(self, config): """ Parse flatten config with slashes. Parameters ---------- config : dict or Config Returns ------- new_config : dict """ if isinstance(config, Config): return config.config new_config = dict() for key, value in config.items(): if isinstance(value, dict): value = self.parse(value) self.put(key, value, new_config) return new_config def flatten(self, config=None): """ Transform nested dict into flatten dict. Parameters ---------- config : dict, Config or None if None self.config will be parsed else config. 
Returns ------- new_config : dict """ if config is None: config = self.config elif isinstance(config, Config): config = config.config new_config = dict() for key, value in config.items(): if isinstance(value, dict): value = self.flatten(value) for _key, _value in value.items(): new_config[key+'/'+_key] = _value else: new_config[key] = value return new_config def __add__(self, other): if isinstance(other, dict): other = Config(other) return Config({**self.flatten(), **other.flatten()}) def __radd__(self, other): if isinstance(other, dict): other = Config(other) return other.__add__(self) def __getitem__(self, key): value = self._get(key) return value def __setitem__(self, key, value): self.put(key, value) def __delitem__(self, key): self.pop(key) def __len__(self): return len(self.config) def items(self, flatten=False): """ Return config items. """ if flatten: return self.flatten().items() else: return self.config.items() def keys(self, flatten=False): """ Return config keys. """ if flatten: return self.flatten().keys() else: return self.config.keys() def values(self, flatten=False): """ Return config values. """ if flatten: return self.flatten().values() else: return self.config.values() def update(self, other=None, **kwargs): """ Update config with values from other. """ other = dict() if other is None else other if hasattr(other, 'keys'): for key in other: self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwargs.items(): self[key] = value def __iter__(self): return iter(self.config) def __repr__(self): return "Config(" + str(self.config) + ")"
Python
0.000005
@@ -5400,32 +5400,51 @@ nce(value, dict) + and len(value) %3E 0 :%0A
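The guard the diff adds, in isolation: before the change an empty dict took the merge branch and silently left the old value in place; requiring len(value) > 0 makes assignment win instead. A reduced sketch of that behaviour:

def put(config, key, value):
    # mirrors the post-fix condition in Config.put
    if key in config and isinstance(config[key], dict) \
            and isinstance(value, dict) and len(value) > 0:
        config[key].update(value)
    else:
        config[key] = value

cfg = {'model': {'layout': 'cna'}}
put(cfg, 'model', {})   # an empty dict now overwrites instead of merging
print(cfg)              # {'model': {}}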
39c50ed20cbd282c1e351057cc67d7234ca0a37d
incorrect histogram
assignment-1/utilities.py
assignment-1/utilities.py
from random import expovariate, randint
from queue import Queue
import matplotlib.pyplot as plt
import numpy as np


class Packet:
    def __init__(self, arrival_time, service_time):
        self.arrival_time = arrival_time
        self.service_time = service_time


class Generator:
    def __init__(self, arrival_lambda=0.3, service_lambda=1.4):
        self.arrival_lambda = arrival_lambda
        self.service_lambda = service_lambda
        self.next_arrival_time = 0.0 + expovariate(arrival_lambda)

    def next(self):
        pack = Packet(self.next_arrival_time, expovariate(self.service_lambda))
        self.next_arrival_time += expovariate(self.arrival_lambda)
        return pack


class Line:
    def __init__(self, identity, generator=Generator()):
        self.id = identity
        self.queue = Queue()
        self.generator = Generator()
        self.waiting_time_samples = []

    def log_waiting_time(self, waiting_time):
        self.waiting_time_samples.append(waiting_time)


class MultipleQueue:
    def __init__(self, queue_count):
        if type(queue_count) == list:
            lambdas = queue_count
            queue_count = len(lambdas)
        self.queue_count = queue_count
        self.next_queue_index = 0
        self.lines = [Line(i) for i in range(queue_count)]
        self.line_with_minimal_arrival_time = None

    def next_arrival_line(self):
        return min(self.lines, key=lambda x: x.generator.next_arrival_time)


class Histogram:
    def __init__(self):
        pass

    @staticmethod
    def plot(data):
        counts, bin_edges = np.histogram(data, bins=100, density=True)
        cdf = [sum(counts[i:]) for i in range(len(counts))]
        bins_avg = (bin_edges[:-1] + bin_edges[1:]) / 2
        plt.plot(bins_avg, cdf)

    @staticmethod
    def show():
        plt.title('Waiting Time Cumulative Distribution')
        plt.xlabel('Time Epoch')
        plt.ylabel('Probability')
        plt.semilogy()
        plt.show()
Python
0.999014
@@ -1652,16 +1652,48 @@ nts%5Bi:%5D) + * (bin_edges%5B1%5D - bin_edges%5B0%5D) for i i
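The fix in numbers: with density=True, numpy normalises the histogram so that sum(counts) * bin_width equals 1, so a complementary CDF built from partial sums must be scaled by the bin width to stay within [0, 1]. A small check:

import numpy as np

data = np.random.exponential(scale=1.0, size=10000)
counts, bin_edges = np.histogram(data, bins=100, density=True)
bin_width = bin_edges[1] - bin_edges[0]
# without the bin-width factor these partial sums can exceed 1
ccdf = [sum(counts[i:]) * bin_width for i in range(len(counts))]
print(round(ccdf[0], 3))  # close to 1.0 once the bin width is applied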
6612e22cb4f001c3b3e5230c49988e232e37ffad
use Exception as a base class for classes inheriting from Exception
astroid/brain/brain_gi.py
astroid/brain/brain_gi.py
# Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 Cole Robinson <crobinso@redhat.com>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2015 David Shea <dshea@redhat.com>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016 Giuseppe Scrivano <gscrivan@redhat.com>

# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER

"""Astroid hooks for the Python 2 GObject introspection bindings.

Helps with understanding everything imported from 'gi.repository'
"""

import inspect
import itertools
import sys
import re
import warnings

from astroid import MANAGER, AstroidBuildingError, nodes
from astroid.builder import AstroidBuilder

_inspected_modules = {}

_identifier_re = r'^[A-Za-z_]\w*$'


def _gi_build_stub(parent):
    """
    Inspect the passed module recursively and build stubs for functions,
    classes, etc.
    """
    classes = {}
    functions = {}
    constants = {}
    methods = {}
    for name in dir(parent):
        if name.startswith("__"):
            continue
        # Check if this is a valid name in python
        if not re.match(_identifier_re, name):
            continue
        try:
            obj = getattr(parent, name)
        except:
            continue

        if inspect.isclass(obj):
            classes[name] = obj
        elif (inspect.isfunction(obj) or
              inspect.isbuiltin(obj)):
            functions[name] = obj
        elif (inspect.ismethod(obj) or
              inspect.ismethoddescriptor(obj)):
            methods[name] = obj
        elif (str(obj).startswith("<flags") or
              str(obj).startswith("<enum ") or
              str(obj).startswith("<GType ") or
              inspect.isdatadescriptor(obj)):
            constants[name] = 0
        elif isinstance(obj, (int, str)):
            constants[name] = obj
        elif callable(obj):
            # Fall back to a function for anything callable
            functions[name] = obj
        else:
            # Assume everything else is some manner of constant
            constants[name] = 0

    ret = ""

    if constants:
        ret += "# %s constants\n\n" % parent.__name__
    for name in sorted(constants):
        if name[0].isdigit():
            # GDK has some busted constant names like
            # Gdk.EventType.2BUTTON_PRESS
            continue

        val = constants[name]

        strval = str(val)
        if isinstance(val, str):
            strval = '"%s"' % str(val).replace("\\", "\\\\")
        ret += "%s = %s\n" % (name, strval)

    if ret:
        ret += "\n\n"
    if functions:
        ret += "# %s functions\n\n" % parent.__name__

    for name in sorted(functions):
        ret += "def %s(*args, **kwargs):\n" % name
        ret += "    pass\n"

    if ret:
        ret += "\n\n"
    if methods:
        ret += "# %s methods\n\n" % parent.__name__

    for name in sorted(methods):
        ret += "def %s(self, *args, **kwargs):\n" % name
        ret += "    pass\n"

    if ret:
        ret += "\n\n"
    if classes:
        ret += "# %s classes\n\n" % parent.__name__

    for name in sorted(classes):
        ret += "class %s(object):\n" % name

        classret = _gi_build_stub(classes[name])
        if not classret:
            classret = "pass\n"

        for line in classret.splitlines():
            ret += "    " + line + "\n"
        ret += "\n"

    return ret


def _import_gi_module(modname):
    # we only consider gi.repository submodules
    if not modname.startswith('gi.repository.'):
        raise AstroidBuildingError(modname=modname)
    # build astroid representation unless we already tried so
    if modname not in _inspected_modules:
        modnames = [modname]
        optional_modnames = []

        # GLib and GObject may have some special case handling
        # in pygobject that we need to cope with. However at
        # least as of pygobject3-3.13.91 the _glib module doesn't
        # exist anymore, so we treat these modules as optional.
        if modname == 'gi.repository.GLib':
            optional_modnames.append('gi._glib')
        elif modname == 'gi.repository.GObject':
            optional_modnames.append('gi._gobject')

        try:
            modcode = ''
            for m in itertools.chain(modnames, optional_modnames):
                try:
                    with warnings.catch_warnings():
                        # Just inspecting the code can raise gi deprecation
                        # warnings, so ignore them.
                        try:
                            from gi import PyGIDeprecationWarning, PyGIWarning
                            warnings.simplefilter("ignore", PyGIDeprecationWarning)
                            warnings.simplefilter("ignore", PyGIWarning)
                        except Exception:
                            pass

                        __import__(m)
                        modcode += _gi_build_stub(sys.modules[m])
                except ImportError:
                    if m not in optional_modnames:
                        raise
        except ImportError:
            astng = _inspected_modules[modname] = None
        else:
            astng = AstroidBuilder(MANAGER).string_build(modcode, modname)
            _inspected_modules[modname] = astng
    else:
        astng = _inspected_modules[modname]
    if astng is None:
        raise AstroidBuildingError(modname=modname)
    return astng


def _looks_like_require_version(node):
    # Return whether this looks like a call to gi.require_version(<name>, <version>)
    # Only accept function calls with two constant arguments
    if len(node.args) != 2:
        return False

    if not all(isinstance(arg, nodes.Const) for arg in node.args):
        return False

    func = node.func
    if isinstance(func, nodes.Attribute):
        if func.attrname != 'require_version':
            return False
        if isinstance(func.expr, nodes.Name) and func.expr.name == 'gi':
            return True

        return False

    if isinstance(func, nodes.Name):
        return func.name == 'require_version'

    return False


def _register_require_version(node):
    # Load the gi.require_version locally
    try:
        import gi
        gi.require_version(node.args[0].value, node.args[1].value)
    except Exception:
        pass

    return node


MANAGER.register_failed_import_hook(_import_gi_module)
MANAGER.register_transform(nodes.Call, _register_require_version,
                           _looks_like_require_version)
Python
0.001088
@@ -3318,32 +3318,37 @@ e__%0A for name +, obj in sorted(class @@ -3349,19 +3349,121 @@ (classes +.items() ):%0A + base = %22object%22%0A if issubclass(obj, Exception):%0A base = %22Exception%22%0A @@ -3483,22 +3483,18 @@ %25s( -object +%25s ):%5Cn%22 %25 name @@ -3489,20 +3489,28 @@ ):%5Cn%22 %25 +( name +, base) %0A%0A @@ -3537,29 +3537,19 @@ ld_stub( -classes%5Bname%5D +obj )%0A
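The stub generator now picks each stub's base class with issubclass, as the diff shows. The same check in isolation, over a toy namespace instead of a gi module:

class PlainThing:
    pass

class GError(Exception):
    pass

for obj in (PlainThing, GError):
    # classes deriving from Exception get Exception as the stub base
    base = "Exception" if issubclass(obj, Exception) else "object"
    print("class %s(%s): ..." % (obj.__name__, base))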
d31f1455f872c97104da24948bbdc459021ff03e
Comment spacing
dataactcore/scripts/get_fsrs_updates.py
dataactcore/scripts/get_fsrs_updates.py
import argparse
import re
import logging
import os
import csv
import boto3
import datetime
import json

from dataactcore.logging import configure_logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_BROKER
from dataactvalidator.health_check import create_app

logger = logging.getLogger(__name__)

'''
This script is used to pull updated financial assistance records (from --date to present) for FSRS.
It can also run with --auto to poll the specified S3 bucket (BUCKET_NAME/BUCKET_PREFIX}) for the most recent
file that was uploaded, and use the boto3 response for --date.
'''

BUCKET_NAME = CONFIG_BROKER['data_extracts_bucket']
BUCKET_PREFIX = 'fsrs_award_extracts/'


def main():
    now = datetime.datetime.now()
    parser = argparse.ArgumentParser(description='Pull')
    parser.add_argument('--date',
                        help='Specify modified date in mm/dd/yyyy format. Overrides --auto option.',
                        nargs=1, type=str)
    parser.add_argument('--auto',
                        help='Polls S3 for the most recently uploaded FABS_for_FSRS file, ' +
                             'and uses that as the modified date.',
                        action='store_true')
    args = parser.parse_args()

    metrics_json = {
        'script_name': 'get_fsrs_updates.py',
        'start_time': str(now),
        'records_provided': 0,
        'start_date': ''
    }

    if args.auto:
        s3_resource = boto3.resource('s3', region_name='us-gov-west-1')
        extract_bucket = s3_resource.Bucket(BUCKET_NAME)
        all_fsrs_extracts = extract_bucket.objects.filter(Prefix=BUCKET_PREFIX)
        mod_date = max(all_fsrs_extracts, key=lambda k: k.last_modified).last_modified.strftime("%m/%d/%Y")

    if args.date:
        arg_date = args.date[0]
        given_date = arg_date.split('/')
        if not re.match('^\d{2}$', given_date[0]) or not re.match('^\d{2}$', given_date[1])\
                or not re.match('^\d{4}$', given_date[2]):
            logger.error("Date " + arg_date + " not in proper mm/dd/yyyy format")
            return
        mod_date = arg_date

    if not mod_date:
        logger.error("Date or auto setting is required.")
        return

    metrics_json['start_date'] = mod_date

    logger.info("Starting SQL query of financial assistance records from {} to present...".format(mod_date))
    sess = GlobalDB.db().session
    """
Query Summary:
Each row is the *latest transaction of an award* with the transaction’s modified_date being within the past day and
also includes summary data about the award associated with the transaction.
    """
    results = sess.execute("""
        WITH base_transaction AS (
            SELECT fain,
                MIN(pafa_b.action_date) as base_date,
                MIN(pafa_b.period_of_performance_star) as earliest_start,
                MAX(pafa_b.period_of_performance_curr) as latest_end,
                MAX(pafa_b.modified_at) as max_mod,
                SUM(CASE WHEN pafa_b.is_active = True
                    THEN pafa_b.federal_action_obligation
                    ELSE 0
                    END) as obligation_sum,
                CASE WHEN EXISTS (SELECT 1
                    FROM published_award_financial_assistance AS sub_pafa_b
                    WHERE is_active = True
                        AND pafa_b.fain = sub_pafa_b.fain)
                    THEN True
                    ELSE False
                    END AS currently_active
            FROM published_award_financial_assistance AS pafa_b
            WHERE assistance_type IN ('02', '03', '04', '05')
                AND record_type != 1
            GROUP BY fain),
        only_base AS (SELECT pafa.*,
            base_date,
            earliest_start,
            latest_end,
            currently_active,
            obligation_sum
            FROM published_award_financial_assistance AS pafa
            JOIN base_transaction AS bt
                ON bt.fain = pafa.fain
                AND bt.max_mod = pafa.modified_at
                AND pafa.record_type != 1)
        SELECT
            ob.fain AS federal_award_id,
            CASE WHEN currently_active
                THEN 'active'
                ELSE 'inactive'
                END AS status,
            CASE WHEN CAST(ob.obligation_sum as double precision) > 25000
                    AND CAST(ob.base_date as DATE) > '10/01/2010'
                THEN 'Eligible'
                ELSE 'Ineligible'
                END AS eligibility,
            ob.sai_number,
            ob.awarding_sub_tier_agency_c AS agency_code,
            ob.awardee_or_recipient_uniqu AS duns_no,
            NULL AS dunsplus4,
            ob.uei AS uei,
            ob.place_of_performance_city AS principal_place_cc,
            CASE WHEN UPPER(LEFT(ob.place_of_performance_code, 2)) ~ '[A-Z]{2}'
                THEN UPPER(LEFT(ob.place_of_performance_code, 2))
                ELSE NULL
                END AS principal_place_state_code,
            ob.place_of_perform_country_c AS principal_place_country_code,
            ob.place_of_performance_zip4a AS principal_place_zip,
            ob.cfda_number AS cfda_program_num,
            ob.earliest_start AS starting_date,
            ob.latest_end AS ending_date,
            ob.obligation_sum as total_fed_funding_amount,
            ob.base_date AS base_obligation_date,
            ob.award_description AS project_description,
            ob.modified_at AS last_modified_date
        FROM only_base AS ob
        WHERE modified_at >= '""" + mod_date + "'")

    logger.info("Completed SQL query, starting file writing")

    full_file_path = os.path.join(os.getcwd(), "fsrs_update.csv")
    with open(full_file_path, 'w', newline='') as csv_file:
        out_csv = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')

        # write headers to file
        headers = ['federal_award_id', 'status', 'eligibility', 'sai_number', 'agency_code', 'duns_no', 'dunsplus4',
                   'uei', 'principal_place_cc', 'principal_place_state_code', 'principal_place_country_code',
                   'principal_place_zip', 'cfda_program_num', 'starting_date', 'ending_date',
                   'total_fed_funding_amount', 'base_obligation_date', 'project_description', 'last_modified_date']
        out_csv.writerow(headers)
        for row in results:
            metrics_json['records_provided'] += 1
            out_csv.writerow(row)

    # close file
    csv_file.close()

    metrics_json['duration'] = str(datetime.datetime.now() - now)

    with open('get_fsrs_updates_metrics.json', 'w+') as metrics_file:
        json.dump(metrics_json, metrics_file)
    logger.info("Script complete")


if __name__ == '__main__':
    configure_logging()
    with create_app().app_context():
        main()
Python
0.000001
@@ -2414,24 +2414,28 @@ ion%0A %22%22%22%0A + Query Su @@ -2441,16 +2441,20 @@ ummary:%0A + Each @@ -2564,16 +2564,20 @@ day - and %0A + and als @@ -2648,17 +2648,16 @@ saction. - %0A %22%22%22
b6368d368368b27a8883fa27fa3bda6929067a2c
fix minor typo in docstring
astroquery/xmatch/core.py
astroquery/xmatch/core.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.extern import six
from astropy.io import ascii
from astropy.units import arcsec
from astropy.table import Table

from . import conf
from ..query import BaseQuery
from ..utils import (commons, url_helpers, prepend_docstr_noreturns,
                     async_to_sync, )


@async_to_sync
class XMatchClass(BaseQuery):
    URL = conf.url
    TIMEOUT = conf.timeout

    def query(self, cat1, cat2, max_distance, colRA1=None, colDec1=None,
              colRA2=None, colDec2=None):
        """Query the `CDS cross-match service
        <http://cdsxmatch.u-strasbg.fr/xmatch>`_ by finding matches between
        two (potentially big) catalogues.

        Parameters
        ----------
        cat1 : str, file or `~astropy.table.Table`
            Identifier of the first table. It can either be a URL, the
            payload of a local file being uploaded, a CDS table identifier
            (either *simbad* for a view of SIMBAD data / to point out a given
            VizieR table or a an AstroPy table. If the table is uploaded or
            accessed through a URL, it must be in VOTable or CSV format with
            the positions in J2000 equatorial frame and as decimal degrees
            numbers.
        cat2 : str or file
            Identifier of the second table. Follows the same rules as *cat1*.
        max_distance : `~astropy.units.arcsec`
            Maximum distance in arcsec to look for counterparts. Maximum
            allowed value is 180.
        colRA1 : str
            Name of the column holding the right ascension. Only required if
            `cat1` is an uploaded table or a pointer to a URL.
        colDec1 : str
            Name of the column holding the declination. Only required if
            `cat1` is an uploaded table or a pointer to a URL.
        colRA2 : str
            Name of the column holding the right ascension. Only required if
            `cat2` is an uploaded table or a pointer to a URL.
        colDec2 : str
            Name of the column holding the declination. Only required if
            `cat2` is an uploaded table or a pointer to a URL.

        Returns
        -------
        table : `~astropy.table.Table`
            Query results table
        """
        response = self.query_async(
            cat1, cat2, max_distance, colRA1, colDec1, colRA2, colDec2)
        return ascii.read(response.text, format='csv')

    @prepend_docstr_noreturns(query.__doc__)
    def query_async(
            self, cat1, cat2, max_distance, colRA1=None, colDec1=None,
            colRA2=None, colDec2=None):
        """
        Returns
        -------
        response : `requests.Response`
            The HTTP response returned from the service.
        """
        if max_distance > 180 * arcsec:
            raise ValueError(
                'max_distance argument must not be greater than 180')
        payload = {
            'request': 'xmatch',
            'distMaxArcsec': max_distance.value,
            'RESPONSEFORMAT': 'csv',
        }
        kwargs = {}

        self._prepare_sending_table(1, payload, kwargs, cat1, colRA1, colDec1)
        self._prepare_sending_table(2, payload, kwargs, cat2, colRA2, colDec2)

        response = commons.send_request(
            self.URL, payload, self.TIMEOUT, **kwargs)
        return response

    def _prepare_sending_table(self, i, payload, kwargs, cat, colRA, colDec):
        '''Check if table is a string, a `astropy.table.Table`, etc. and
        set query parameters accordingly.
        '''
        catstr = 'cat{0}'.format(i)
        if isinstance(cat, six.string_types):
            payload[catstr] = cat
        elif isinstance(cat, Table):
            # write the Table's content into a new, temporary CSV-file
            # so that it can be pointed to via the `files` option
            # file will be closed when garbage-collected
            fp = six.StringIO()
            cat.write(fp, format='ascii.csv')
            fp.seek(0)
            kwargs['files'] = {catstr: fp}
        else:
            # assume it's a file-like object, support duck-typing
            kwargs['files'] = {catstr: cat}

        if not self.is_table_available(cat):
            if ((colRA is None) or (colDec is None)):
                raise ValueError('Specify the name of the RA/Dec columns in' +
                                 ' the input table.')
            # if `cat1` is not a VizieR table,
            # it is assumed it's either a URL or an uploaded table
            payload['colRA{0}'.format(i)] = colRA
            payload['colDec{0}'.format(i)] = colDec

    def is_table_available(self, table_id):
        """Return True if the passed CDS table identifier is one of the
        available VizieR tables, otherwise False.
        """
        if isinstance(table_id, six.string_types) and (table_id[:7] == 'vizier:'):
            table_id = table_id[7:]
        return table_id in self.get_available_tables()

    def get_available_tables(self):
        """Get the list of the VizieR tables which are available in the
        xMatch service and return them as a list of strings.
        """
        response = self._request(
            'GET',
            url_helpers.urljoin_keep_path(self.URL, 'tables'),
            {'action': 'getVizieRTableNames', 'RESPONSEFORMAT': 'txt'})
        content = response.text
        return content.splitlines()


XMatch = XMatchClass()
Python
0.000546
@@ -1068,24 +1068,25 @@ VizieR table +) or a an Ast @@ -1298,17 +1298,16 @@ umbers.%0A -
378aa9b7b8fa74187c16f7bbd446a78c3b5658a8
Fix error message typo
utils/api.py
utils/api.py
#!/usr/bin/env python
'''
THIS APP IS NOT PRODUCTION READY!! DO NOT USE!

Flask app that provides a RESTful API to the multiscanner.

Proposed supported operations:
GET / ---> Test functionality. {'Message': 'True'}
GET /api/v1/tasks/list ---> Receive list of tasks in multiscanner
GET /api/v1/tasks/list/<task_id> ---> receive task in JSON format
GET /api/v1/reports/list/<report_id> ---> receive report in JSON
GET /api/v1/reports/delete/<report_id> ----> delete report_id
POST /api/v1/tasks/create ---> POST file and receive report id
Sample POST usage:
    curl -i -X POST http://localhost:8080/api/v1/tasks/create/ -F file=@/bin/ls

TODO:
* Add a backend DB to store reports
* Make this app agnostic to choice of backend DB
* Add doc strings to functions
'''
import os
import sys
import uuid
from flask import Flask, jsonify, make_response, request, abort

MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.join(MS_WD, 'storage') not in sys.path:
    sys.path.insert(0, os.path.join(MS_WD, 'storage'))

import sqlite_driver as database
from storage import Storage

TASK_NOT_FOUND = {'Message': 'No task with that ID not found!'}
INVALID_REQUEST = {'Message': 'Invalid request parameters'}

UPLOAD_FOLDER = 'tmp/'

HTTP_OK = 200
HTTP_CREATED = 201
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404

FULL_DB_PATH = os.path.join(MS_WD, 'sqlite.db')

app = Flask(__name__)
db = database.Database(FULL_DB_PATH)
db_store = Storage.get_storage()


@app.errorhandler(HTTP_BAD_REQUEST)
def invalid_request(error):
    '''Return a 400 with the INVALID_REQUEST message.'''
    return make_response(jsonify(INVALID_REQUEST), HTTP_BAD_REQUEST)


@app.errorhandler(HTTP_NOT_FOUND)
def not_found(error):
    '''Return a 404 with a TASK_NOT_FOUND message.'''
    return make_response(jsonify(TASK_NOT_FOUND), HTTP_NOT_FOUND)


@app.route('/')
def index():
    '''
    Return a default standard message
    for testing connectivity.
    '''
    return jsonify({'Message': 'True'})


@app.route('/api/v1/tasks/list/', methods=['GET'])
def task_list():
    '''
    Return a JSON dictionary containing all the tasks
    in the DB.
    '''
    return jsonify({'Tasks': db.get_all_tasks()})


@app.route('/api/v1/tasks/list/<int:task_id>', methods=['GET'])
def get_task(task_id):
    '''
    Return a JSON dictionary corresponding
    to the given task ID.
    '''
    task = db.get_task(task_id)
    if task:
        return jsonify({'Task': task})
    else:
        abort(HTTP_NOT_FOUND)


@app.route('/api/v1/tasks/delete/<int:task_id>', methods=['GET'])
def delete_task(task_id):
    '''
    Delete the specified task. Return deleted message.
    '''
    result = db.delete_task(task_id)
    if not result:
        abort(HTTP_NOT_FOUND)
    return jsonify({'Message': 'Deleted'})


@app.route('/api/v1/tasks/create/', methods=['POST'])
def create_task():
    '''
    Create a new task. Save the submitted file
    to UPLOAD_FOLDER. Return task id and 201 status.
    '''
    file_ = request.files['file']
    extension = os.path.splitext(file_.filename)[1]
    f_name = str(uuid.uuid4()) + extension
    file_path = os.path.join(UPLOAD_FOLDER, f_name)
    file_.save(file_path)

    # TODO: run multiscan on the file, have it update the
    # DB when done
    # output = multiscanner.multiscan([file_path])
    # report = multiscanner.parseReports

    task_id = db.add_task()

    return make_response(
        jsonify({'Message': {'task_id': task_id}}),
        HTTP_CREATED
    )


@app.route('/api/v1/reports/list/<report_id>', methods=['GET'])
def get_report(report_id):
    '''
    Return a JSON dictionary corresponding
    to the given report ID.
    '''
    report = db_store.get_report(report_id)
    if report:
        return jsonify({'Report': report})
    else:
        abort(HTTP_NOT_FOUND)


@app.route('/api/v1/reports/delete/<report_id>', methods=['GET'])
def delete_report(report_id):
    '''
    Delete the specified report. Return deleted message.
    '''
    if db_store.delete(report_id):
        return jsonify({'Message': 'Deleted'})
    else:
        abort(HTTP_NOT_FOUND)


if __name__ == '__main__':
    db.init_sqlite_db()
    if not os.path.isdir(UPLOAD_FOLDER):
        print 'Creating upload dir'
        os.makedirs(UPLOAD_FOLDER)
    app.run(host='0.0.0.0', port=8080)
Python
0.000384
@@ -1146,20 +1146,16 @@ that ID -not found!'%7D
22f293ff16dd977c6a37b64566b37405d81cb767
Make the KeyIdentifier.key_id field a property.
atlassian_jwt_auth/key.py
atlassian_jwt_auth/key.py
import os
import re

import requests


class KeyIdentifier(object):
    """ This class represents a key identifier """

    def __init__(self, identifier):
        self.key_id = validate_key_identifier(identifier)


def validate_key_identifier(identifier):
    """ returns a validated key identifier. """
    regex = re.compile('^[\w.\-\+/]*$')
    _error_msg = 'Invalid key identifier %s' % identifier
    if not identifier:
        raise ValueError(_error_msg)
    if not regex.match(identifier):
        raise ValueError(_error_msg)
    normalised = os.path.normpath(identifier)
    if normalised != identifier:
        raise ValueError(_error_msg)
    if normalised.startswith('/'):
        raise ValueError(_error_msg)
    if '..' in normalised:
        raise ValueError(_error_msg)
    return identifier


class HTTPSPublicKeyRetriever(object):
    """ This class retrieves public key from a https location based
        upon the given key id.
    """

    def __init__(self, base_url):
        if not base_url.startswith('https://'):
            raise ValueError('The base url must start with https://')
        if not base_url.endswith('/'):
            base_url += '/'
        self.base_url = base_url

    def retrieve(self, key_identifier):
        """ returns the public key for given key_identifier. """
        if not isinstance(key_identifier, KeyIdentifier):
            key_identifier = KeyIdentifier(key_identifier)
        PEM_FILE_TYPE = 'application/x-pem-file'
        url = self.base_url + key_identifier.key_id
        resp = requests.get(url, headers={'accept': PEM_FILE_TYPE})
        resp.raise_for_status()
        if resp.headers['content-type'] != PEM_FILE_TYPE:
            raise ValueError("Invalid content-type, '%s', for url '%s' ." %
                             (resp.headers['content-type'], url))
        return resp.text
Python
0
@@ -163,16 +163,18 @@ self. +__ key_id = @@ -211,16 +211,82 @@ ifier)%0A%0A + @property%0A def key_id(self):%0A return self.__key_id%0A%0A %0Adef val
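The pattern the diff applies, reduced to its core: keep the validated value in a name-mangled attribute and expose it through a read-only property, so callers can no longer reassign key_id after construction. A minimal sketch (validation elided):

class KeyIdentifier:
    def __init__(self, identifier):
        self.__key_id = identifier  # validation elided in this sketch

    @property
    def key_id(self):
        return self.__key_id

k = KeyIdentifier("service/key-1")
print(k.key_id)
# k.key_id = "other" would now raise AttributeError: can't set attribute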
a8d6959d32b50cab41e05ae9e1eed75c1b7d3fa7
Add replace to routes.all
respite/urls/routes.py
respite/urls/routes.py
from respite.inflector import pluralize, cc2us


class Route(object):
    """A route instance connects a path and method to a view."""

    def __init__(self, regex, view, method, name):
        """
        Initialize a route.

        Arguments:
        regex -- A string describing a regular expression to which the request path
                 will be matched, or a function that accepts the parent resource's
                 'prefix' argument and returns it.
        view -- A string describing the name of the view to delegate the request to.
        method -- A string describing the HTTP method that this action accepts.
        name -- A string describing the name of the URL pattern, or a function that
                accepts the parent resource's 'views' argument and returns it.
        """
        self.regex = regex
        self.view = view
        self.method = method
        self.name = name

route = Route

index = route(
    regex = lambda prefix: r'^%s(?:$|index(?:\.[a-zA-Z]+)?$)' % prefix,
    view = 'index',
    method = 'GET',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, pluralize(cc2us(views.model.__name__)))
)

create = route(
    regex = lambda prefix: r'^%s(?:$|index(?:\.[a-zA-Z]+)?$)' % prefix,
    view = 'create',
    method = 'POST',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, pluralize(cc2us(views.model.__name__)))
)

show = route(
    regex = lambda prefix: r'^%s(?P<id>[0-9]+)(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'show',
    method = 'GET',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

update = route(
    regex = lambda prefix: r'^%s(?P<id>[0-9]+)(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'update',
    method = 'PATCH',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

replace = route(
    regex = lambda prefix: r'^%s(?P<id>[0-9]+)(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'replace',
    method = 'PUT',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

delete = route(
    regex = lambda prefix: r'^%s(?P<id>[0-9]+)(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'destroy',
    method = 'DELETE',
    name = lambda views: '%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

edit = route(
    regex = lambda prefix: r'^%s(?P<id>[0-9]+)/edit(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'edit',
    method = 'GET',
    name = lambda views: 'edit_%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

new = route(
    regex = lambda prefix: r'^%snew(?:\.[a-zA-Z]+)?$' % prefix,
    view = 'new',
    method = 'GET',
    name = lambda views: 'new_%s_%s' % (views.model._meta.app_label, cc2us(views.model.__name__))
)

all = [index, create, show, update, delete, edit, new]
Python
0.000006
@@ -2815,14 +2815,23 @@ e, edit, + replace, new%5D%0A
fab58f03eaf09b9f286a10f5a91a945f53a92a29
Drop native specification
splauncher/core.py
splauncher/core.py
from __future__ import print_function


__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$May 18, 2015 16:52:18 EDT$"


import datetime
import os

import logging
drmaa_logger = logging.getLogger(__name__)

try:
    import drmaa
except ImportError:
    # python-drmaa is not installed.
    drmaa_logger.error(
        "Was not able to import drmaa. " +
        "If this is meant to be run using the OpenGrid submission " +
        "system, then drmaa needs to be installed via pip or " +
        "easy_install."
    )
    raise
except RuntimeError:
    # The drmaa library was not specified, but python-drmaa is installed.
    drmaa_logger.error(
        "Was able to import drmaa. " +
        "However, the drmaa library could not be found. Please " +
        "either specify the location of libdrmaa.so using the " +
        "DRMAA_LIBRARY_PATH environment variable or disable/remove " +
        "use_drmaa from the config file."
    )
    raise


def main(*argv):
    job_time = datetime.datetime.utcnow()
    job_time_str = job_time.isoformat().replace(":", ".")

    job_name = "splaunch_" + argv[1].replace("/", "-") + "_" + job_time_str

    s = drmaa.Session()
    s.initialize()

    session_name = s.contact

    job_template = s.createJobTemplate()
    job_template.jobName = job_name
    job_template.remoteCommand = argv[1]
    job_template.args = argv[2:]
    job_template.jobEnvironment = os.environ
    job_template.inputPath = "localhost:" + os.devnull
    job_template.outputPath = "localhost:" + job_name + ".out"
    job_template.errorPath = "localhost:" + job_name + ".err"
    job_template.workingDirectory = os.getcwd()
    job_template.nativeSpecification = "-pe batch " + str(1)

    process_id = s.runJob(job_template)

    s.deleteJobTemplate(job_template)
    s.exit()

    print(
        "From context \"%s\" launched job \"%s\" with process ID \"%s\"." % (
            session_name, job_name, process_id
        )
    )

    return(0)
Python
0
@@ -1656,69 +1656,8 @@ wd() -%0A job_template.nativeSpecification = %22-pe batch %22 + str(1) %0A%0A
e61247230b291bcf9f9dcc3050876b9f812c6541
change url for methodcheck thanks steve steiner http://www.atxconsulting.com/blog/tjfontaine/2010/02/09/updated-linode-api#comment-195
methodcheck.py
methodcheck.py
#!/usr/bin/python
"""
A quick script to verify that api.py is in sync with Linode's
published list of methods.

Copyright (c) 2009 Ryan Tucker <rtucker@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""

# URL of API documentation
apidocurl = 'http://beta.linode.com/api/autodoc.cfm'

import api
import re
import urllib

tmpfile, httpheaders = urllib.urlretrieve(apidocurl)
tmpfd = open(tmpfile)

local_methods = api.Api.valid_commands()
remote_methods = []

# Read in the list of methods Linode has
rg = re.compile('.*?\\?method=((?:[a-z][a-z\\.\\d\\-]+)\\.(?:[a-z][a-z\\-]+))(?![\\w\\.])')
for i in tmpfd.readlines():
    m = rg.search(i)
    if m:
        remote_methods.append(m.group(1).replace('.','_'))

# Cross-check!
for i in local_methods:
    if i not in remote_methods:
        print 'REMOTE Missing: ' + i
for i in remote_methods:
    if i not in local_methods:
        print 'LOCAL Missing: ' + i
Python
0
@@ -1236,12 +1236,11 @@ p:// -beta +www .lin
cc413b49ce9dd63fcbe9396a5ac1c8c68872a6c1
Update information in pkginfo, including the version information.
astroid/__pkginfo__.py
astroid/__pkginfo__.py
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""astroid packaging information"""
distname = 'astroid'

modname = 'astroid'

numversion = (1, 2, 1)
version = '.'.join([str(num) for num in numversion])

install_requires = ['logilab-common >= 0.60.0', 'six']

license = 'LGPL'

author = 'Logilab'
author_email = 'python-projects@lists.logilab.org'
mailinglist = "mailto://%s" % author_email
web = 'http://bitbucket.org/logilab/astroid'

description = "rebuild a new abstract syntax tree from Python's ast"

classifiers = ["Topic :: Software Development :: Libraries :: Python Modules",
               "Topic :: Software Development :: Quality Assurance",
               "Programming Language :: Python",
               "Programming Language :: Python :: 2",
               "Programming Language :: Python :: 3",
               ]
Python
0
@@ -905,12 +905,12 @@ (1, -2, 1 +3, 0 )%0Ave @@ -1076,21 +1076,16 @@ 'py -thon-projects +lint-dev @lis @@ -1208,21 +1208,9 @@ = %22 -rebuild a new +A abs @@ -1232,24 +1232,41 @@ ee f +o r -om Python -'s ast + with inference support. %22%0A%0Ac
8db52cb2481e49d7ca5568a0d10886da41643aba
Update retrosign.py
retrosign/retrosign.py
retrosign/retrosign.py
import discord, aiohttp, re from bs4 import BeautifulSoup as b_s from io import BytesIO from discord.ext import commands import os import random from random import choice import lxml class retrosign: def __init__(self, bot): self.bot = bot @commands.group(name="retrosign") async def retrosign(self, content : str): """Make a Retrosign""" texts = [t.strip() for t in content.split('|')] if len(texts) < 3: lenstr = len(texts[0]) await self.bot.say(lenstr) if lenstr <= 12: global data data = dict( bcg=choice([1, 2, 3, 4, 5]), txt=choice([1, 2, 3, 4]), text1="", text2=texts[0], text3="" ) await self.bot.type() with aiohttp.ClientSession() as session: async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response: if response.status == 200: soup = b_s(await response.text(), "lxml") download_url = soup.find("div", class_="downloads-container").ul.li.a["href"] async with session.get(download_url) as image_response: if image_response.status == 200: image_data = await image_response.read() with BytesIO(image_data) as temp_image: await self.bot.upload(temp_image, filename="retro.jpg") else: await self.bot.say("\N{CROSS MARK} too many Characters for one Line") return elif len(texts) != 3: await self.bot.say("\N{CROSS MARK} please provide three strings seperated by `|`") return else: global data data = dict( bcg=choice([1, 2, 3, 4, 5]), txt=choice([1, 2, 3, 4]), text1=texts[0], text2=texts[1], text3=texts[2] ) await self.bot.type() with aiohttp.ClientSession() as session: async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response: if response.status == 200: soup = b_s(await response.text(), "lxml") download_url = soup.find("div", class_="downloads-container").ul.li.a["href"] async with session.get(download_url) as image_response: if image_response.status == 200: image_data = await image_response.read() with BytesIO(image_data) as temp_image: await self.bot.upload(temp_image, filename="retro.jpg") @retrosign.command(name="top") async def _top_(self, content : str): """Make a Retrosign with top and middle Text""" texts = [t.strip() for t in content.split('|')] if len(texts) != 2: await self.bot.say("\N{CROSS MARK} please provide two strings seperated by `|`") return else: global data data = dict( bcg=choice([1, 2, 3, 4, 5]), txt=choice([1, 2, 3, 4]), text1=texts[0], text2=texts[1], text3="" ) await self.bot.type() with aiohttp.ClientSession() as session: async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response: if response.status == 200: soup = b_s(await response.text(), "lxml") download_url = soup.find("div", class_="downloads-container").ul.li.a["href"] async with session.get(download_url) as image_response: if image_response.status == 200: image_data = await image_response.read() with BytesIO(image_data) as temp_image: await self.bot.upload(temp_image, filename="retro.jpg") @retrosign.command(name="bottom") async def _bottom_(self, content : str): """Make a Retrosign with middle and bottom Text""" texts = [t.strip() for t in content.split('|')] if len(texts) != 2: await self.bot.say("\N{CROSS MARK} please provide two strings seperated by `|`") return else: global data data = dict( bcg=choice([1, 2, 3, 4, 5]), txt=choice([1, 2, 3, 4]), text1="", text2=texts[0], text3=[1] ) do_it() await self.bot.type() with aiohttp.ClientSession() as session: async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response: if response.status == 200: soup = b_s(await response.text(), "lxml") 
download_url = soup.find("div", class_="downloads-container").ul.li.a["href"] async with session.get(download_url) as image_response: if image_response.status == 200: image_data = await image_response.read() with BytesIO(image_data) as temp_image: await self.bot.upload(temp_image, filename="retro.jpg") def setup(bot): n = retrosign(bot) bot.add_cog(n)
Python
0
@@ -273,13 +273,15 @@ nds. -group +command (nam @@ -2943,33 +2943,32 @@ %0A @ -retrosign +commands .command(nam @@ -2970,19 +2970,28 @@ d(name=%22 -top +retrotopsign %22)%0A a @@ -4265,25 +4265,24 @@ %0A @ -retrosign +commands .command @@ -4288,22 +4288,31 @@ d(name=%22 +retro bottom +sign %22)%0A a
f5c22cdfef99878027f3f9e78ff9ae2701a21a3f
fix the URL matching error for 'watch/' and 'watch3/'
src/you_get/extractors/dilidili.py
src/you_get/extractors/dilidili.py
#!/usr/bin/env python __all__ = ['dilidili_download'] from ..common import * from .ckplayer import ckplayer_download headers = { 'DNT': '1', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'en-CA,en;q=0.8,en-US;q=0.6,zh-CN;q=0.4,zh;q=0.2', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Cache-Control': 'max-age=0', 'Referer': 'http://www.dilidili.com/', 'Connection': 'keep-alive', 'Save-Data': 'on', } #---------------------------------------------------------------------- def dilidili_parser_data_to_stream_types(typ ,vid ,hd2 ,sign, tmsign, ulk): """->list""" parse_url = 'http://player.005.tv/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = hd2, sign = sign, tmsign = tmsign, ulk = ulk) html = get_content(parse_url, headers=headers) info = re.search(r'(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})', html).groups() info = [i.strip('{}').split('->') for i in info] info = {i[0]: i [1] for i in info} stream_types = [] for i in zip(info['deft'].split('|'), info['defa'].split('|')): stream_types.append({'id': str(i[1][-1]), 'container': 'mp4', 'video_profile': i[0]}) return stream_types #---------------------------------------------------------------------- def dilidili_download(url, output_dir = '.', merge = False, info_only = False, **kwargs): if re.match(r'http://www.dilidili.com/watch\S', url): html = get_content(url) title = match1(html, r'<title>(.+)丨(.+)</title>') #title # player loaded via internal iframe frame_url = re.search(r'<iframe src=\"(.+?)\"', html).group(1) #print(frame_url) #https://player.005.tv:60000/?vid=a8760f03fd:a04808d307&v=yun&sign=a68f8110cacd892bc5b094c8e5348432 html = get_content(frame_url, headers=headers, decoded=False).decode('utf-8') match = re.search(r'(.+?)var video =(.+?);', html) vid = match1(html, r'var vid="(.+)"') hd2 = match1(html, r'var hd2="(.+)"') typ = match1(html, r'var typ="(.+)"') sign = match1(html, r'var sign="(.+)"') tmsign = match1(html, r'tmsign=([A-Za-z0-9]+)') ulk = match1(html, r'var ulk="(.+)"') # here s the parser... stream_types = dilidili_parser_data_to_stream_types(typ, vid, hd2, sign, tmsign, ulk) #get best best_id = max([i['id'] for i in stream_types]) parse_url = 'http://player.005.tv/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = best_id, sign = sign, tmsign = tmsign, ulk = ulk) ckplayer_download(parse_url, output_dir, merge, info_only, is_xml = True, title = title, headers = headers) #type_ = '' #size = 0 #type_, ext, size = url_info(url) #print_info(site_info, title, type_, size) #if not info_only: #download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge) site_info = "dilidili" download = dilidili_download download_playlist = playlist_not_supported('dilidili')
Python
0.999999
@@ -1721,16 +1721,17 @@ /watch%5CS ++ ', url):
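Decoded, the one-character hunk turns `watch\S` into `watch\S+`, so the guard accepts any run of non-space characters after `watch`. A quick sanity check of the updated pattern, with hypothetical URLs standing in for the two page styles the subject names:

```python
import re

# Hypothetical URLs for the two page styles mentioned in the commit subject.
urls = [
    "http://www.dilidili.com/watch/12345.html",
    "http://www.dilidili.com/watch3/12345.html",
]

pattern = r"http://www.dilidili.com/watch\S+"  # the pattern after the fix

for url in urls:
    # re.match anchors at the start of the string, so one or more
    # non-space characters after "watch" are enough to satisfy \S+.
    print(url, "->", bool(re.match(pattern, url)))
# Both lines print True.
```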
9fee9967deb9078e6b522cc638df3d2f8cb98443
add test
tests/cupy_tests/cuda_tests/test_stream.py
tests/cupy_tests/cuda_tests/test_stream.py
import unittest from cupy._creation import from_data from cupy import cuda from cupy import testing @testing.parameterize( *testing.product({ 'stream_name': ['null', 'ptds'], })) @testing.gpu class TestStream(unittest.TestCase): def setUp(self): if cuda.runtime.is_hip and self.stream_name == 'ptds': self.skipTest('HIP does not support PTDS') self._prev_stream = cuda.get_current_stream() if self.stream_name == 'null': self.stream = cuda.Stream.null elif self.stream_name == 'ptds': self.stream = cuda.Stream.ptds self.stream.use() def tearDown(self): self._prev_stream.use() @unittest.skipIf(cuda.runtime.is_hip, 'This test is only for CUDA') def test_eq_cuda(self): null0 = self.stream if self.stream == cuda.Stream.null: null1 = cuda.Stream(True) null2 = cuda.Stream(True) null3 = cuda.Stream(ptds=True) else: null1 = cuda.Stream(ptds=True) null2 = cuda.Stream(ptds=True) null3 = cuda.Stream(True) null4 = cuda.Stream() assert null0 == null1 assert null1 == null2 assert null2 != null3 assert null2 != null4 @unittest.skipIf(not cuda.runtime.is_hip, 'This test is only for HIP') def test_eq_hip(self): null0 = self.stream null1 = cuda.Stream(True) null2 = cuda.Stream(True) null3 = cuda.Stream() assert null0 == null1 assert null1 == null2 assert null2 != null3 def check_del(self, null, ptds): stream = cuda.Stream(null=null, ptds=ptds).use() stream_ptr = stream.ptr x = from_data.array([1, 2, 3]) del stream assert cuda.Stream.null == cuda.get_current_stream() # Want to test cudaStreamDestory is issued, but # runtime.streamQuery(stream_ptr) causes SEGV. We cannot test... del stream_ptr del x def test_del_default(self): self.check_del(null=False, ptds=False) def test_del(self): null = self.stream == cuda.Stream.null if cuda.runtime.is_hip: ptds = False else: ptds = self.stream == cuda.Stream.ptds self.check_del(null=null, ptds=ptds) def test_get_and_add_callback(self): N = 100 cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)] if not cuda.runtime.is_hip: stream = self.stream else: # adding callbacks to the null stream in HIP would segfault... 
stream = cuda.Stream() out = [] stream_list = [] def _callback(s, _, t): out.append(t[0]) stream_list.append(s.ptr) for i in range(N): numpy_array = cupy_arrays[i].get(stream=stream) stream.add_callback( _callback, (i, numpy_array)) stream.synchronize() assert out == list(range(N)) assert all(s == stream.ptr for s in stream_list) @unittest.skipIf(cuda.runtime.is_hip, 'HIP does not support launch_host_func') @unittest.skipIf(cuda.driver.get_build_version() < 10000, 'Only CUDA 10.0+ supports this') def test_launch_host_func(self): N = 100 cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)] stream = cuda.Stream.null out = [] for i in range(N): numpy_array = cupy_arrays[i].get(stream=stream) stream.launch_host_func( lambda t: out.append(t[0]), (i, numpy_array)) stream.synchronize() assert out == list(range(N)) def test_with_statement(self): stream1 = cuda.Stream() stream2 = cuda.Stream() assert self.stream == cuda.get_current_stream() with stream1: assert stream1 == cuda.get_current_stream() with stream2: assert stream2 == cuda.get_current_stream() assert stream1 == cuda.get_current_stream() assert self.stream == cuda.get_current_stream() def test_use(self): stream1 = cuda.Stream().use() assert stream1 == cuda.get_current_stream() self.stream.use() assert self.stream == cuda.get_current_stream() @testing.gpu class TestExternalStream(unittest.TestCase): def setUp(self): self.stream_ptr = cuda.runtime.streamCreate() self.stream = cuda.ExternalStream(self.stream_ptr) def tearDown(self): cuda.runtime.streamDestroy(self.stream_ptr) def test_get_and_add_callback(self): N = 100 cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)] stream = self.stream out = [] for i in range(N): numpy_array = cupy_arrays[i].get(stream=stream) stream.add_callback( lambda _, __, t: out.append(t[0]), (i, numpy_array)) stream.synchronize() assert out == list(range(N)) @unittest.skipIf(cuda.runtime.is_hip, 'HIP does not support launch_host_func') @unittest.skipIf(cuda.driver.get_build_version() < 10000, 'Only CUDA 10.0+ supports this') def test_launch_host_func(self): N = 100 cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)] stream = self.stream out = [] for i in range(N): numpy_array = cupy_arrays[i].get(stream=stream) stream.launch_host_func( lambda t: out.append(t[0]), (i, numpy_array)) stream.synchronize() assert out == list(range(N))
Python
0.000002
@@ -4374,16 +4374,459 @@ ream()%0A%0A + @testing.multi_gpu(2)%0A def test_per_device(self):%0A with cuda.Device(0):%0A stream0 = cuda.Stream()%0A with stream0:%0A assert stream0 == cuda.get_current_stream()%0A with cuda.Device(1):%0A assert stream0 != cuda.get_current_stream()%0A assert cuda.Stream.null == cuda.get_current_stream()%0A assert stream0 == cuda.get_current_stream()%0A%0A %0A@testin
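The added test_per_device arrives %0A-escaped in the hunk; unfolded, its core assertion is that a stream made current on one device does not leak onto another. A condensed sketch of that check, assuming a machine with at least two CUDA devices:

```python
from cupy import cuda

with cuda.Device(0):
    stream0 = cuda.Stream()
    with stream0:
        # Current on device 0 ...
        assert stream0 == cuda.get_current_stream()
        with cuda.Device(1):
            # ... while device 1 still reports its own null stream.
            assert stream0 != cuda.get_current_stream()
            assert cuda.Stream.null == cuda.get_current_stream()
```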
a891594150a4456e0894f2b5b70f2bd4b650bd77
use debug to log dqn_freeze model update to prevent log overflow
rl/agent/dqn_freeze.py
rl/agent/dqn_freeze.py
import os import numpy as np from rl.agent.double_dqn import DoubleDQN from rl.agent.dqn import DQN from keras.models import load_model from rl.util import logger class DQNFreeze(DoubleDQN): ''' Extends DQN agent to freeze target Q network and periodically update them to the weights of the exploration model Avoids oscillations and breaks correlation between Q-network and target http://www0.cs.ucl.ac.uk/staff/d.silver/web/Resources_files/deep_rl.pdf Exploration model periodically saved and loaded into target Q network ''' def compute_Q_states(self, minibatch): Q_states = np.clip(self.model.predict(minibatch['states']), -self.clip_val, self.clip_val) Q_next_states = np.clip(self.model2.predict(minibatch['next_states']), -self.clip_val, self.clip_val) Q_next_states_max = np.amax(Q_next_states, axis=1) return (Q_states, Q_next_states, Q_next_states_max) def train_an_epoch(self): # Should call DQN to train an epoch, not DoubleDQN return DQN.train_an_epoch(self) def update_target_model(self): pid = os.getpid() name = 'temp_Q_model_freeze_' + str(pid) + '.h5' self.model.save(name) self.model2 = load_model(name) logger.info("Updated target model weights") def update(self, sys_vars): ''' Agent update apart from training the Q function ''' done = sys_vars['done'] timestep_check = sys_vars['t'] == (self.env_spec['timestep_limit'] - 1) if done or timestep_check: self.update_target_model() super(DQNFreeze, self).update(sys_vars)
Python
0
@@ -1327,12 +1327,13 @@ ger. -info +debug (%22Up
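The fix relies on the standard filtering rule: a record is dropped whenever its level is below the logger's effective level, so per-update messages vanish under the usual INFO default. A minimal stdlib sketch, where the basicConfig level is an assumption about the deployment:

```python
import logging

logging.basicConfig(level=logging.INFO)  # assumed production-style default
log = logging.getLogger("rl.agent.dqn_freeze")

log.info("Updated target model weights")   # emitted at INFO
log.debug("Updated target model weights")  # suppressed until DEBUG is enabled
```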
cf837175763cd99e19dd95f14c9ac0dfd705bffd
set global 'sqlalchemy' log level to ERROR so it is insulated from other logging configs [ticket:353]
lib/sqlalchemy/logging.py
lib/sqlalchemy/logging.py
# logging.py - adapt python logging module to SQLAlchemy # Copyright (C) 2006 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides a few functions used by instances to turn on/off their logging, including support for the usual "echo" parameter. Control of logging for SA can be performed from the regular python logging module. The regular dotted module namespace is used, starting at 'sqlalchemy'. For class-level logging, the class name is appended, and for instance-level logging, the hex id of the instance is appended. The "echo" keyword parameter which is available on some SA objects corresponds to an instance-level logger for that instance. E.g.: engine.echo = True is equivalent to: import logging logging.getLogger('sqlalchemy.engine.Engine.%s' % hex(id(engine))).setLevel(logging.DEBUG) """ import sys # py2.5 absolute imports will fix.... logging = __import__('logging') default_enabled = False def default_logging(name): global default_enabled if logging.getLogger(name).getEffectiveLevel() < logging.WARN: default_enabled=True if not default_enabled: default_enabled = True rootlogger = logging.getLogger('sqlalchemy') rootlogger.setLevel(logging.NOTSET) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')) rootlogger.addHandler(handler) def _get_instance_name(instance): # since getLogger() does not have any way of removing logger objects from memory, # instance logging displays the instance id as a modulus of 16 to prevent endless memory growth # also speeds performance as logger initialization is apparently slow return instance.__class__.__module__ + "." + instance.__class__.__name__ + ".0x.." + hex(id(instance))[-2:] def instance_logger(instance): return logging.getLogger(_get_instance_name(instance)) def class_logger(cls): return logging.getLogger(cls.__module__ + "." + cls.__name__) def is_debug_enabled(logger): return logger.isEnabledFor(logging.DEBUG) def is_info_enabled(logger): return logger.isEnabledFor(logging.INFO) class echo_property(object): level_map={logging.DEBUG : "debug", logging.INFO:True} def __get__(self, instance, owner): level = logging.getLogger(_get_instance_name(instance)).getEffectiveLevel() return echo_property.level_map.get(level, False) def __set__(self, instance, value): if value: default_logging(_get_instance_name(instance)) logging.getLogger(_get_instance_name(instance)).setLevel(value == 'debug' and logging.DEBUG or logging.INFO) else: logging.getLogger(_get_instance_name(instance)).setLevel(logging.NOTSET)
Python
0.000001
@@ -1039,16 +1039,121 @@ ging')%0A%0A +# turn off logging at the root sqlalchemy level%0Alogging.getLogger('sqlalchemy').setLevel(logging.ERROR)%0A%0A default_ @@ -1434,52 +1434,8 @@ y')%0A - rootlogger.setLevel(logging.NOTSET)%0A
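The new module-level call works through level inheritance along the dotted logger hierarchy: instance loggers such as sqlalchemy.engine.Engine.0x.. set no level of their own, so they fall back to the nearest configured ancestor rather than to whatever other code did to the root logger. A stdlib-only illustration:

```python
import logging

logging.getLogger("sqlalchemy").setLevel(logging.ERROR)

# An instance logger in the style the module builds (the hex suffix here is
# made up). It carries no level itself, so it inherits ERROR from
# "sqlalchemy" instead of from the root logger.
child = logging.getLogger("sqlalchemy.engine.Engine.0x..2f")
assert child.getEffectiveLevel() == logging.ERROR
assert not child.isEnabledFor(logging.INFO)
```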
9c03b61730b6f2c09f789301f633551480ff4b5e
Tidy up unused code
saleor/checkout/forms.py
saleor/checkout/forms.py
"""Checkout-related forms.""" from django import forms from django.utils.safestring import mark_safe from django.utils.translation import pgettext_lazy from django_prices.templatetags.prices_i18n import format_price from ..shipping.models import ShippingMethodCountry class CheckoutAddressField(forms.ChoiceField): """Like a choice field but uses a radio group instead of a dropdown.""" widget = forms.RadioSelect() class ShippingAddressesForm(forms.Form): """Shipping address form.""" NEW_ADDRESS = 'new_address' CHOICES = [ (NEW_ADDRESS, pgettext_lazy( 'Shipping addresses form choice', 'Enter a new address'))] address = CheckoutAddressField( label=pgettext_lazy('Shipping addresses form field label', 'Address'), choices=CHOICES, initial=NEW_ADDRESS) def __init__(self, *args, **kwargs): additional_addresses = kwargs.pop('additional_addresses', []) super().__init__(*args, **kwargs) address_field = self.fields['address'] address_choices = [ (address.id, str(address)) for address in additional_addresses] address_field.choices = self.CHOICES + address_choices class BillingAddressesForm(ShippingAddressesForm): """Billing address form.""" NEW_ADDRESS = 'new_address' SHIPPING_ADDRESS = 'shipping_address' CHOICES = [ (NEW_ADDRESS, pgettext_lazy( 'Billing addresses form choice', 'Enter a new address')), (SHIPPING_ADDRESS, pgettext_lazy( 'Billing addresses form choice', 'Same as shipping'))] address = CheckoutAddressField(choices=CHOICES, initial=SHIPPING_ADDRESS) class BillingWithoutShippingAddressForm(ShippingAddressesForm): """Billing address form when shipping is not required. Same as the default shipping address as in this came "billing same as shipping" option does not make sense. """ # FIXME: why is this called a country choice field? class ShippingCountryChoiceField(forms.ModelChoiceField): """Shipping method choice field. Uses a radio group instead of a dropdown and includes estimated shipping prices. 
""" widget = forms.RadioSelect() def label_from_instance(self, obj): """Return a friendly label for the shipping method.""" price_html = format_price(obj.price.gross, obj.price.currency) label = mark_safe('%s %s' % (obj.shipping_method, price_html)) return label class ShippingMethodForm(forms.Form): """Shipping method form.""" method = ShippingCountryChoiceField( queryset=ShippingMethodCountry.objects.select_related( 'shipping_method').order_by('price').all(), label=pgettext_lazy( 'Shipping method form field label', 'Shipping method'), required=True) def __init__(self, country_code, *args, **kwargs): super().__init__(*args, **kwargs) method_field = self.fields['method'] if country_code: queryset = method_field.queryset method_field.queryset = queryset.unique_for_country_code( country_code) if self.initial.get('method') is None: self.initial['method'] = method_field.queryset.first() method_field.empty_label = None class AnonymousUserShippingForm(forms.Form): """Additional shipping information form for users who are not logged in.""" email = forms.EmailField( required=True, widget=forms.EmailInput( attrs={'autocomplete': 'shipping email'}), label=pgettext_lazy('Shipping form field label', 'Email')) class AnonymousUserBillingForm(forms.Form): """Additional billing information form for users who are not logged in.""" email = forms.EmailField( required=True, widget=forms.EmailInput( attrs={'autocomplete': 'billing email'}), label=pgettext_lazy('Billing form field label', 'Email')) class NoteForm(forms.Form): """ Form to add a note to an order as it is created for shop staff to see """ note = forms.CharField(widget=forms.Textarea, max_length=250, required=False) # def __init__(self, *args, **kwargs): # self.checkout = kwargs.pop('checkout') # initial = kwargs.get('initial', {}) # if 'note' not in initial: # initial['note'] = self.checkout.note # kwargs['initial'] = initial # super(NoteForm, self).__init__(*args, **kwargs) # # def clean(self): # cleaned_data = super(NoteForm, self).clean() # if 'note' in cleaned_data: # self.checkout.note = cleaned_data['note'] # return cleaned_data
Python
0.000096
@@ -4126,518 +4126,5 @@ se)%0A - %0A# def __init__(self, *args, **kwargs):%0A# self.checkout = kwargs.pop('checkout')%0A# initial = kwargs.get('initial', %7B%7D)%0A# if 'note' not in initial:%0A# initial%5B'note'%5D = self.checkout.note%0A# kwargs%5B'initial'%5D = initial%0A# super(NoteForm, self).__init__(*args, **kwargs)%0A#%0A# def clean(self):%0A# cleaned_data = super(NoteForm, self).clean()%0A# if 'note' in cleaned_data:%0A# self.checkout.note = cleaned_data%5B'note'%5D%0A# return cleaned_data %0A
38efa9aa11f949fc8bd0b6c4d1a673ca3416dd3c
Fix up iterator implementation in LISTALLOBJECTS
groundstation/transfer/request_handlers/listallobjects.py
groundstation/transfer/request_handlers/listallobjects.py
import groundstation.transfer.request from groundstation import settings from groundstation import logger log = logger.getLogger(__name__) def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i+n] def handle_listallobjects(self): if not self.station.recently_queried(self.origin): log.info("%s not up to date, issuing LISTALLOBJECTS" % (self.origin)) # Pass in the station for gizmo_factory in the constructor listobjects = groundstation.transfer.request.Request("LISTALLOBJECTS", station=self.station) self.stream.enqueue(listobjects) else: log.info("object cache for %s still valid" % (self.origin)) log.info("Handling LISTALLOBJECTS") payload = self.station.objects() if len(payload) > settings.LISTALLOBJECTS_CHUNK_THRESHOLD: log.info("Lots of objects to send, registering an iterator") def _(): for chunk in chunks(payload, settings.LISTALLOBJECTS_CHUNK_THRESHOLD): log.info("Sending %i object descriptions" % (len(payload))) response = self._Response(self.id, "DESCRIBEOBJECTS", chr(0).join(payload)) self.stream.enqueue(response) yield self.TERMINATE() self.station.register_iter(_()) else: log.info("Sending %i object descriptions" % (len(payload))) response = self._Response(self.id, "DESCRIBEOBJECTS", chr(0).join(payload)) self.stream.enqueue(response) self.TERMINATE()
Python
0.00006
@@ -999,16 +999,53 @@ erator%22) +%0A%0A @self.station.register_iter %0A @@ -1049,17 +1049,24 @@ def -_ +iterator ():%0A @@ -1201,39 +1201,37 @@ iptions%22 %25 (len( -payload +chunk )))%0A @@ -1332,39 +1332,37 @@ chr(0).join( -payload +chunk ))%0A @@ -1416,16 +1416,16 @@ yield%0A + @@ -1448,48 +1448,8 @@ TE() -%0A self.station.register_iter(_()) %0A%0A
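Both versions lean on the chunks() generator defined at the top of the module; exercised on its own (ported to Python 3's range for a standalone run, with a made-up threshold), it slices the payload like so:

```python
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

payload = ["obj-%d" % i for i in range(7)]
print(list(chunks(payload, 3)))
# [['obj-0', 'obj-1', 'obj-2'], ['obj-3', 'obj-4', 'obj-5'], ['obj-6']]
```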
1fc0026aa72f7fcf66c221de402971023361e6c3
implement memo logger
spyne/util/memo.py
spyne/util/memo.py
# # spyne - Copyright (C) Spyne contributors. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # """The module for memoization stuff. When you have memory leaks in your daemon, the reason could very well be reckless usage of the tools here. """ import functools class memoize(object): """A memoization decorator that keeps caching until reset.""" def __init__(self, func): self.func = func self.memo = {} def __call__(self, *args, **kwargs): key = self.get_key(args, kwargs) if not key in self.memo: self.memo[key] = self.func(*args, **kwargs) return self.memo[key] def get_key(self, args, kwargs): return tuple(args), tuple(kwargs.items()) def reset(self): self.memo = {} class memoize_id(memoize): """A memoization decorator that keeps caching until reset for unhashable types. It works on id()'s of objects instead.""" def get_key(self, args, kwargs): return tuple([id(a) for a in args]), \ tuple([(k, id(v)) for k, v in kwargs.items()]) class memoize_id_method(memoize_id): """A memoization decorator that keeps caching until reset for unhashable types on instance methods. It works on id()'s of objects instead.""" def __get__(self, obj, objtype): """Support instance methods.""" fn = functools.partial(self.__call__, obj) fn.reset = self.reset return fn
Python
0.000001
@@ -926,25 +926,540 @@ %0Aimport -functools +logging%0Alogger = logging.getLogger(__name__)%0A%0Aimport functools%0A%0A%0AMEMOIZATION_STATS_LOG_INTERVAL = 60.0%0A%0A%0Adef _do_log():%0A logger.debug(%22%25d memoizers%22, len(memoize.registry))%0A for memo in memoize.registry:%0A logger.debug(%22%25r: %25d entries.%22, memo.func, len(memo.memo))%0A%0A%0Adef start_memoization_stats_logger():%0A import threading%0A%0A _do_log()%0A%0A t = threading.Timer(MEMOIZATION_STATS_LOG_INTERVAL,%0A start_memoization_stats_logger)%0A t.daemon = True%0A t.start() %0A%0A%0Aclass @@ -1539,24 +1539,43 @@ reset.%22%22%22%0A%0A + registry = %5B%5D%0A%0A def __in @@ -1617,16 +1617,16 @@ = func%0A - @@ -1639,16 +1639,54 @@ emo = %7B%7D +%0A memoize.registry.append(self) %0A%0A de
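The stats logger the hunk adds keeps itself alive by re-arming a daemonized threading.Timer from inside its own callback. The shape of that pattern, reduced to stdlib prints (the interval and message are placeholders):

```python
import threading
import time

def log_stats():
    print("stats heartbeat")             # stands in for _do_log()
    t = threading.Timer(1.0, log_stats)  # each run schedules the next one
    t.daemon = True                      # so the timer never blocks exit
    t.start()

log_stats()
time.sleep(3.5)  # pretend to do real work; the first heartbeat prints
                 # immediately, then one fires roughly every second
```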
ae044f507f3bcf508648b1a73a802b657009cd48
fix nxos_reboot command format (#30549)
lib/ansible/modules/network/nxos/nxos_reboot.py
lib/ansible/modules/network/nxos/nxos_reboot.py
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_reboot extends_documentation_fragment: nxos version_added: 2.2 short_description: Reboot a network device. description: - Reboot a network device. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - The module will fail due to timeout issues, but the reboot will be performed anyway. options: confirm: description: - Safeguard boolean. Set to true if you're sure you want to reboot. required: false default: false ''' EXAMPLES = ''' - nxos_reboot: confirm: true host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" ''' RETURN = ''' rebooted: description: Whether the device was instructed to reboot. returned: success type: boolean sample: true ''' from ansible.module_utils.nxos import run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule def reboot(module): cmds = [ {'command': 'terminal-dont-ask'}, {'command': 'reload', 'output': 'text'} ] run_commands(module, cmds) def main(): argument_spec = {} argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) results = dict(changed=False, warnings=warnings) if not module.check_mode: reboot(module) results['changed'] = True module.exit_json(**results) if __name__ == '__main__': main()
Python
0
@@ -1935,17 +1935,17 @@ terminal -- + dont-ask @@ -1945,16 +1945,34 @@ ont-ask' +, 'output': 'text' %7D,%0A
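Applied, the two hunks leave reboot() sending this list: the stray hyphen in the first command becomes a space, and the entry gains an explicit output format like its neighbour:

```python
cmds = [
    {'command': 'terminal dont-ask', 'output': 'text'},
    {'command': 'reload', 'output': 'text'},
]
```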
b8ed081ac4cc5953aaf5b1a2091fefa59d375bf1
Add logging for extension
uno_image.py
uno_image.py
""" Example usage of UNO, graphic objects and networking in LO extension """ import uno import unohelper from com.sun.star.task import XJobExecutor class ImageExample(unohelper.Base, XJobExecutor): '''Class that implements the service registered in LibreOffice''' def __init__(self, context): self.context = context self.desktop = self.createUnoService("com.sun.star.frame.Desktop") self.graphics = self.createUnoService("com.sun.star.graphic.GraphicProvider") def createUnoService(self, name): return self.context.ServiceManager.createInstanceWithContext(name, self.context) def show_warning(self, title, msg): """Shows warning message box""" frame = self.desktop.ActiveFrame window = frame.ContainerWindow window.Toolkit.createMessageBox( window, uno.Enum('com.sun.star.awt.MessageBoxType', 'WARNINGBOX'), uno.getConstantByName("com.sun.star.awt.MessageBoxButtons.BUTTONS_OK"), title, msg).execute() def trigger(self, args): """This method provides options for ImageExample service""" if args == 'show_warning': self.show_warning("Warning", "Warning description here") g_ImplementationHelper = unohelper.ImplementationHelper() g_ImplementationHelper.addImplementation( ImageExample, 'org.libreoffice.imageexample.ImageExample', ('com.sun.star.task.JobExecutor',))
Python
0.000001
@@ -71,16 +71,31 @@ on%0A%22%22%22%0A%0A +import logging%0A import u @@ -503,16 +503,130 @@ ovider%22) +%0A logging.basicConfig(filename=%22opencl_uno_example.log%22,%0A level=logging.WARNING) %0A%0A de
67d26378653c2764f0c95beae991370344644716
add reset method.
rmake/lib/publisher.py
rmake/lib/publisher.py
# # Copyright (c) 2006 rPath, Inc. # # This program is distributed under the terms of the Common Public License, # version 1.0. A copy of this license should have been distributed with this # source file in a file called LICENSE. If it is not present, the license # is always available at http://www.opensource.org/licenses/cpl.php. # # This program is distributed in the hope that it will be useful, but # without any warranty; without even the implied warranty of merchantability # or fitness for a particular purpose. See the Common Public License for # full details. # from rmake import constants class Publisher(object): states = set() def __init__(self): self.listeners = {} self.dispatchers = {} for state in self.states: setattr(self, state, state) self._toEmit = {} self._corked = False def cork(self): self._corked = True def uncork(self): toEmit = self._toEmit self._toEmit = {} for fn, (isDispatcher, eventList) in toEmit.iteritems(): if not isDispatcher: for event, data in eventList: fn(*data) else: fn(constants.subscriberApiVersion, eventList) self._corked = False def _emit(self, event, subevent, *args): data = ((event, subevent), args) if self._corked: for fn in self.dispatchers.get(event, []): if fn not in self._toEmit: self._toEmit[fn] = (True, [data]) else: self._toEmit[fn][1].append(data) for fn in self.listeners.get(event, []): if fn not in self._toEmit: self._toEmit[fn] = (False, [data]) else: self._toEmit[fn][1].append(data) else: for fn in self.dispatchers.get(event, []): fn(constants.subscriberApiVersion, [data]) for fn in self.listeners.get(event, []): fn(*args) def subscribe(self, stateList, fn, dispatcher=False): if isinstance(stateList, str): stateList = [stateList] for state in stateList: if state not in self.states: raise ValueError("no such state '%s'" % state) if dispatcher: l = self.dispatchers.setdefault(state, []) else: l = self.listeners.setdefault(state, []) if fn not in l: l.append(fn) def unsubscribe(self, state, fn): self.listeners[state].remove(fn) def subscribeAll(self, fn, dispatcher=False): for state in self.getStates(): self.subscribe(state, fn, dispatcher=dispatcher) def getStates(self): return self.states
Python
0
@@ -669,66 +669,8 @@ f):%0A - self.listeners = %7B%7D%0A self.dispatchers = %7B%7D%0A @@ -731,32 +731,133 @@ , state, state)%0A + self.reset()%0A%0A def reset(self):%0A self.listeners = %7B%7D%0A self.dispatchers = %7B%7D%0A self._to
3050659a87a7386b0a1d038a8f2ebcce23df1aa1
add missing command
mint/broker.py
mint/broker.py
#!/usr/bin/python # # Copyright (c) 2007 rPath, Inc. # # All rights reserved # import os, sys import optparse import httplib from mcp import client as mcp_client from mcp import queue from mcp import jobstatus from mint import config as mint_config from mint import server as mint_server from conary import dbstore def handleImages(mcpCfg, mintCfg): # ensure schema is upgraded mintClient = shimclient.ShimMintClient(mintCfg, (mintCfg.authUser, mintCfg.authPass)) mcpClient = mcp_client.MCPClient(mcpCfg) queueName = '%s.%s' % (mintCfg.hostName, mintCfg.externalDomainName) postQueue = queue.Queue(mcpCfg.queueHost, mcpCfg.queuePort, queueName, timeOut = None) db = dbstore.connect(mintCfg.dbPath, mintCfg.dbDriver) cu = db.cursor() isogenUid = os.geteuid() apacheGid = pwd.getpwnam('apache')[3] try: while True: uuid, urlMap = postQueue.read() buildId = int(uuid.split('-')[-1]) build = mintClient.getBuild(buildId) project = mintClient.getProject(build.projectId) finalDir = \ os.path.join(mintCfg.imagesPath, project.hostname, str(buildId)) os.chown(finalDir, isogenUid, apacheGid) os.chmod(finalDir, os.stat(finalDir)[0] & 0777 | 0020) os.chown(pardir, isogenUid, apacheGid) os.chmod(pardir, os.stat(pardir)[0] & 0777 | 0020) for url, fileDesc in urlMap: filePath = os.path.join(finalDir, httplib.urlsplit(url)[2].split('/')[-1]) os.system('curl --create-dirs -o %s %s' % (filePath, url)) os.chown(filePath, isogenUid, apacheGid) os.chmod(filePath, os.stat(newfile)[0] & 0777 | 0020) build.setFiles(urlMap) finally: mcpClient.disconnect() postQueue.disconnect() def daemon(func, *args, **kwargs): pid = os.fork() if not pid: os.setsid() devnull = os.open(os.devnull, os.O_RDWR) os.dup2(devnull, sys.stdout.fileno()) os.dup2(devnull, sys.stderr.fileno()) os.dup2(devnull, sys.stdin.fileno()) pid = os.fork() if not pid: func(*args, **kwargs) def main(envArgs = sys.argv[1:]): parser = optparse.OptionParser() parser.add_option("-n", "--no-daemon", dest = "daemon", default = True, action = "store_false", help = "don't daemonize. go into debug mode") parser.add_option("-c", "--config", dest = 'config', help = "location of rBuilder config file", default = '') parser.add_option("-m", "--mcp-config", dest = "mcp_config", help = "location of mcp client config file", default = '/srv/rbuilder/config/mcp-client.conf') parser.add_option("-u", '--user', dest = 'user', help = 'run as specific user. must be superuser.', default = None) (options, args) = parser.parse_args(envArgs) # drop privileges as early as possible curUid = os.geteuid() newUid = pwd.getpwnam(options.user) if (newUid != curUid): os.seteuid(newUid) mintCfg = mint_config.MintConfig() if not options.config: mintCfg.read(mint_config.RBUILDER_CONFIG) mintCfg.read(mint_config.RBUILDER_GENERATED_CONFIG) mintCfg.read(mint_config.RBUILDER_GENERATED_CONFIG.replace('generated', 'custom')) else: mintCfg.read(options.config) mcpCfg = mcp_client.MCPClientConfig() mcpCfg.read(options.mcp_config) if options.daemon: daemon(handleImages, mcpCfg, mintCfg) else: handleImages(mcpCfg, mintCfg) if __name__ == '__main__': main()
Python
0.655937
@@ -1853,16 +1853,49 @@ urlMap)%0A + client.stopJob(uuid)%0A fina
22a2c4ec841741297825f0e8ac301d6cad3b739b
Replace "0x2212" (the Unicode minus sign) with "-".
translate.py
translate.py
#!/usr/bin/env python3  import re  # pull in the dictionary from an external file import translate_dict as dictionary  input_text = None filename = None   def replace_lang(text, dic):     langs = re.findall('(?:^Languages|(?<= ))(\w+)[, ]*', text)     langru = "Языки "     for lang in langs:         try:             langru = langru + dic[lang] + ", "         except:             langru = langru + lang + ", "     return langru[:-2]   def replace_other(line, dic):     """Replace everything that did not match the other filters"""     for i, j in dic.items():         line = line.replace(i, j)     return line   def replacer(text, dic):     translated_text = ""     for line in text.splitlines():         line = line.replace(b'\xe2\x88\x92'.decode("utf-8"), "-")         if "languages" in line.lower():             if "understand" in line.lower():                 pass             else:                 line = replace_lang(line, dictionary.lang_dict)         if "multiattack" in line.lower():             line = re.sub(r"Multiattack. ([\s\w]+) makes (\w+) melee.+",                           r"Мультиатака. \1 делает \2 рукопашные атаки.", line)             line = re.sub(r"Multiattack. ([\s\w]+) makes (\w+) ranged.+",                           r"Мультиатака. \1 делает \2 дальнобойные атаки.", line)         # replace dice notation throughout the text         line = re.sub(r"(\d+)d(\d+)", r"\1к\2", line)         # blanket replacement of everything else         line = replace_other(line, dic)         # and reassemble the text line by line         translated_text = translated_text + "\r\n" + line     return translated_text   def main():     import sys     try:         filename = sys.argv[1]     except IndexError:         print("Should specify input filename")         sys.exit(1)     with open(filename, 'r') as f:         input_text = f.read()     translated_text = replacer(input_text, dictionary.words_dict)     print(translated_text)   if __name__ == "__main__":     main()
Python
0
@@ -714,22 +714,28 @@ ce(b -'%5Cxe2%5Cx88%5Cx9 +ytes.fromhex('221 2' +) .dec
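Two encoding facts put this hunk in perspective: U+2212 (the Unicode minus sign) is the code point whose UTF-8 encoding is the byte sequence E2 88 92, while bytes.fromhex() consumes raw byte values rather than code points, so the two spellings do not denote the same string:

```python
# The old spelling round-trips the minus sign through its UTF-8 bytes.
assert b"\xe2\x88\x92".decode("utf-8") == "\u2212"
assert "\u2212".encode("utf-8") == b"\xe2\x88\x92"

# bytes.fromhex("2212") is the two bytes 0x22 0x12, which decode to a
# quote character followed by a control character, not U+2212.
assert bytes.fromhex("2212") == b"\x22\x12"
assert bytes.fromhex("2212").decode("utf-8") == '"\x12'
```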
1a78c461a0dcf6895646086c7863889c78f95f03
Fix Nanoleaf light turn_off transition (#57305)
homeassistant/components/nanoleaf/light.py
homeassistant/components/nanoleaf/light.py
"""Support for Nanoleaf Lights.""" from __future__ import annotations from aionanoleaf import Nanoleaf, Unavailable import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR, ATTR_TRANSITION, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_TRANSITION, LightEntity, ) from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN from homeassistant.core import HomeAssistant import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import color as color_util from homeassistant.util.color import ( color_temperature_mired_to_kelvin as mired_to_kelvin, ) from .const import DOMAIN RESERVED_EFFECTS = ("*Solid*", "*Static*", "*Dynamic*") DEFAULT_NAME = "Nanoleaf" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_TOKEN): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Import Nanoleaf light platform.""" hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: config[CONF_HOST], CONF_TOKEN: config[CONF_TOKEN]}, ) ) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up the Nanoleaf light.""" nanoleaf: Nanoleaf = hass.data[DOMAIN][entry.entry_id] async_add_entities([NanoleafLight(nanoleaf)]) class NanoleafLight(LightEntity): """Representation of a Nanoleaf Light.""" def __init__(self, nanoleaf: Nanoleaf) -> None: """Initialize an Nanoleaf light.""" self._nanoleaf = nanoleaf self._attr_unique_id = self._nanoleaf.serial_no self._attr_name = self._nanoleaf.name self._attr_min_mireds = 154 self._attr_max_mireds = 833 @property def brightness(self): """Return the brightness of the light.""" return int(self._nanoleaf.brightness * 2.55) @property def color_temp(self): """Return the current color temperature.""" return color_util.color_temperature_kelvin_to_mired( self._nanoleaf.color_temperature ) @property def effect(self): """Return the current effect.""" # The API returns the *Solid* effect if the Nanoleaf is in HS or CT mode. # The effects *Static* and *Dynamic* are not supported by Home Assistant. # These reserved effects are implicitly set and are not in the effect_list. 
# https://forum.nanoleaf.me/docs/openapi#_byoot0bams8f return ( None if self._nanoleaf.effect in RESERVED_EFFECTS else self._nanoleaf.effect ) @property def effect_list(self): """Return the list of supported effects.""" return self._nanoleaf.effects_list @property def icon(self): """Return the icon to use in the frontend, if any.""" return "mdi:triangle-outline" @property def is_on(self): """Return true if light is on.""" return self._nanoleaf.is_on @property def hs_color(self): """Return the color in HS.""" return self._nanoleaf.hue, self._nanoleaf.saturation @property def supported_features(self): """Flag supported features.""" return ( SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_COLOR | SUPPORT_TRANSITION ) async def async_turn_on(self, **kwargs): """Instruct the light to turn on.""" brightness = kwargs.get(ATTR_BRIGHTNESS) hs_color = kwargs.get(ATTR_HS_COLOR) color_temp_mired = kwargs.get(ATTR_COLOR_TEMP) effect = kwargs.get(ATTR_EFFECT) transition = kwargs.get(ATTR_TRANSITION) if hs_color: hue, saturation = hs_color await self._nanoleaf.set_hue(int(hue)) await self._nanoleaf.set_saturation(int(saturation)) if color_temp_mired: await self._nanoleaf.set_color_temperature( mired_to_kelvin(color_temp_mired) ) if transition: if brightness: # tune to the required brightness in n seconds await self._nanoleaf.set_brightness( int(brightness / 2.55), transition=int(kwargs[ATTR_TRANSITION]) ) else: # If brightness is not specified, assume full brightness await self._nanoleaf.set_brightness(100, transition=int(transition)) else: # If no transition is occurring, turn on the light await self._nanoleaf.turn_on() if brightness: await self._nanoleaf.set_brightness(int(brightness / 2.55)) if effect: if effect not in self.effect_list: raise ValueError( f"Attempting to apply effect not in the effect list: '{effect}'" ) await self._nanoleaf.set_effect(effect) async def async_turn_off(self, **kwargs): """Instruct the light to turn off.""" transition = kwargs.get(ATTR_TRANSITION) await self._nanoleaf.turn_off(transition) async def async_update(self) -> None: """Fetch new state data for this light.""" try: await self._nanoleaf.get_info() except Unavailable: self._attr_available = False return self._attr_available = True
Python
0
@@ -5697,32 +5697,46 @@ transition +: float %7C None = kwargs.get(AT @@ -5788,16 +5788,24 @@ urn_off( +None if transiti @@ -5806,16 +5806,45 @@ ansition + is None else int(transition) )%0A%0A a
807d197ca0c131c9ef5f4a683be04c7df8f715bb
Add goToAlias button (my position) to Overkiz integration (#76694)
homeassistant/components/overkiz/button.py
homeassistant/components/overkiz/button.py
"""Support for Overkiz (virtual) buttons.""" from __future__ import annotations from homeassistant.components.button import ButtonEntity, ButtonEntityDescription from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from . import HomeAssistantOverkizData from .const import DOMAIN, IGNORED_OVERKIZ_DEVICES from .entity import OverkizDescriptiveEntity BUTTON_DESCRIPTIONS: list[ButtonEntityDescription] = [ # My Position (cover, light) ButtonEntityDescription( key="my", name="My Position", icon="mdi:star", ), # Identify ButtonEntityDescription( key="identify", # startIdentify and identify are reversed... Swap this when fixed in API. name="Start Identify", icon="mdi:human-greeting-variant", entity_category=EntityCategory.DIAGNOSTIC, entity_registry_enabled_default=False, ), ButtonEntityDescription( key="stopIdentify", name="Stop Identify", icon="mdi:human-greeting-variant", entity_category=EntityCategory.DIAGNOSTIC, entity_registry_enabled_default=False, ), ButtonEntityDescription( key="startIdentify", # startIdentify and identify are reversed... Swap this when fixed in API. name="Identify", icon="mdi:human-greeting-variant", entity_category=EntityCategory.DIAGNOSTIC, ), # RTDIndoorSiren / RTDOutdoorSiren ButtonEntityDescription(key="dingDong", name="Ding Dong", icon="mdi:bell-ring"), ButtonEntityDescription(key="bip", name="Bip", icon="mdi:bell-ring"), ButtonEntityDescription( key="fastBipSequence", name="Fast Bip Sequence", icon="mdi:bell-ring" ), ButtonEntityDescription(key="ring", name="Ring", icon="mdi:bell-ring"), ] SUPPORTED_COMMANDS = { description.key: description for description in BUTTON_DESCRIPTIONS } async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Overkiz button from a config entry.""" data: HomeAssistantOverkizData = hass.data[DOMAIN][entry.entry_id] entities: list[ButtonEntity] = [] for device in data.coordinator.data.values(): if ( device.widget in IGNORED_OVERKIZ_DEVICES or device.ui_class in IGNORED_OVERKIZ_DEVICES ): continue for command in device.definition.commands: if description := SUPPORTED_COMMANDS.get(command.command_name): entities.append( OverkizButton( device.device_url, data.coordinator, description, ) ) async_add_entities(entities) class OverkizButton(OverkizDescriptiveEntity, ButtonEntity): """Representation of an Overkiz Button.""" async def async_press(self) -> None: """Handle the button press.""" await self.executor.async_execute_command(self.entity_description.key)
Python
0
@@ -74,16 +74,110 @@ ations%0A%0A +from dataclasses import dataclass%0A%0Afrom pyoverkiz.types import StateType as OverkizStateType%0A%0A from hom @@ -611,16 +611,182 @@ Entity%0A%0A +%0A@dataclass%0Aclass OverkizButtonDescription(ButtonEntityDescription):%0A %22%22%22Class to describe an Overkiz button.%22%22%22%0A%0A press_args: OverkizStateType %7C None = None%0A%0A%0A BUTTON_D @@ -803,28 +803,29 @@ S: list%5B +Overkiz Button -Entity Descript @@ -866,36 +866,37 @@ light)%0A +Overkiz Button -Entity Description( @@ -989,36 +989,37 @@ dentify%0A +Overkiz Button -Entity Description( @@ -1297,36 +1297,37 @@ %0A ),%0A +Overkiz Button -Entity Description( @@ -1533,36 +1533,37 @@ %0A ),%0A +Overkiz Button -Entity Description( @@ -1832,36 +1832,37 @@ orSiren%0A +Overkiz Button -Entity Description( @@ -1918,36 +1918,37 @@ ring%22),%0A +Overkiz Button -Entity Description( @@ -1993,36 +1993,37 @@ ring%22),%0A +Overkiz Button -Entity Description( @@ -2112,28 +2112,29 @@ ),%0A +Overkiz Button -Entity Descript @@ -2185,16 +2185,236 @@ ring%22),%0A + # DynamicScreen (ogp:blind) uses goToAlias (id 1: favorite1) instead of 'my'%0A OverkizButtonDescription(%0A key=%22goToAlias%22,%0A press_args=%221%22,%0A name=%22My position%22,%0A icon=%22mdi:star%22,%0A ),%0A %5D%0A%0ASUPPO @@ -3513,24 +3513,74 @@ Button.%22%22%22%0A%0A + entity_description: OverkizButtonDescription%0A%0A async de @@ -3643,24 +3643,240 @@ n press.%22%22%22%0A + if self.entity_description.press_args:%0A await self.executor.async_execute_command(%0A self.entity_description.key, self.entity_description.press_args%0A )%0A return%0A%0A awai
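The hunk's central move is subclassing a dataclass-based entity description and hanging one defaulted field off it. The pattern in isolation, with the Home Assistant base stubbed out so the sketch runs anywhere (Python 3.10+ assumed for the union syntax):

```python
from dataclasses import dataclass

@dataclass
class ButtonEntityDescription:       # stub of the Home Assistant base class
    key: str
    name: str | None = None

@dataclass
class OverkizButtonDescription(ButtonEntityDescription):
    press_args: str | None = None    # simplified stand-in for OverkizStateType

desc = OverkizButtonDescription(key="goToAlias", name="My position",
                                press_args="1")
print(desc.press_args)  # '1', forwarded to async_execute_command when set
```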
77e09f2f085bc894c1f45e94662e32a981e9b0db
Convert Chinese quotation marks
PythonScript/Helper/Helper.py
PythonScript/Helper/Helper.py
def main(): try: fileName = "MengZi_Traditional.md" filePath = "../../source/" + fileName with open(filePath, 'r') as file: for line in file: print line except IOError: print ("The file (" + filePath + ") does not exist.") if __name__ == '__main__': main()
Python
0.999998
@@ -1,8 +1,62 @@ +# This Python file uses the following encoding: utf-8%0A def main @@ -157,16 +157,39 @@ ileName%0A + content = None%0A @@ -207,17 +207,16 @@ ilePath, - 'r') as @@ -237,52 +237,333 @@ -for line in file:%0A +content = file.read().decode(%22utf-8%22)%0A content = content.replace(u%22%E3%80%8C%22,u'%E2%80%9C')%0A content = content.replace(u%22%E3%80%8D%22,u'%E2%80%9D')%0A content = content.replace(u%22%E3%80%8E%22,u'%E2%80%98')%0A content = content.replace(u%22%E3%80%8F%22,u'%E2%80%99')%0A with open(filePath,'w') as file:%0A file.write(content.encode(%22utf-8%22))%0A + print -line +%22OK%22 %0A @@ -595,17 +595,47 @@ print (%22 -T +IOError occurs while handling t he file @@ -656,23 +656,8 @@ + %22) - does not exist .%22)%0A
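The four mappings the hunk installs (「」 to curly double quotes, 『』 to curly single quotes) can also be expressed as one translation table; a compact alternative sketch, assuming Python 3 strings (so no decode/encode dance) and a made-up sample line:

```python
QUOTES = str.maketrans({"「": "\u201c", "」": "\u201d",
                        "『": "\u2018", "』": "\u2019"})

sample = "孟子曰:「王何必曰利?」"
print(sample.translate(QUOTES))  # 孟子曰:“王何必曰利?”
```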
81d0e9eb255cd9afe20797a6c4b657a26a6b0d0b
Fix handler creation in abusehelper.core.mail.imapbot
abusehelper/core/mail/imapbot.py
abusehelper/core/mail/imapbot.py
from __future__ import absolute_import import ssl import socket import getpass import imaplib import idiokit from .. import bot, utils from .message import message_from_string, escape_whitespace from . import HandlerParam, load_handler _DEFAULT_PORT_IMAP4 = 143 _DEFAULT_PORT_IMAP4_SSL = 993 class _IMAP4(imaplib.IMAP4): def __init__(self, host, port, timeout=None): self._timeout = timeout imaplib.IMAP4.__init__(self, host, port) def open(self, host="", port=_DEFAULT_PORT_IMAP4): self.host = host self.port = port self.sock = socket.create_connection((host, port), timeout=self._timeout) self.file = self.sock.makefile("rb") class _IMAP4_SSL(imaplib.IMAP4_SSL): def __init__(self, host, port, certfile=None, keyfile=None, timeout=None): self._timeout = timeout imaplib.IMAP4_SSL.__init__(self, host, port, certfile, keyfile) def open(self, host="", port=_DEFAULT_PORT_IMAP4_SSL): self.host = host self.port = port self.sock = socket.create_connection((host, port), timeout=self._timeout) self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile) self.file = self.sslobj.makefile("rb") class IMAPBot(bot.FeedBot): handler = HandlerParam() poll_interval = bot.IntParam(default=300) filter = bot.Param(default="(UNSEEN)") mail_server = bot.Param("the mail server hostname") mail_port = bot.IntParam(""" the mail server port (default: 993 for SSL connections, 143 for plain text connections) """, default=None) mail_connection_timeout = bot.FloatParam(""" the timeout for the mail server connection socket, in seconds (default: %default seconds) """, default=60.0) mail_user = bot.Param(""" the username used for mail server authentication """) mail_password = bot.Param(""" the password used for mail server authentication """, default=None) mail_box = bot.Param(""" the polled mailbox (default: %default) """, default="INBOX") mail_disable_ssl = bot.BoolParam(""" connect to the mail server using unencrypted plain text connections (default: use encrypted SSL connections) """) def __init__(self, **keys): bot.FeedBot.__init__(self, **keys) self.handler = load_handler(self.handler) if self.mail_port is None: if self.mail_disable_ssl: self.mail_port = _DEFAULT_PORT_IMAP4 else: self.mail_port = _DEFAULT_PORT_IMAP4_SSL if self.mail_password is None: self.mail_password = getpass.getpass("Mail password: ") self.queue = self.run_mailbox() def feed(self): return self.queue | self.noop() | self.poll() # Mailbox handling @idiokit.stream def run_mailbox(self, min_delay=5.0, max_delay=60.0): mailbox = None try: while True: item = yield idiokit.next() while True: delay = min(min_delay, max_delay) while mailbox is None: try: mailbox = yield idiokit.thread(self.connect) except (imaplib.IMAP4.abort, socket.error) as error: self.log.error("Failed IMAP connection ({0})".format(utils.format_exception(error))) else: break self.log.info("Retrying connection in {0:.2f} seconds".format(delay)) yield idiokit.sleep(delay) delay = min(2 * delay, max_delay) event, name, args, keys = item if event.result().unsafe_is_set(): break try: method = getattr(mailbox, name) result = yield idiokit.thread(method, *args, **keys) except (imaplib.IMAP4.abort, socket.error) as error: yield idiokit.thread(self.disconnect, mailbox) self.log.error("Lost IMAP connection ({0})".format(utils.format_exception(error))) mailbox = None except imaplib.IMAP4.error as error: event.fail(type(error), error, None) break else: event.succeed(result) break finally: if mailbox is not None: yield idiokit.thread(self.disconnect, mailbox) def connect(self): 
self.log.info("Connecting to IMAP server {0!r} port {1}".format( self.mail_server, self.mail_port)) if self.mail_disable_ssl: mail_class = _IMAP4 else: mail_class = _IMAP4_SSL mailbox = mail_class(self.mail_server, self.mail_port, timeout=self.mail_connection_timeout) self.log.info("Logging in to IMAP server {0!r} port {1}".format( self.mail_server, self.mail_port)) mailbox.login(self.mail_user, self.mail_password) try: status, msgs = mailbox.select(self.mail_box, readonly=False) if status != "OK": for msg in msgs: raise imaplib.IMAP4.abort(msg) except: mailbox.logout() raise self.log.info("Logged in to IMAP server {0!r} port {1}".format( self.mail_server, self.mail_port)) return mailbox def disconnect(self, mailbox): try: mailbox.close() except (imaplib.IMAP4.error, socket.error): pass try: mailbox.logout() except (imaplib.IMAP4.error, socket.error): pass def call(self, name, *args, **keys): event = idiokit.Event() self.queue.send(event, name, args, keys) return event # Keep-alive @idiokit.stream def noop(self, noop_interval=60.0): while True: yield self.call("noop") yield idiokit.sleep(noop_interval) # Main polling @idiokit.stream def poll(self): while True: yield self.fetch_mails(self.filter) yield idiokit.sleep(self.poll_interval) @idiokit.stream def fetch_mails(self, filter): result, data = yield self.call("uid", "SEARCH", None, filter) if not data or not data[0]: return for uid in data[0].split(): result, parts = yield self.call("uid", "FETCH", uid, "(RFC822)") for part in parts: if isinstance(part, tuple) and len(part) >= 2: data = part[1] break else: continue msg = message_from_string(data) subject = escape_whitespace(msg.get_unicode("Subject", "<no subject>", errors="replace")) sender = escape_whitespace(msg.get_unicode("From", "<unknown sender>", errors="replace")) self.log.info(u"Handling mail '{0}' from {1}".format(subject, sender)) handler = self.handler(self.log) yield handler.handle(msg) self.log.info(u"Done with mail '{0}' from {1}".format(subject, sender)) # UID STORE command flags have to be in parentheses, otherwise # imaplib quotes them, which is not allowed. yield self.call("uid", "STORE", uid, "+FLAGS", "(\\Seen)") if __name__ == "__main__": IMAPBot.from_command_line().execute()
Python
0
@@ -7249,24 +7249,28 @@ elf.handler( +log= self.log)%0A
d93de18a890ce2b03e5eb872033aaa81e83cdc8b
Resolve missing exception import in cryptdev
salt/modules/cryptdev.py
salt/modules/cryptdev.py
# -*- coding: utf-8 -*- ''' Salt module to manage Unix cryptsetup jobs and the crypttab file ''' # Import python libraries from __future__ import absolute_import import json import logging import os # Import salt libraries import salt.utils # Import 3rd-party libs import salt.ext.six as six from salt.ext.six.moves import filter, zip # pylint: disable=import-error,redefined-builtin # Set up logger log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'cryptdev' def __virtual__(): ''' Only load on POSIX-like systems ''' if salt.utils.is_windows(): return (False, 'The cryptdev module cannot be loaded: not a POSIX-like system') else: return True class _crypttab_entry(object): ''' Utility class for manipulating crypttab entries. Primarily we're parsing, formatting, and comparing lines. Parsing emits dicts expected from crypttab() or raises a ValueError. ''' class ParseError(ValueError): '''Error raised when a line isn't parsible as a crypttab entry''' crypttab_keys = ('name', 'device', 'password', 'options') crypttab_format = '{name: <12} {device: <44} {password: <22} {options}\n' @classmethod def dict_from_line(cls, line, keys=crypttab_keys): if len(keys) != 4: raise ValueError('Invalid key array: {0}'.format(keys)) if line.startswith('#'): raise cls.ParseError("Comment!") comps = line.split() # If there are only three entries, then the options have been omitted. if len(comps) == 3: comps += [''] if len(comps) != 4: raise cls.ParseError("Invalid Entry!") return dict(zip(keys, comps)) @classmethod def from_line(cls, *args, **kwargs): return cls(** cls.dict_from_line(*args, **kwargs)) @classmethod def dict_to_line(cls, entry): return cls.crypttab_format.format(**entry) def __str__(self): '''String value, only works for full repr''' return self.dict_to_line(self.criteria) def __repr__(self): '''Always works''' return str(self.criteria) def pick(self, keys): '''Returns an instance with just those keys''' subset = dict([(key, self.criteria[key]) for key in keys]) return self.__class__(**subset) def __init__(self, **criteria): '''Store non-empty, non-null values to use as filter''' self.criteria = {key: str(value) for key, value in six.iteritems(criteria) if value is not None} @staticmethod def norm_path(path): '''Resolve equivalent paths equivalently''' return os.path.normcase(os.path.normpath(path)) def match(self, line): '''Compare potentially partial criteria against a complete line''' entry = self.dict_from_line(line) for key, value in six.iteritems(self.criteria): if entry[key] != value: return False return True def crypttab(config='/etc/crypttab'): ''' List the contents of the crypttab CLI Example: .. code-block:: bash salt '*' cryptdev.crypttab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.fopen(config) as ifile: for line in ifile: try: entry = _crypttab_entry.dict_from_line(line) entry['options'] = entry['options'].split(',') # Handle duplicate names by appending `_` while entry['name'] in ret: entry['name'] += '_' ret[entry.pop('name')] = entry except _crypttab_entry.ParseError: pass return ret def rm_crypttab(name, device, config='/etc/crypttab'): ''' Remove the device point from the crypttab. If the described entry does not exist, nothing is changed, but the command succeeds. CLI Example: .. code-block:: bash salt '*' cryptdev.rm_crypttab foo /dev/sdg ''' modified = False criteria = _crypttab_entry(name=name, device=device) # For each line in the config that does not match the criteria, add it to # the list. 
At the end, re-create the config from just those lines. lines = [] try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: try: if criteria.match(line): modified = True else: lines.append(line) except _crypttab_entry.ParseError: lines.append(line) except (IOError, OSError) as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) if modified: try: with salt.utils.fopen(config, 'w+') as ofile: ofile.writelines(lines) except (IOError, OSError) as exc: msg = "Couldn't write to {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) # If we reach this point, the changes were successful return True def set_crypttab( name, device, password='none', options='', config='/etc/crypttab', test=False, match_on='name', **kwargs): ''' Verify that this device is represented in the crypttab, change the device to match the name passed, or add the name if it is not present. CLI Example: .. code-block:: bash salt '*' cryptdev.set_crypttab foo /dev/sdz1 mypassword swap,size=256 ''' # Fix the options type if it is a list if isinstance(options, list): options = ','.join(options) # preserve arguments for updating entry_args = { 'name': name, 'device': device, 'password': password, 'options': options, } lines = [] ret = None # Transform match_on into list--items will be checked later if isinstance(match_on, list): pass elif not isinstance(match_on, six.string_types): msg = 'match_on must be a string or list of strings' raise CommandExecutionError(msg) else: match_on = [match_on] # generate entry and criteria objects, handle invalid keys in match_on entry = _crypttab_entry(**entry_args) try: criteria = entry.pick(match_on) except KeyError: filterFn = lambda key: key not in _crypttab_entry.crypttab_keys invalid_keys = filter(filterFn, match_on) msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys) raise CommandExecutionError(msg) # parse file, use ret to cache status if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{0}"'.format(config)) try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: try: if criteria.match(line): # Note: If ret isn't None here, # we've matched multiple lines ret = 'present' if entry.match(line): lines.append(line) else: ret = 'change' lines.append(str(entry)) else: lines.append(line) except _crypttab_entry.ParseError: lines.append(line) except (IOError, OSError) as exc: msg = 'Couldn\'t read from {0}: {1}' raise CommandExecutionError(msg.format(config, str(exc))) # add line if not present or changed if ret is None: lines.append(str(entry)) ret = 'new' if ret != 'present': # ret in ['new', 'change']: if not salt.utils.test_mode(test=test, **kwargs): try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): msg = 'File not writable {0}' raise CommandExecutionError(msg.format(config)) return ret
Python
0
@@ -235,16 +235,66 @@ lt.utils +%0Afrom salt.exceptions import CommandExecutionError %0A%0A# Impo
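Decoded from the URL-style escapes (%0A is a newline), the diff above inserts a single missing import after `import salt.utils`. The module body already raises `CommandExecutionError` in `rm_crypttab` and `set_crypttab`, so without this import those error paths would fail with a `NameError` instead of the intended exception. A minimal sketch of the patched import block (surrounding lines unchanged, assuming Salt is installed):

# Import salt libraries
import salt.utils
from salt.exceptions import CommandExecutionError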
491938c9a7e976012ce4d4242d3c0d2079288040
Fix broken test.
numba/tests/test_gil.py
numba/tests/test_gil.py
from __future__ import print_function import ctypes import ctypes.util import os import sys import threading import warnings import numpy as np import numba.unittest_support as unittest from numba.compiler import compile_isolated, Flags from numba import config, jit from .support import TestCase, tag # This CPython API function is a portable way to get the current thread id. PyThread_get_thread_ident = ctypes.pythonapi.PyThread_get_thread_ident PyThread_get_thread_ident.restype = ctypes.c_long PyThread_get_thread_ident.argtypes = [] # A way of sleeping from nopython code if os.name == 'nt': sleep = ctypes.windll.kernel32.Sleep sleep.argtypes = [ctypes.c_uint] sleep.restype = None sleep_factor = 1 # milliseconds else: sleep = ctypes.CDLL(ctypes.util.find_library("c")).usleep sleep.argtypes = [ctypes.c_uint] sleep.restype = ctypes.c_int sleep_factor = 1000 # microseconds def f(a, indices): # If run from one thread at a time, the function will always fill the # array with identical values. # If run from several threads at a time, the function will probably # fill the array with differing values. for idx in indices: # Let another thread run sleep(10 * sleep_factor) a[idx] = PyThread_get_thread_ident() f_sig = "void(int64[:], intp[:])" def lifted_f(a, indices): """ Same as f(), but inside a lifted loop """ object() # Force object mode for idx in indices: # Let another thread run sleep(10 * sleep_factor) a[idx] = PyThread_get_thread_ident() def object_f(a, indices): """ Same as f(), but in object mode """ for idx in indices: # Let another thread run sleep(10 * sleep_factor) object() # Force object mode a[idx] = PyThread_get_thread_ident() class TestGILRelease(TestCase): def make_test_array(self, n_members): return np.arange(n_members, dtype=np.int64) def run_in_threads(self, func, n_threads): # Run the function in parallel over an array and collect results. threads = [] # Warm up compilation, since we don't want that to interfere with # the test proper. func(self.make_test_array(1), np.arange(1, dtype=np.intp)) arr = self.make_test_array(50) for i in range(n_threads): # Ensure different threads write into the array in different # orders. indices = np.arange(arr.size, dtype=np.intp) np.random.shuffle(indices) t = threading.Thread(target=func, args=(arr, indices)) threads.append(t) for t in threads: t.start() for t in threads: t.join() return arr def check_gil_held(self, func): arr = self.run_in_threads(func, n_threads=4) distinct = set(arr) self.assertEqual(len(distinct), 1, distinct) def check_gil_released(self, func): for n_threads in (4, 12, 32): # Try harder each time. On an empty machine 4 threads seems # sufficient, but in some contexts (e.g. Travis CI) we need more. arr = self.run_in_threads(func, n_threads) distinct = set(arr) try: self.assertGreater(len(distinct), 1, distinct) except AssertionError as e: failure = e else: return raise failure def test_gil_held(self): """ Test the GIL is held by default, by checking serialized runs produce deterministic results. """ cfunc = jit(f_sig, nopython=True)(f) self.check_gil_held(cfunc) @tag('important') def test_gil_released(self): """ Test releasing the GIL, by checking parallel runs produce unpredictable results. """ cfunc = jit(f_sig, nopython=True, nogil=True)(f) self.check_gil_released(cfunc) def test_gil_released_inside_lifted_loop(self): """ Test the GIL can by released by a lifted loop even though the surrounding code uses object mode. 
""" cfunc = jit(f_sig, nogil=True)(lifted_f) self.check_gil_released(cfunc) def test_gil_released_by_caller(self): """ Releasing the GIL in the caller is sufficient to have it released in a callee. """ compiled_f = jit(f_sig, nopython=True)(f) @jit(f_sig, nopython=True, nogil=True) def caller(a, i): compiled_f(a, i) self.check_gil_released(caller) def test_gil_released_by_caller_and_callee(self): """ Same, but with both caller and callee asking to release the GIL. """ compiled_f = jit(f_sig, nopython=True, nogil=True)(f) @jit(f_sig, nopython=True, nogil=True) def caller(a, i): compiled_f(a, i) self.check_gil_released(caller) def test_gil_ignored_by_callee(self): """ When only the callee asks to release the GIL, it gets ignored. """ compiled_f = jit(f_sig, nopython=True, nogil=True)(f) @jit(f_sig, nopython=True) def caller(a, i): compiled_f(a, i) self.check_gil_held(caller) def test_object_mode(self): """ When the function is compiled in object mode, a warning is printed out. """ with warnings.catch_warnings(record=True) as wlist: warnings.simplefilter('always', config.NumbaWarning) cfunc = jit(f_sig, nogil=True)(object_f) self.assertTrue(any(w.category is config.NumbaWarning and "Code running in object mode won't allow parallel execution" in str(w.message) for w in wlist), wlist) # Just check it doesn't crash. self.run_in_threads(cfunc, 2) if __name__ == '__main__': unittest.main()
Python
0.000003
@@ -251,22 +251,22 @@ import -config +errors , jit%0Afr @@ -5532,22 +5532,22 @@ lways', -config +errors .NumbaWa @@ -5652,14 +5652,14 @@ is -config +errors .Num
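Decoded, the fix is a straight rename: `NumbaWarning` is taken from `numba.errors` instead of `numba.config`, both in the top-level import and at the two uses inside `test_object_mode`. A minimal sketch of the patched warning check, assuming a Numba version where the class lives in `numba.errors`:

import warnings
from numba import errors  # NumbaWarning moved here from numba.config

with warnings.catch_warnings(record=True) as wlist:
    warnings.simplefilter('always', errors.NumbaWarning)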
7d985243a7ca32041101f34cfa6138e197de0c3c
Fix compatibility problem with string.Template #69
awscfncli2/config/formats.py
awscfncli2/config/formats.py
# -*- coding: utf-8 -*- import copy import json import os import string import jsonschema import six from semantic_version import Version from .deployment import StackKey, StackDeployment, StackMetadata, StackProfile, \ StackParameters, Deployment from .schema import load_schema CANNED_STACK_POLICIES = { 'ALLOW_ALL': '{"Statement":[{"Effect":"Allow","Action":"Update:*","Principal":"*","Resource":"*"}]}', 'ALLOW_MODIFY': '{"Statement":[{"Effect":"Allow","Action":["Update:Modify"],"Principal":"*","Resource":"*"}]}', 'DENY_DELETE': '{"Statement":[{"Effect":"Allow","NotAction":"Update:Delete","Principal":"*","Resource":"*"}]}', 'DENY_ALL': '{"Statement":[{"Effect":"Deny","Action":"Update:*","Principal":"*","Resource":"*"}]}', } class FormatError(Exception): pass def load_format(version): if version is None: return FormatV1 try: v = Version(version, partial=True) except ValueError: raise FormatError('Invalid Version "%s"' % version) if v == FormatV3.VERSION: return FormatV3 elif v == FormatV2.VERSION: return FormatV2 elif v == FormatV1.VERSION: return FormatV1 else: raise FormatError('Unspported config version') class ConfigFormat(object): VERSION = None def validate(self, config): raise NotImplementedError def parse(self, config): raise NotImplementedError class FormatV1(ConfigFormat): VERSION = Version('1.0.0') def __init__(self, **context): self._context = context def validate(self, config): schema = load_schema(str(self.VERSION)) jsonschema.validate(config, schema) def parse(self, config): raise NotImplementedError class FormatV2(ConfigFormat): VERSION = Version('2.0.0') STAGE_CONFIG = dict( Order=(six.integer_types, None), ) STACK_CONFIG = dict( Order=(six.integer_types, None), Profile=(six.string_types, None), Region=(six.string_types, None), Package=(bool, None), ArtifactStore=(six.string_types, None), StackName=(six.string_types, None), Template=(six.string_types, None), Parameters=(dict, None), DisableRollback=(bool, None), RollbackConfiguration=(dict, None), TimeoutInMinutes=(six.integer_types, None), NotificationARNs=(six.string_types, None), Capabilities=(list, None), ResourceTypes=(list, None), RoleARN=(six.string_types, None), OnFailure=(six.string_types, None), StackPolicy=(six.string_types, None), Tags=(dict, None), ClientRequestToken=(six.string_types, None), EnableTerminationProtection=(bool, None) ) def __init__(self, basedir='.'): self._basedir = basedir def validate(self, config): schema = load_schema(str(self.VERSION)) jsonschema.validate(config, schema) if have_parameter_reference_pattern(config): raise jsonschema.SchemaError( 'Do not support parameter reference in config version <= 2') def parse(self, config): deployment = Deployment() blueprints = config.get('Blueprints', dict()) stage_configs = config.get('Stages', dict()) for stage_key, stage_config in stage_configs.items(): for stack_key, stack_config in stage_config.items(): if stack_key == 'Order': continue base = dict() blueprint_id = stack_config.get('Extends') if blueprint_id: blueprint = blueprints.get(blueprint_id) if not blueprint: raise FormatError( 'Blueprint "%s" not found' % blueprint_id) base = copy.deepcopy(blueprint) self._extends(base, stack_config) stack = self._build_stack( stage_key, stack_key, stage_config, base) deployment.add_stack(stage_key, stack_key, stack) return deployment def _extends(self, config, extends): for key, (typ, default) in self.STACK_CONFIG.items(): # skip unknown parameters if key not in extends: continue # overwrite Capabilities parameter if key == 'Capabilities': config[key] 
= copy.deepcopy(extends[key]) # append list elif typ is list: if key not in config: config[key] = list(extends[key]) else: config[key].extend(extends[key]) # update dict elif typ is dict: if key not in config: config[key] = dict(extends[key]) else: config[key].update(extends[key]) # copy everything else else: config[key] = copy.deepcopy(extends[key]) return config def _build_stack(self, stage_key, stack_key, stage_config, stack_config): # add default order stage_order = stage_config.get('Order', 0) stack_order = stack_config.get('Order', 0) stack_config['Order'] = (stage_order, stack_order) # add default name if 'StackName' not in stack_config: stack_config['StackName'] = stack_key # Make relate template path template = stack_config.get('Template') if template and \ not (template.startswith('https') and template.startswith( 'http')): template_path = os.path.realpath( os.path.join(self._basedir, template)) if not os.path.exists(template_path): raise FormatError('File Not Found %s' % template_path) stack_config['Template'] = template_path stack_policy = stack_config.get('StackPolicy') if stack_policy and stack_policy not in CANNED_STACK_POLICIES: stack_policy_path = os.path.realpath( os.path.join(self._basedir, stack_policy)) if not os.path.exists(stack_policy_path): raise FormatError('File Not Found %s' % stack_policy_path) stack_config['StackPolicy'] = stack_policy_path key = StackKey(stage_key, stack_key) stack_profile = StackProfile.from_dict(**stack_config) stack_parameters = StackParameters.from_dict(**stack_config) stack_metadata = StackMetadata.from_dict(**stack_config) stack = StackDeployment( key, stack_metadata, stack_profile, stack_parameters) return stack class ParamReferenceTemplate(string.Template): idpattern = 'a^' braceidpattern = r'(?a:[_a-z]+[._a-z0-9-]*)' def have_parameter_reference_pattern(config): string = json.dumps(config) match = ParamReferenceTemplate.pattern.search(string) if match is None: return False return match.group('escaped') is not None or \ match.group('braced') is not None or \ match.group('named') is not None class FormatV3(FormatV2): VERSION = Version('3.0.0') def validate(self, config): schema = load_schema(str(FormatV2.VERSION)) # use same schema as v2 jsonschema.validate(config, schema)
Python
0.000003
@@ -6785,36 +6785,12 @@ n = -'a%5E'%0A braceidpattern = r' +%5C%7B (?a: @@ -6809,16 +6809,18 @@ z0-9-%5D*) +%5C%7D '%0A%0A%0Adef
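Decoded, the patch drops the throwaway `idpattern = 'a^'` plus separate `braceidpattern` pair (`string.Template` only consults `braceidpattern` on Python 3.7+) and instead folds the literal braces into `idpattern`, so `${...}` parameter references are still detected on older interpreters. Roughly the resulting class:

import string

class ParamReferenceTemplate(string.Template):
    # literal braces are now part of idpattern; the 3.7-only
    # braceidpattern attribute is no longer needed
    idpattern = r'\{(?a:[_a-z]+[._a-z0-9-]*)\}'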
44733bc3e1b530d3deda89c8ebf9cd6c20a8e6c1
Fix the context manager to return a connection object (fix #41)
pyathena/connection.py
pyathena/connection.py
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import unicode_literals import logging import os import time from boto3.session import Session from pyathena.converter import TypeConverter from pyathena.cursor import Cursor from pyathena.error import NotSupportedError from pyathena.formatter import ParameterFormatter _logger = logging.getLogger(__name__) class Connection(object): _ENV_S3_STAGING_DIR = 'AWS_ATHENA_S3_STAGING_DIR' def __init__(self, s3_staging_dir=None, region_name=None, schema_name='default', poll_interval=1, encryption_option=None, kms_key=None, profile_name=None, role_arn=None, role_session_name='PyAthena-session-{0}'.format(int(time.time())), duration_seconds=3600, converter=None, formatter=None, retry_exceptions=('ThrottlingException', 'TooManyRequestsException'), retry_attempt=5, retry_multiplier=1, retry_max_delay=1800, retry_exponential_base=2, cursor_class=Cursor, **kwargs): if s3_staging_dir: self.s3_staging_dir = s3_staging_dir else: self.s3_staging_dir = os.getenv(self._ENV_S3_STAGING_DIR, None) assert self.s3_staging_dir, 'Required argument `s3_staging_dir` not found.' assert schema_name, 'Required argument `schema_name` not found.' self.region_name = region_name self.schema_name = schema_name self.poll_interval = poll_interval self.encryption_option = encryption_option self.kms_key = kms_key if role_arn: creds = self._assume_role(profile_name, region_name, role_arn, role_session_name, duration_seconds, **kwargs) profile_name = None kwargs.update({ 'aws_access_key_id': creds['AccessKeyId'], 'aws_secret_access_key': creds['SecretAccessKey'], 'aws_session_token': creds['SessionToken'], }) session = Session(profile_name=profile_name, **kwargs) self._client = session.client('athena', region_name=region_name, **kwargs) self._converter = converter if converter else TypeConverter() self._formatter = formatter if formatter else ParameterFormatter() self.retry_exceptions = retry_exceptions self.retry_attempt = retry_attempt self.retry_multiplier = retry_multiplier self.retry_max_delay = retry_max_delay self.retry_exponential_base = retry_exponential_base self.cursor_class = cursor_class @staticmethod def _assume_role(profile_name, region_name, role_arn, role_session_name, duration_seconds, **kwargs): # MFA is not supported. If you want to use MFA, create a configuration file. # http://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider session = Session(profile_name=profile_name, **kwargs) client = session.client('sts', region_name=region_name, **kwargs) response = client.assume_role( RoleArn=role_arn, RoleSessionName=role_session_name, DurationSeconds=duration_seconds, ) return response['Credentials'] def __enter__(self): return self.cursor() def __exit__(self, exc_type, exc_val, exc_tb): self.close() def cursor(self, cursor=None, **kwargs): if not cursor: cursor = self.cursor_class return cursor(self._client, self.s3_staging_dir, self.schema_name, self.poll_interval, self.encryption_option, self.kms_key, self._converter, self._formatter, self.retry_exceptions, self.retry_attempt, self.retry_multiplier, self.retry_max_delay, self.retry_exponential_base, **kwargs) def close(self): pass def commit(self): pass def rollback(self): raise NotSupportedError
Python
0.000004
@@ -3383,17 +3383,8 @@ self -.cursor() %0A%0A
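Decoded, the one-line fix makes `__enter__` return the connection itself rather than a fresh cursor, which is what issue #41 asked for: `with pyathena.connect(...) as conn:` now yields a `Connection`, and callers create cursors explicitly via `conn.cursor()`. Sketch of the patched context-manager methods:

    def __enter__(self):
        return self  # was: return self.cursor()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()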
0046421f050057bf1c3c87b06677feaa4cdc1f4a
set log formatter message type
awx/main/utils/formatters.py
awx/main/utils/formatters.py
# Copyright (c) 2017 Ansible Tower by Red Hat # All Rights Reserved. from logstash.formatter import LogstashFormatterVersion1 from copy import copy import json import time class LogstashFormatter(LogstashFormatterVersion1): def __init__(self, **kwargs): settings_module = kwargs.pop('settings_module', None) ret = super(LogstashFormatter, self).__init__(**kwargs) if settings_module: self.host_id = settings_module.CLUSTER_HOST_ID self.tower_uuid = settings_module.LOG_AGGREGATOR_TOWER_UUID return ret def reformat_data_for_log(self, raw_data, kind=None): ''' Process dictionaries from various contexts (job events, activity stream changes, etc.) to give meaningful information Output a dictionary which will be passed in logstash or syslog format to the logging receiver ''' if kind == 'activity_stream': return raw_data rename_fields = set(( 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', 'processName', 'relativeCreated', 'thread', 'threadName', 'extra', 'auth_token', 'tags', 'host', 'host_id', 'level', 'port', 'uuid')) if kind == 'system_tracking': data = copy(raw_data['facts_data']) elif kind == 'job_events': data = copy(raw_data['event_model_data']) else: data = copy(raw_data) if isinstance(data, basestring): data = json.loads(data) skip_fields = ('res', 'password', 'event_data', 'stdout') data_for_log = {} def index_by_name(alist): """Takes a list of dictionaries with `name` as a key in each dict and returns a dictionary indexed by those names""" adict = {} for item in alist: subdict = copy(item) if 'name' in subdict: name = subdict.get('name', None) elif 'path' in subdict: name = subdict.get('path', None) if name: # Logstash v2 can not accept '.' in a name name = name.replace('.', '_') adict[name] = subdict return adict def convert_to_type(t, val): if t is float: val = val[:-1] if val.endswith('s') else val try: return float(val) except ValueError: return val elif t is int: try: return int(val) except ValueError: return val elif t is str: return val if kind == 'job_events': data.update(data.get('event_data', {})) for fd in data: if fd in skip_fields: continue key = fd if fd in rename_fields: key = 'event_%s' % fd val = data[fd] if key.endswith('created'): time_float = time.mktime(data[fd].timetuple()) val = self.format_timestamp(time_float) data_for_log[key] = val elif kind == 'system_tracking': module_name = raw_data['module_name'] if module_name in ['services', 'packages', 'files']: data_for_log[module_name] = index_by_name(data) elif module_name == 'ansible': data_for_log['ansible'] = data # Remove sub-keys with data type conflicts in elastic search data_for_log['ansible'].pop('ansible_python_version', None) data_for_log['ansible']['ansible_python'].pop('version_info', None) else: data_for_log['facts'] = data data_for_log['module_name'] = module_name elif kind == 'performance': request = raw_data['python_objects']['request'] response = raw_data['python_objects']['response'] # Note: All of the below keys may not be in the response "dict" # For example, X-API-Query-Time and X-API-Query-Count will only # exist if SQL_DEBUG is turned on in settings. 
headers = [ (float, 'X-API-Time'), # may end with an 's' "0.33s" (int, 'X-API-Query-Count'), (float, 'X-API-Query-Time'), # may also end with an 's' (str, 'X-API-Node'), ] data_for_log['x_api'] = {k: convert_to_type(t, response[k]) for (t, k) in headers if k in response} data_for_log['request'] = { 'method': request.method, 'path': request.path, 'path_info': request.path_info, 'query_string': request.META['QUERY_STRING'], 'data': request.data, } return data_for_log def get_extra_fields(self, record): fields = super(LogstashFormatter, self).get_extra_fields(record) if record.name.startswith('awx.analytics'): log_kind = record.name[len('awx.analytics.'):] fields = self.reformat_data_for_log(fields, kind=log_kind) return fields def format(self, record): message = { # Fields not included, but exist in related logs # 'path': record.pathname # '@version': '1', # from python-logstash # 'tags': self.tags, '@timestamp': self.format_timestamp(record.created), 'message': record.getMessage(), 'host': self.host, 'type': self.message_type, # Extra Fields 'level': record.levelname, 'logger_name': record.name, } if getattr(self, 'tower_uuid', None): message['tower_uuid'] = self.tower_uuid if getattr(self, 'host_id', None): message['cluster_host_id'] = self.host_id # Add extra fields message.update(self.get_extra_fields(record)) # If exception, add debug info if record.exc_info: message.update(self.get_debug_fields(record)) return self.serialize(message)
Python
0.000001
@@ -539,16 +539,84 @@ ER_UUID%0A + self.message_type = settings_module.LOG_AGGREGATOR_TYPE%0A
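Decoded, the diff adds one assignment directly below `self.tower_uuid`, copying the configured aggregator type onto the formatter; `format()` already emits that attribute as the `type` field of every logstash message, so previously it fell back to the base class default. Excerpt of the patched constructor body (attribute names taken from the diff):

        if settings_module:
            self.host_id = settings_module.CLUSTER_HOST_ID
            self.tower_uuid = settings_module.LOG_AGGREGATOR_TOWER_UUID
            self.message_type = settings_module.LOG_AGGREGATOR_TYPE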
4545fd9f5a6652ff787da6db1f4868aa81f82f75
change font
string_recorder/string_recorder.py
string_recorder/string_recorder.py
import io
import shutil
import subprocess
import tempfile


class StringRecorder(object):

    font = 'consolas'

    def __init__(self, max_frames=100000):
        self.tmp_dir = tempfile.mkdtemp()
        self.max_frames = max_frames
        self.__frame_t = 0
        self.height = -1
        self.width = -1

    def __del__(self):
        self._delete_tmp_dir()

    def _delete_tmp_dir(self):
        shutil.rmtree(self.tmp_dir)
        self.tmp_dir = None

    def reset(self):
        self._delete_tmp_dir()
        self.tmp_dir = tempfile.mkdtemp()
        self.__frame_t = 0
        self.height = -1
        self.width = -1

    def record_frame(self, frame, speed=None):
        assert type(frame) == str
        output = io.StringIO()
        output.write(frame)
        lines = frame.strip().split('\n')
        width = max([len(l) for l in lines])
        height = len(lines)
        record_path = '{}/{:09d}.png'.format(self.tmp_dir, self.frame_t)
        if self.width < width:
            self.width = width
            self.width_file = record_path
        if self.height < height:
            self.height = height
            self.height_file = record_path
        command = ['convert', '-font', '{}'.format(self.font),
                   'label:{}'.format(output.getvalue()), record_path]
        with subprocess.Popen(command) as proc:
            proc.wait()
        if self.frame_t > 0 and self.frame_t % self.max_frames == 0:
            # if the number of frames are too large.
            tmp = self.frame_t
            self.make_gif('frm{:09d}_{:09d}.gif'.format(
                self.frame_t - self.max_frames, self.frame_t - 1))
            self.__frame_t = tmp
        self.__frame_t += 1

    def get_max_size(self):
        def get(w_or_h, record_path):
            command = ['identify', '-format',
                       '%[fx:{}]'.format(w_or_h), record_path]
            proc = subprocess.Popen(command, stdout=subprocess.PIPE)
            return proc.stdout.read().decode('utf8')
        w = get('w', self.width_file)
        h = get('h', self.height_file)
        return w, h

    def make_gif(self, save_path, speed=0.3):
        if not save_path.endswith('.gif'):
            save_path += '.gif'
        w, h = self.get_max_size()
        command = ['convert', '-background', 'white',
                   '-delay', '{}'.format(int(speed * 100)),
                   '-extent', '{}x{}'.format(w, h),
                   '{}/*.png'.format(self.tmp_dir), save_path]
        with subprocess.Popen(command) as proc:
            proc.wait()
        self.reset()

    @property
    def frame_t(self):
        return self.__frame_t
Python
0.000002
@@ -88,31 +88,8 @@ ):%0A%0A - font = 'consolas'%0A%0A @@ -106,16 +106,32 @@ __(self, + font='Courier', max_fra
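Decoded, the recorded hunks remove the `font = 'consolas'` class attribute and add a `font='Courier'` keyword to `__init__`; an assignment binding the parameter onto the instance is not visible in the hunks, so the last line below is an assumption:

class StringRecorder(object):
    def __init__(self, font='Courier', max_frames=100000):
        self.font = font  # assumed binding; not shown in the recorded diff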
35825bbf06d7eb98c9e06cbd98e610659627c3d4
Adjust account check digit (dv)
pyboleto/bank/safra.py
pyboleto/bank/safra.py
# -*- coding: utf-8 -*-
from ..data import BoletoData, CustomProperty


class BoletoSafra(BoletoData):
    """ Boleto Safra """

    agencia_cedente = CustomProperty('agencia_cedente', 5)
    conta_cedente = CustomProperty('conta_cedente', 8)
    conta_cedente_dv = CustomProperty('conta_cedente_dv',1)
    nosso_numero = CustomProperty('nosso_numero', 8)

    def __init__(self):
        BoletoData.__init__(self)
        self.codigo_banco = "422"
        self.logo_image = "logo_safra.jpg"
        self.modalidade_cobranca = '2'

    @property
    def agencia_conta_cedente(self):
        return "%s/%s" % (self.agencia_cedente, self.conta_cedente)

    @property
    def dv_nosso_numero(self):
        _c = '98765432'
        _d = '%8s' %(self.nosso_numero.zfill(8))
        t = 0
        for i in range(len(_c)):
            t+= int(_d[i]) * int(_c[i])
        r = t % 11
        if r == 0:
            return 1
        elif r == 1:
            return 0
        else:
            return 11-r

    def format_nosso_numero(self):
        return "%s-%s" % (self.nosso_numero,self.dv_nosso_numero)

    @property
    def campo_livre(self):
        content = "%1s%5s%8s%1s%8s%1s%1s" % ('7',
                                             self.agencia_cedente,
                                             self.conta_cedente,
                                             self.conta_cedente_dv,
                                             self.nosso_numero,
                                             self.dv_nosso_numero,
                                             self.modalidade_cobranca)
        return str(content)
Python
0.000001
@@ -611,16 +611,19 @@ n %22%25s/%25s +-%25s %22 %25 (sel @@ -659,16 +659,38 @@ _cedente +,self.conta_cedente_dv )%0A%0A @
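Decoded, the displayed agency/account string gains the account check digit, so `agencia_conta_cedente` renders as `agencia/conta-dv` (for example `12345/67890123-4`, digits hypothetical). Sketch of the patched property:

    @property
    def agencia_conta_cedente(self):
        return "%s/%s-%s" % (self.agencia_cedente,
                             self.conta_cedente, self.conta_cedente_dv)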
eccaff6482b4dcf4555cf05d425223b9c5afad97
use cfdisk if available, per recommendation in fdisk man; make compatible with Ubuntu
salt/modules/qemu_nbd.py
salt/modules/qemu_nbd.py
'''
Qemu Command Wrapper
====================

The qemu system comes with powerful tools, such as qemu-img and qemu-nbd
which are used here to build up kvm images.
'''

# Import python libs
import os
import glob
import tempfile
import time

# Import third party tools
import yaml

# Import salt libs
import salt.utils
import salt.crypt


def __virtual__():
    '''
    Only load if qemu-img and qemu-nbd are installed
    '''
    if salt.utils.which('qemu-nbd'):
        return 'qemu_nbd'
    return False


def connect(image):
    '''
    Activate nbd for an image file.

    CLI Example::

        salt '*' qemu_nbd.connect /tmp/image.raw
    '''
    if not os.path.isfile(image):
        return ''
    __salt__['cmd.run']('modprobe nbd max_part=63')
    for nbd in glob.glob('/dev/nbd?'):
        if __salt__['cmd.retcode']('fdisk -l {0}'.format(nbd)):
            while True:
                # Sometimes nbd does not "take hold", loop until we can verify
                __salt__['cmd.run'](
                    'qemu-nbd -c {0} {1}'.format(nbd, image)
                )
                if not __salt__['cmd.retcode']('fdisk -l {0}'.format(nbd)):
                    break
            return nbd
    return ''


def mount(nbd):
    '''
    Pass in the nbd connection device location, mount all partitions and
    return a dict of mount points

    CLI Example::

        salt '*' qemu_nbd.mount /dev/nbd0
    '''
    ret = {}
    for part in glob.glob('{0}p*'.format(nbd)):
        root = os.path.join(
            tempfile.gettempdir(),
            'nbd',
            os.path.basename(nbd))
        m_pt = os.path.join(root, os.path.basename(part))
        time.sleep(1)
        mnt = __salt__['mount.mount'](m_pt, part, True)
        if mnt is not True:
            continue
        ret[m_pt] = part
    return ret


def init(image):
    '''
    Mount the named image via qemu-nbd and return the mounted roots

    CLI Example::

        salt '*' qemu_nbd.init /srv/image.qcow2
    '''
    nbd = connect(image)
    if not nbd:
        return ''
    return mount(nbd)


def clear(mnt):
    '''
    Pass in the mnt dict returned from nbd_mount to unmount and disconnect
    the image from nbd. If all of the partitions are unmounted return an
    empty dict, otherwise return a dict containing the still mounted
    partitions

    CLI Example::

        salt '*' qemu_nbd.clear '{/mnt/foo: /dev/nbd0p1}'
    '''
    if isinstance(mnt, str):
        mnt = yaml.load(mnt)
    ret = {}
    nbds = set()
    for m_pt, dev in mnt.items():
        mnt_ret = __salt__['mount.umount'](m_pt)
        if mnt_ret is not True:
            ret[m_pt] = dev
        nbds.add(dev[:dev.rindex('p')])
    if ret:
        return ret
    for nbd in nbds:
        __salt__['cmd.run']('qemu-nbd -d {0}'.format(nbd))
    return ret
Python
0
@@ -688,24 +688,127 @@ return '' +%0A%0A if salt.utils.which('cfdisk'):%0A fdisk = 'cfdisk -P t'%0A else:%0A fdisk = 'fdisk -l' %0A __salt_ @@ -919,35 +919,30 @@ .retcode'%5D(' -fdisk -l %7B0 +%7B0%7D %7B1 %7D'.format(nb @@ -931,32 +931,39 @@ %7B0%7D %7B1%7D'.format( +fdisk, nbd)):%0A @@ -1236,19 +1236,14 @@ '%5D(' -fdisk -l %7B0 +%7B0%7D %7B1 %7D'.f @@ -1240,32 +1240,39 @@ %7B0%7D %7B1%7D'.format( +fdisk, nbd)):%0A @@ -1520,24 +1520,24 @@ ev/nbd0%0A - '''%0A ret @@ -1524,24 +1524,103 @@ bd0%0A '''%0A + __salt__%5B'cmd.run'%5D(%0A 'partprobe %7B0%7D'.format(nbd)%0A )%0A ret = %7B%7D
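Decoded, the patch does two things: it probes partitions with `cfdisk -P t` whenever cfdisk is installed, falling back to `fdisk -l` otherwise (which keeps Ubuntu working), and it runs `partprobe` at the top of `mount()` so the kernel rescans the nbd device before the partitions are globbed. Sketch of the command selection inside `connect()`:

    if salt.utils.which('cfdisk'):
        fdisk = 'cfdisk -P t'
    else:
        fdisk = 'fdisk -l'
    __salt__['cmd.retcode']('{0} {1}'.format(fdisk, nbd))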
34e121f22adac487b7dbd5f79d3e2033a89fabd6
fix mask utils unittest
plugin_tests/polygon_and_mask_utils_test.py
plugin_tests/polygon_and_mask_utils_test.py
# -*- coding: utf-8 -*- """ Created on Sun Aug 11 22:50:03 2019 @author: tageldim """ import unittest import os import girder_client from pandas import read_csv from histomicstk.utils.polygon_and_mask_utils import ( get_image_from_htk_response, get_bboxes_from_slide_annotations, _get_idxs_for_all_rois, get_roi_mask) # %%=========================================================================== # Constants & prep work # ============================================================================= APIURL = 'http://demo.kitware.com/histomicstk/api/v1/' SOURCE_FOLDER_ID = '5bbdeba3e629140048d017bb' SAMPLE_SLIDE_ID = "5bbdeed1e629140048d01bcb" GTCODE_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'test_files', 'sample_GTcodes.csv') gc= girder_client.GirderClient(apiUrl = APIURL) gc.authenticate(interactive=True) # %%=========================================================================== class GirderUtilsTest(unittest.TestCase): def test_get_image_from_htk_response(self): getStr = "/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d" % ( SAMPLE_SLIDE_ID, 59000, 59100, 35000, 35100) resp = gc.get(getStr, jsonResp=False) rgb = get_image_from_htk_response(resp) self.TestCase.assertTupleEqual(rgb.shape, (100, 100, 3)) # %%=========================================================================== class MaskUtilsTest(unittest.TestCase): def test_get_bboxes_from_slide_annotations(self): slide_annotations = gc.get('/annotation/item/' + SAMPLE_SLIDE_ID) element_infos = get_bboxes_from_slide_annotations(slide_annotations) self.assertTupleEqual(element_infos.shape, (49, 9)) self.TestCase.assertTupleEqual( tuple(element_infos.columns), (('annidx','elementidx','type','group','xmin','xmax','ymin', 'ymax','bbox_area'))) def test_get_roi_mak(self): slide_annotations = gc.get('/annotation/item/' + SAMPLE_SLIDE_ID) element_infos = get_bboxes_from_slide_annotations(slide_annotations) # read ground truth codes and information GTCodes = read_csv(GTCODE_PATH) GTCodes.index = GTCodes.loc[:, 'group'] # get indices of rois idxs_for_all_rois = _get_idxs_for_all_rois( GTCodes=GTCodes, element_infos=element_infos) # get roi mask and info ROI, roiinfo = get_roi_mask( slide_annotations=slide_annotations, element_infos=element_infos, GTCodes_df=GTCodes.copy(), idx_for_roi = idxs_for_all_rois[0], # <- let's focus on first ROI, iou_thresh=0.0, roiinfo=None, crop_to_roi=True, verbose=True, monitorPrefix="roi 1") self.TestCase.assertTupleEqual(ROI.shape, (4594, 4542)) self.TestCase.assertTupleEqual(( roiinfo['BBOX_HEIGHT'], roiinfo['BBOX_WIDTH'], roiinfo['XMIN'], roiinfo['XMAX'], roiinfo['YMIN'], roiinfo['YMAX']), (4595, 4543, 59206, 63749, 33505, 38100)) # %%=========================================================================== if __name__ == '__main__': unittest.main()
Python
0
@@ -647,10 +647,10 @@ bdee -d1 +92 e629 @@ -663,11 +663,12 @@ d01b -cb%22 +5d%22 %0AGTC @@ -1294,33 +1294,24 @@ self. -TestCase. assertTupleE @@ -1764,33 +1764,24 @@ self. -TestCase. assertTupleE @@ -2797,19 +2797,20 @@ verbose= -Tru +Fals e, monit @@ -2845,33 +2845,24 @@ self. -TestCase. assertTupleE @@ -2908,17 +2908,8 @@ elf. -TestCase. asse
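Decoded, the fix points SAMPLE_SLIDE_ID at a different item (5bbdee92e629140048d01b5d), turns the ROI call's `verbose` flag down to False, and repairs the assertion calls: unittest assertions are bound methods on the test instance itself, so `self.TestCase.assertTupleEqual` raises an AttributeError at run time. Sketch:

# before (broken): self.TestCase.assertTupleEqual(rgb.shape, (100, 100, 3))
self.assertTupleEqual(rgb.shape, (100, 100, 3))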
44696aa932b1655d9850ecd9bfc6412696c06bd2
disable parallel tests in http_provider_test
studio/tests/http_provider_test.py
studio/tests/http_provider_test.py
import unittest import subprocess import time from random import randint import os import tempfile import uuid from studio import model from studio.util import has_aws_credentials from model_test import get_test_experiment @unittest.skipIf('GOOGLE_APPLICATION_CREDENTIALS' not in os.environ.keys(), "GOOGLE_APPLICATION_CREDENTIALS is missing, needed for " + "server to communicate with storage") class HTTPProviderTest(unittest.TestCase): _mutliprocess_shared_ = True @classmethod def setUpClass(self): if not has_aws_credentials(): return print("Starting up the API server") self.port = randint(5000, 9000) # self.app.run(port=self.port, debug=True) # self.serverp.start() self.server_config_file = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'test_config_http_server.yaml') print(self.server_config_file) self.client_config_file = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'test_config_http_client.yaml') self.serverp = subprocess.Popen([ 'studio-ui', '--port=' + str(self.port), '--verbose=debug', '--config=' + self.server_config_file, '--host=localhost']) time.sleep(25) @classmethod def tearDownClass(self): if not has_aws_credentials(): return print("Shutting down the API server") self.serverp.kill() def get_db_provider(self): config = model.get_config(self.client_config_file) config['database']['serverUrl'] = 'http://localhost:' + str(self.port) return model.get_db_provider(config) def test_add_get_experiment(self): experiment_tuple = get_test_experiment() db = self.get_db_provider() db.add_experiment(experiment_tuple[0]) experiment = db.get_experiment(experiment_tuple[0].key) self.assertEquals(experiment.key, experiment_tuple[0].key) self.assertEquals(experiment.filename, experiment_tuple[0].filename) self.assertEquals(experiment.args, experiment_tuple[0].args) db.delete_experiment(experiment_tuple[1]) def test_add_get_experiment_artifacts(self): experiment_tuple = get_test_experiment() e_experiment = experiment_tuple[0] e_artifacts = e_experiment.artifacts a1_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4())) a2_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4())) with open(a1_filename, 'w') as f: f.write('hello world') e_artifacts['a1'] = { 'local': a1_filename, 'mutable': False } e_artifacts['a2'] = { 'local': a2_filename, 'mutable': True } db = self.get_db_provider() db.add_experiment(e_experiment) experiment = db.get_experiment(e_experiment.key) self.assertEquals(experiment.key, e_experiment.key) self.assertEquals(experiment.filename, e_experiment.filename) self.assertEquals(experiment.args, e_experiment.args) db.delete_experiment(e_experiment.key) os.remove(a1_filename) def test_start_experiment(self): db = self.get_db_provider() experiment_tuple = get_test_experiment() db.add_experiment(experiment_tuple[0]) db.start_experiment(experiment_tuple[0]) experiment = db.get_experiment(experiment_tuple[1]) self.assertTrue(experiment.status == 'running') self.assertTrue(experiment.time_added <= time.time()) self.assertTrue(experiment.time_started <= time.time()) self.assertEquals(experiment.key, experiment_tuple[0].key) self.assertEquals(experiment.filename, experiment_tuple[0].filename) self.assertEquals(experiment.args, experiment_tuple[0].args) db.finish_experiment(experiment_tuple[0]) db.delete_experiment(experiment_tuple[1]) if __name__ == '__main__': unittest.main()
Python
0
@@ -473,16 +473,18 @@ e):%0A%0A + # _mutlip
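Decoded, the change just comments the attribute out, so the suite stops advertising itself as shareable across parallel test workers. The attribute name is misspelled in the source to begin with (nose's multiprocess plugin looks for `_multiprocess_shared_`), so it plausibly never took effect; commenting it out records the intent either way:

class HTTPProviderTest(unittest.TestCase):

    # _mutliprocess_shared_ = True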
550766a1447b2d8ed8c1ee7f34e1e6d06b64859c
Add a pragma
dataproperty/_function.py
dataproperty/_function.py
# encoding: utf-8 """ .. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> """ from __future__ import absolute_import, unicode_literals import decimal import math import re from collections import namedtuple from decimal import Decimal from mbstrdecoder import MultiByteStrDecoder from six import text_type from six.moves import range decimal.setcontext(decimal.Context(prec=60, rounding=decimal.ROUND_HALF_DOWN)) _ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]", re.IGNORECASE) def get_integer_digit(value): from typepy import RealNumber, TypeConversionError float_type = RealNumber(value) try: abs_value = abs(float_type.convert()) except TypeConversionError: raise ValueError( "the value must be a number: value='{}' type='{}'".format(value, type(value)) ) if abs_value.is_zero(): return 1 try: return len(text_type(abs_value.quantize(Decimal("1."), rounding=decimal.ROUND_DOWN))) except decimal.InvalidOperation as e: raise ValueError(e) class DigitCalculator(object): Threshold = namedtuple("Threshold", "pow digit_len") def __init__(self): upper_threshold = self.Threshold(pow=-2, digit_len=6) self.__min_digit_len = 1 self.__treshold_list = [ self.Threshold(upper_threshold.pow + i, upper_threshold.digit_len - i) for i, _ in enumerate(range(upper_threshold.digit_len, self.__min_digit_len - 1, -1)) ] def get_decimal_places(self, value): from typepy import Integer int_type = Integer(value) float_digit_len = 0 if int_type.is_type(): abs_value = abs(int_type.convert()) else: abs_value = abs(float(value)) text_value = text_type(abs_value) float_text = 0 if text_value.find(".") != -1: float_text = text_value.split(".")[1] float_digit_len = len(float_text) elif text_value.find("e-") != -1: float_text = text_value.split("e-")[1] float_digit_len = int(float_text) - 1 abs_digit = self.__min_digit_len for treshold in self.__treshold_list: if abs_value < math.pow(10, treshold.pow): abs_digit = treshold.digit_len break return min(abs_digit, float_digit_len) _digit_calculator = DigitCalculator() def get_number_of_digit(value): try: integer_digits = get_integer_digit(value) except (ValueError, TypeError, OverflowError): return (None, None) try: decimal_places = _digit_calculator.get_decimal_places(value) except (ValueError, TypeError): decimal_places = None return (integer_digits, decimal_places) def is_multibyte_str(text): from typepy import StrictLevel, String if not String(text, strict_level=StrictLevel.MIN).is_type(): return False try: unicode_text = MultiByteStrDecoder(text).unicode_str except ValueError: return False try: unicode_text.encode("ascii") except UnicodeEncodeError: return True return False def _validate_eaaw(east_asian_ambiguous_width): if east_asian_ambiguous_width in (1, 2): return raise ValueError( "invalid east_asian_ambiguous_width: expected=1 or 2, actual={}".format( east_asian_ambiguous_width ) ) def strip_ansi_escape(unicode_str): return _ansi_escape.sub("", unicode_str) def calc_ascii_char_width(unicode_str, east_asian_ambiguous_width=1): import unicodedata width = 0 for char in unicode_str: char_width = unicodedata.east_asian_width(char) if char_width in "WF": width += 2 elif char_width == "A": _validate_eaaw(east_asian_ambiguous_width) width += east_asian_ambiguous_width else: width += 1 return width def get_ascii_char_width(unicode_str, east_asian_ambiguous_width=1): import warnings warnings.warn( "calc_ascii_char_width is now deprecated, use calc_ascii_char_width instead.", DeprecationWarning, ) return calc_ascii_char_width(unicode_str, east_asian_ambiguous_width)
Python
0.999999
@@ -4051,32 +4051,52 @@ iguous_width=1): + # pragma: no cover %0A import warn
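Decoded, the pragma lands at the end of the deprecated wrapper's `def` line, telling coverage.py to exclude the function from coverage measurement (it only forwards to `calc_ascii_char_width` after emitting a DeprecationWarning):

def get_ascii_char_width(unicode_str, east_asian_ambiguous_width=1):  # pragma: no cover
    ...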
82bf2b2ca661d9f1057d7001b26a741178749df5
Fix the dupefilter bug
zhihu/zhihu/spiders/zhihuspider.py
zhihu/zhihu/spiders/zhihuspider.py
# -*- coding: utf-8 -*- import scrapy import re import pdb import json from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from ..items import ZhihuItem,RelationItem from scrapy.http import Request,FormRequest from scrapy_redis.spiders import RedisSpider # ------------------------------------------ # 版本:1.0 # 日期:2017-8-06 # 作者:AlexTan # <CSDN: http://blog.csdn.net/alextan_> # <e-mail: alextanbz@gmail.com> # ------------------------------------------ #zhihuspider1是模拟浏览器爬(速度慢,不建议,仅供学习) zhihuspider0抓包爬(速度快) class ZhihuspiderSpider(RedisSpider): #class ZhihuspiderSpider(scrapy.Spider): name = "zhihuspider1" #allowed_domains = ["zhihu.com"] host = 'https://www.zhihu.com' redis_key = "zhihuspider:start_urls" #start_urls = ['https://www.zhihu.com/people/yun-he-shu-ju-8/answers'] strat_user_id = ['yun-he-shu-ju-8'] #pdb.set_trace() dcap = dict(DesiredCapabilities.PHANTOMJS) dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0") dcap["phantomjs.page.settings.loadImages"] = False obj = webdriver.PhantomJS(desired_capabilities=dcap) def start_requests(self): for one in self.strat_user_id: yield Request('https://www.zhihu.com/people/'+one+'/answers',callback=self.parse) #return [Request('https://www.zhihu.com/#signin',callback=self.start_login,meta={'cookiejar':1})] #这个登录已不可用,仅供学习 def start_login(self,response): xsrf = response.xpath('//input[@name="_xsrf"]/@value').extract_first() return [FormRequest('https://www.zhihu.com/login/phone_num',method='POST',meta={'cookiejar':response.meta['cookiejar']},formdata={ #'_xsrf':xsrf, 'password':'feifengwind', 'remember_me':"true", 'phone_num':'18983848805'}, callback=self.after_login )] def after_login(self,response): pdb.set_trace() if json.loads(response.body)['msg'].encode('utf8') == "登录成功": self.logger.info("登录成功!%s" % str(response.meta['cookiejar'])) print("登录成功!") self.obj.add_cookie(response.meta['cookiejar']) for one in self.strat_user_id: yield Request('https://www.zhihu.com/people/'+one+'/answers',meta={'cookiejar':response.meta['cookiejar']},callback=self.parse) else: self.logger.error('登录失败') def __del__(self): self.obj.quit() def parse(self, response): item = ZhihuItem() name = response.xpath('//span[@class="ProfileHeader-name"]/text()').extract()[0] #pdb.set_trace() user_image_url = response.xpath('//img[@class="Avatar Avatar--large UserAvatar-inner"]/@srcset').extract()[0].replace(' 2x','') user_id = re.findall('people\/(.*?)\/',response.url)[0] gender_icon = response.xpath('.//svg[@class="Icon Icon--male" or @class="Icon Icon--female"]/@class').extract() #pdb.set_trace() gender = "" if gender_icon: if gender_icon[0] == "Icon Icon--female": gender = "女" elif gender_icon[0] == "Icon Icon--male": gender = "男" item['name'] = name item['user_id'] = user_id item['user_image_url'] = user_image_url item['gender'] = gender try: num = response.xpath('//div[@class="NumberBoard-value"]/text()').extract() item['followees_num'] = num[0] item['followers_num'] = num[1] followees_url = response.url.replace('answers','following') followers_url = response.url.replace('answers','followers') relation_item = RelationItem() relation_item['relations_id'] = [] relation_item['user_id'] = user_id relation_item['relation_type'] = 'followees' yield Request(followees_url,callback=self.relations,meta={'page':1,'item':relation_item}) relation_item['relation_type'] = 'followers' yield 
Request(followers_url,callback=self.relations,meta={'page':1,'item':relation_item}) except: print("需要登录!") self.obj.get(response.url) try: self.obj.find_element_by_class_name('ProfileHeader-expandButton').click() first = self.obj.find_elements_by_xpath('//div[@class="ProfileHeader-detailItem"]') for one in first: label = one.find_element_by_class_name('ProfileHeader-detailLabel').text if label == "居住地": location = one.find_element_by_class_name('ProfileHeader-detailValue').text.replace('\n',',') item['location'] = location elif label == "所在行业" or "行业": business = one.find_element_by_class_name('ProfileHeader-detailValue').text.replace('\n',',') item['business'] = business elif label == "职业经历": professional = one.find_element_by_class_name('ProfileHeader-detailValue').text.replace('\n',',') item['professional'] = professional elif label == "教育经历": education = one.find_element_by_class_name('ProfileHeader-detailValue').text.replace('\n',',') item['education'] = education else: pass except: pass yield item def relations(self,response): self.obj.get(response.url) followees_a = self.obj.find_elements_by_xpath('//a[@class="UserLink-link"]') #pdb.set_trace() #followees_a = response.xpath('//a[@class="UserLink-link"]/@href').extract() followees = [] for one in followees_a: try: one = one.get_attribute('href') followees.append(one.replace('https://www.zhihu.com/people/','')) except: pass followees = list(set(followees)) #pdb.set_trace() response.meta['item']['relations_id']+=followees nextpage_button = response.xpath('//button[@class="Button PaginationButton PaginationButton-next Button--plain"]').extract() if nextpage_button: #pdb.set_trace() nextpage_url = response.url.replace('?page='+str(response.meta['page']),'') + "?page=" + str(response.meta['page']+1) yield Request(nextpage_url,callback=self.relations,meta={'page':response.meta['page']+1,'item':response.meta['item']}) else: yield response.meta['item'] for user in followees: yield Request('https://www.zhihu.com/people/'+user+'/answers',callback=self.parse)
Python
0.000002
@@ -1375,32 +1375,49 @@ lback=self.parse +,dont_filter=True )%0A #retur @@ -6789,28 +6789,29 @@ nswers',callback=self.parse) +%0A
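Decoded, the fix passes `dont_filter=True` on the seed request in `start_requests`: scrapy_redis's persistent dupefilter remembers previously seen URLs across runs, so restarted crawls silently dropped their own start URLs. Sketch of the patched method:

    def start_requests(self):
        for one in self.strat_user_id:
            yield Request('https://www.zhihu.com/people/' + one + '/answers',
                          callback=self.parse, dont_filter=True)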
670bc221b7af6398c90dbbde64feb22003c97690
Revert "Violate architecture (on purpose)"
squad/api/views.py
squad/api/views.py
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseForbidden
from django.http import HttpResponse

import logging


# an architecture violation
from squad.frontend import views


from squad.http import read_file_upload
from squad.core.models import Group
from squad.core.models import Project
from squad.core.models import Build
from squad.core.models import Environment
from squad.core.models import TestRun
from squad.core.models import Token
from squad.core.tasks import ReceiveTestRun
from squad.core.tasks import exceptions


logger = logging.getLogger()


def valid_token(token, project):
    return project.tokens.filter(key=token).exists() or Token.objects.filter(project=None).exists()


@csrf_exempt
@require_http_methods(["POST"])
def add_test_run(request, group_slug, project_slug, version, environment_slug):
    group = get_object_or_404(Group, slug=group_slug)
    project = get_object_or_404(group.projects, slug=project_slug)

    # authenticate token X project
    token = request.META.get('HTTP_AUTH_TOKEN', None)
    if token:
        if valid_token(token, project):
            pass
        else:
            return HttpResponseForbidden()
    else:
        return HttpResponse('Authentication needed', status=401)

    test_run_data = {
        'version': version,
        'environment_slug': environment_slug,
    }
    uploads = {
        'tests_file': 'tests',
        'metrics_file': 'metrics',
        'log_file': 'log',
        'metadata': 'metadata',
    }
    for key, field in uploads.items():
        if field in request.FILES:
            f = request.FILES[field]
            test_run_data[key] = read_file_upload(f).decode('utf-8')

    if 'attachment' in request.FILES:
        attachments = {}
        for f in request.FILES.getlist('attachment'):
            attachments[f.name] = read_file_upload(f)
        test_run_data['attachments'] = attachments

    receive = ReceiveTestRun(project)
    try:
        receive(**test_run_data)
    except exceptions.invalid_input as e:
        logger.warning(request.get_full_path() + ": " + str(e))
        return HttpResponse(str(e), status=400)

    return HttpResponse('', status=201)
Python
0
@@ -259,71 +259,8 @@ g%0A%0A%0A -# an architecture violation%0Afrom squad.frontend import views%0A%0A%0A from
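Decoded, the revert deletes exactly the two lines the original commit added, restoring the layering rule that the API app must not import from the frontend:

# deleted by this revert:
# from squad.frontend import views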
d8241adb51dcb81b99013aa23744a7a4a45f7d84
fix self importer
mod_pbxproj.py
mod_pbxproj.py
# MIT License
#
# Copyright (c) 2016 Ignacio Calderon aka kronenthaler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This is a backwards-compatibility file. For Unity developers this is the only file it needs to be added to the Unity
# project.
# This file will install the proper python package into the user's python's local space, if it's not present at run-time
# of this script. Afterwards, it will import all necessary modules to the developer to make his/her own script work as
# before.

from distutils.core import run_setup, setup
import site

__author__ = 'kronenthaler'
__version__ = '1.3.1'
__package_name__ = 'mod_pbxproj_installer'

try:
    # check if file exists
    from mod_pbxproj import XcodeProject
except:
    # install it if not present
    print 'Installing package...'
    setup(name=__package_name__,
          license='MIT License',
          install_requires=['mod-pbxproj'],
          script_args=['install', '--user', '--force', '--record', '.uninstall_files'])

    # force the refresh of the packages
    reload(site)

# import publicly
from mod_pbxproj import *
Python
0.000001
@@ -1607,13 +1607,13 @@ = ' -1.3.1 +2.0.0 '%0A__ @@ -1691,28 +1691,24 @@ ts%0A from -mod_ pbxproj impo @@ -2069,20 +2069,16 @@ ly%0Afrom -mod_ pbxproj
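Decoded, both the probe import and the public re-export switch from the legacy `mod_pbxproj` module name to the renamed `pbxproj` package, and the shim's version is bumped to 2.0.0 to match. Sketch of the changed lines:

__version__ = '2.0.0'
from pbxproj import XcodeProject  # availability probe
from pbxproj import *             # public re-export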
87b3e1612bc57461b439355ad08b18480d526430
Update plot_tests.py
autotest/plot_tests.py
autotest/plot_tests.py
import os #import matplotlib.pyplot as plt import numpy as np import pandas as pd import pyemu def plot_summary_test(): try: import matplotlib.pyplot as plt except: return par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"), index_col=0) idx = list(par_df.index.map(lambda x: x.startswith("HK"))) par_df = par_df.loc[idx,:] ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True) plt.savefig(os.path.join("temp","hk_par.png")) plt.close() df = os.path.join("utils","freyberg_pp.pred.usum.csv") figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True) #plt.show() for i,fig in enumerate(figs): plt.figure(fig.number) plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i))) plt.close(fig) df = os.path.join("utils","freyberg_pp.par.usum.csv") figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True) for i,fig in enumerate(figs): plt.figure(fig.number) plt.savefig(os.path.join("temp","test_par_{0}.png".format(i))) plt.close(fig) def pst_plot_test(): try: import matplotlib.pyplot as plt except: return pst = pyemu.Pst(os.path.join("pst","freyberg_gr.pst")) par = pst.parameter_data par.loc[pst.par_names[:3],"pargp"] = "test" par.loc[pst.par_names[1:],"partrans"] = "fixed" pst.plot() #pst.plot(kind="prior", unique_only=False) # pst.plot(kind="prior",unique_only=True) # pst.plot(kind="prior", unique_only=True, fig_title="priors") # # pst.plot(kind="1to1") # pst.plot(kind="1to1",include_zero=True) # pst.plot(kind="1to1", include_zero=True,fig_title="1to1") # # # pst.plot(kind="obs_v_sim") # pst.plot(kind="obs_v_sim",include_zero=True) # pst.plot(kind="obs_v_sim", include_zero=True,fig_title="obs_v_sim") # ax = pst.plot(kind="phi_pie") plt.show() # ax = plt.subplot(111,aspect="equal") # pst.plot(kind="phi_pie",ax=ax) # plt.show() def ensemble_plot_test(): try: import matplotlib.pyplot as plt except: return pst = pyemu.Pst(os.path.join("pst","pest.pst")) cov = pyemu.Cov.from_parameter_data(pst) num_reals = 100 pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst,cov,num_reals=num_reals, use_homegrown=True) csv_file = os.path.join("temp", "pe.csv") pe.plot(filename=csv_file + ".pdf",plot_cols=pst.par_names[:10]) pe.to_csv(csv_file) pyemu.plot_utils.ensemble_helper(pe, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10]) pyemu.plot_utils.ensemble_helper(csv_file, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10]) pst.parameter_data.loc[:,"partrans"] = "none" cov = pyemu.Cov.from_parameter_data(pst) pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst, cov, num_reals=num_reals, use_homegrown=True) pyemu.plot_utils.ensemble_helper([pe,csv_file],filename=csv_file+".pdf", plot_cols=pst.par_names[:10]) pyemu.plot_utils.ensemble_helper([pe, csv_file], filename=csv_file + ".pdf", plot_cols=pst.par_names[:10],sync_bins=False) pyemu.plot_utils.ensemble_helper({"b":pe,"y":csv_file}, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10]) pyemu.plot_utils.ensemble_helper({"b":pe,"y":csv_file}, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10], sync_bins=False) pyemu.plot_utils.ensemble_helper({"b": pe, "y": csv_file}, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10], sync_bins=False, func_dict={pst.par_names[0]:np.log10}) deter_vals = pst.parameter_data.parval1.apply(np.log10).to_dict() pyemu.plot_utils.ensemble_helper({"b": pe, "y": csv_file}, filename=csv_file + ".pdf", plot_cols=pst.par_names[:10], sync_bins=False, func_dict={pst.par_names[0]: np.log10}, deter_vals=deter_vals) if __name__ == "__main__": 
#plot_summary_test() pst_plot_test() #ensemble_plot_test()
Python
0.000001
@@ -1460,17 +1460,16 @@ t()%0A -# pst.plot @@ -1501,26 +1501,24 @@ y=False)%0A - # pst.plot(ki @@ -1541,34 +1541,32 @@ e_only=True)%0A - # pst.plot(kind=%22 @@ -1612,34 +1612,32 @@ iors%22)%0A #%0A - # pst.plot(kind=%22 @@ -1642,26 +1642,24 @@ =%221to1%22)%0A - # pst.plot(ki @@ -1682,34 +1682,32 @@ e_zero=True)%0A - # pst.plot(kind=%22 @@ -1760,26 +1760,24 @@ #%0A #%0A - # pst.plot(ki @@ -1791,26 +1791,24 @@ _v_sim%22)%0A - # pst.plot(ki @@ -1840,26 +1840,24 @@ ro=True)%0A - # pst.plot(ki @@ -1956,27 +1956,16 @@ ie%22)%0A - plt.show() %0A %0A%0A @@ -1966,18 +1966,16 @@ %0A%0A - # ax = pl @@ -2007,18 +2007,16 @@ al%22)%0A - # pst.plo
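Decoded, the commit re-enables the previously commented-out `pst.plot(kind=...)` smoke tests (prior, 1to1, obs_v_sim, and phi_pie with a supplied axis) and deletes the interactive `plt.show()` call that would block a headless test run. Sketch of the re-enabled tail of `pst_plot_test`, assuming a non-interactive matplotlib backend:

    ax = pst.plot(kind="phi_pie")
    ax = plt.subplot(111, aspect="equal")
    pst.plot(kind="phi_pie", ax=ax)
    # plt.show() removed: nothing should block the test runner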
b221251b13882789c2ed95e4cd24b2327e068711
Bump @graknlabs_client_java and @graknlabs_benchmark
dependencies/graknlabs/dependencies.bzl
dependencies/graknlabs/dependencies.bzl
#
# GRAKN.AI - THE KNOWLEDGE GRAPH
# Copyright (C) 2018 Grakn Labs Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

def graknlabs_build_tools():
    git_repository(
        name = "graknlabs_build_tools",
        remote = "https://github.com/graknlabs/build-tools",
        commit = "b5b9f44fc074aa91cda43b4ca8468634bfb44482", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
    )

def graknlabs_graql():
    git_repository(
        name = "graknlabs_graql",
        remote = "https://github.com/graknlabs/graql",
        commit = "e28748cffcc74bdb8fdd754f90c8cf71e2e79d2b", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_graql
    )

def graknlabs_protocol():
    git_repository(
        name = "graknlabs_protocol",
        remote = "https://github.com/graknlabs/protocol",
        commit = "e9cb7fa417822247e26cd571e838688f11b4ae35", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_protocol
    )

def graknlabs_client_java():
    git_repository(
        name = "graknlabs_client_java",
        remote = "https://github.com/graknlabs/client-java",
        commit = "a19c068650e83a1b6aa388a63847e92164bc7795",
    )

def graknlabs_benchmark():
    git_repository(
        name = "graknlabs_benchmark",
        remote = "https://github.com/graknlabs/benchmark.git",
        commit = "97ecea678665e155587a58ebcacb8d3226a76fc8" # keep in sync with protocol changes
    )
Python
0.000001
@@ -1907,48 +1907,48 @@ = %22 -a19c068650e83a1b6aa388a63847e92164bc7795 +97197af2424e9c1f056a46633ee1c566e3f85111 %22,%0A @@ -2124,48 +2124,48 @@ = %22 -97ecea678665e155587a58ebcacb8d3226a76fc8 +a2d31c7a0e7a478a909792e12aa1198d8a56c16c %22 #
cfa947f12f72ac2d74bc351c7ff268fc48840631
Enable NTP by default
anaconda-updates/6/installclasses/silvereye/__init__.py
anaconda-updates/6/installclasses/silvereye/__init__.py
#
# silverye.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import installclass
from constants import *
from pykickstart.constants import *
from product import *
from flags import flags
import iutil
import os
import re
import types
from kickstart import AnacondaKSScript
from storage.partspec import *

import installmethod
import yuminstall

class InstallClass(installclass.BaseInstallClass):
    # name has underscore used for mnemonics, strip if you dont need it
    id = "silvereye"
    name = N_("Silvereye Eucalyptus Installer")
    _description = N_("The default installation of %s is a 'Cloud in a Box'"
                      "install. You can optionally select a different set of"
                      "software now.")
    _descriptionFields = (productName,)
    sortPriority = 100
    hidden = 1
    bootloaderTimeoutDefault = 5
    bootloaderExtraArgs = ["crashkernel=auto"]
    colocated_nc = 0

    tasks = [(N_("Eucalyptus Cloud in a Box"),
              ["core", "eucalyptus-cloud-controller", "eucalyptus-storage-controller",
               "eucalyptus-walrus", "eucalyptus-cluster-controller",
               "eucalyptus-node-controller"]),
             (N_("Eucalyptus Front-end Only"),
              ["core", "eucalyptus-cloud-controller", "eucalyptus-storage-controller",
               "eucalyptus-walrus", "eucalyptus-cluster-controller"]),
             (N_("Eucalyptus Node Controller Only"),
              ["core", "eucalyptus-node-controller"]),
             (N_("Minimal"), ["core"])]

    def setGroupSelection(self, anaconda):
        for pkg in [ 'epel-release', 'elrepo-release', 'euca2ools-release',
                     'eucalyptus-release', 'ntp', 'tcpdump', 'strace', 'man' ]:
            anaconda.backend.selectPackage(pkg)

    def setInstallData(self, anaconda):
        installclass.BaseInstallClass.setInstallData(self, anaconda)
        anaconda.id.simpleFilter = True
        self.setDefaultPartitioning(anaconda.id.storage, anaconda.platform)
        anaconda.id.security.setSELinux(SELINUX_PERMISSIVE)

    def setDefaultPartitioning(self, storage, platform):
        autorequests = [PartSpec(mountpoint="/", fstype=storage.defaultFSType,
                                 size=10240, grow=True, asVol=False,
                                 requiredSpace=20*1024)]

        bootreq = platform.setDefaultPartitioning()
        if bootreq:
            autorequests.extend(bootreq)

        (minswap, maxswap) = iutil.swapSuggestion()
        autorequests.append(PartSpec(fstype="swap", size=minswap, maxSize=maxswap,
                                     grow=True, asVol=False))
        storage.autoPartitionRequests = autorequests

    def setSteps(self, anaconda):
        installclass.BaseInstallClass.setSteps(self, anaconda)
        # Unskip memcheck
        anaconda.dispatch.skipStep("memcheck", skip = 0)
        anaconda.dispatch.skipStep("protectstorage", skip = 0)
        anaconda.dispatch.skipStep("tasksel",permanent=1)
        anaconda.dispatch.skipStep("firewall")
        anaconda.dispatch.skipStep("group-selection")
        anaconda.dispatch.skipStep("filtertype")
        anaconda.dispatch.skipStep("filter")
        anaconda.dispatch.skipStep("partition")
#        anaconda.dispatch.skipStep("parttype")

    def getBackend(self):
        if flags.livecdInstall:
            import livecd
            return livecd.LiveCDCopyBackend
        else:
            return yuminstall.YumBackend

    def postAction(self, anaconda):
        installclass.BaseInstallClass.postAction(self, anaconda)
        postscriptlines ="""
if [ -e /etc/libvirt/qemu/networks/autostart/default.xml ]; then
  rm -f /etc/libvirt/qemu/networks/autostart/default.xml
fi
"""
        postscript = AnacondaKSScript(postscriptlines,
                                      inChroot=True,
                                      logfile='/root/euca-common-ks-post.log',
                                      type=KS_SCRIPT_POST)
        postscript.run(anaconda.rootPath, flags.serial, anaconda.intf)

    def __init__(self):
        installclass.BaseInstallClass.__init__(self)
Python
0
@@ -4353,16 +4353,67 @@ .xml%0Afi%0A +/sbin/chkconfig ntpdate on%0A/sbin/chkconfig ntpd on%0A %22%22%22%0A
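Decoded, the hunk above splices two service-enablement commands into the kickstart %post script string just before its closing triple quote; modulo exact whitespace, the patched value is:

postscriptlines = """
if [ -e /etc/libvirt/qemu/networks/autostart/default.xml ]; then
  rm -f /etc/libvirt/qemu/networks/autostart/default.xml
fi
/sbin/chkconfig ntpdate on
/sbin/chkconfig ntpd on
"""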
7b1d8bd1b2a8b1cb78ec9ab13b61acde977e5642
remove ability to create/delete volumes on v2
api/v2/views/volume.py
api/v2/views/volume.py
import django_filters

from rest_framework import viewsets

from core.models import Volume

from api.v2.serializers.details import VolumeSerializer
from core.query import only_current_source


class VolumeFilter(django_filters.FilterSet):
    min_size = django_filters.NumberFilter(name="size", lookup_type='gte')
    max_size = django_filters.NumberFilter(name="size", lookup_type='lte')

    class Meta:
        model = Volume
        fields = ['min_size', 'max_size', 'projects']


class VolumeViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows providers to be viewed or edited.
    """
    queryset = Volume.objects.all()
    serializer_class = VolumeSerializer
    filter_class = VolumeFilter

    def get_queryset(self):
        """
        Filter projects by current user
        """
        user = self.request.user
        return Volume.objects.filter(only_current_source(),
                                     instance_source__created_by=user)
Python
0.000001
@@ -705,16 +705,92 @@ meFilter +%0A http_method_names = %5B'get', 'put', 'patch', 'head', 'options', 'trace'%5D %0A%0A de
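Decoded, the hunk above adds a single class attribute right after filter_class. Listing http_method_names without 'post' and 'delete' is the standard Django/DRF way to disable create and destroy on a ModelViewSet, which matches the commit subject; a sketch of the patched class body (Volume, VolumeSerializer and VolumeFilter come from the record above):

class VolumeViewSet(viewsets.ModelViewSet):
    queryset = Volume.objects.all()
    serializer_class = VolumeSerializer
    filter_class = VolumeFilter
    # 'post' and 'delete' are absent, so create/delete requests get 405
    http_method_names = ['get', 'put', 'patch', 'head', 'options', 'trace']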
7f709b974c37020054634c09589ffb9922c36737
Fix typo in variable name.
model/model.py
model/model.py
#! /usr/bin/env python2.7

import lib.database

class Model(object):
	@classmethod
	def init_model(cls, table, primary_key):
		cls._table = table
		cls._primary_key = primary_key
		cls._db = lib.database.db
		try:
			cls._db.cursor.execute("SELECT * FROM %s WHERE FALSE"%cls._table)
			cls.COLUMNS = set([desc.name for desc in cls._db.cursor.description])
			cls.SECRET_COLUMNS = set(["created_at", "created_by", "approved_at", "approved_by", "rejected_at", "rejected_by", "deleted_at", "deleted_by"])
			cls._db.cursor.execute("""SELECT column_name FROM information_schema.columns
					WHERE table_name = '%s' AND table_catalog = '%s'
					AND is_nullable = 'NO' AND column_default IS NULL"""%(cls._table, cls._db.db_name))
			cls.REQURED_COLUMNS = set(zip(*cls._db.cursor.fetchall())[0])
			cls._db.conn.commit()
		except lib.database.psycopg2.DatabaseError:
			cls._db.conn.rollback()
			raise

	def __init__(self, db_id):
		self._id = db_id
		self._cache = dict()
		self._cache_populate()

	def _cache_populate(self, key="*"):
		if key not in self.COLUMNS and key != "*":
			raise Exception("No column: %s"%key)
		try:
			self._db.cursor.execute("SELECT %s FROM %s WHERE %s = %%s"%(key, self._table, self._primary_key), (self._id,))
			for k, v in enumerate(self._db.cursor.fetchall()[0]):
				self._cache[self._db.cursor.description[k].name] = v
			self._db.conn.commit()
		except lib.database.psycopg2.DatabaseError:
			self._db.conn.rollback()
			raise
		return v

	def __getitem__(self, key):
		if key not in self.COLUMNS:
			raise Exception("No column: %s"%key)
		try:
			return self._cache[key]
		except KeyError:
			return self._cache_populate(key)
		except:
			raise

	def __setitem__(self, key, value):
		if key not in self.COLUMNS:
			raise Exception("No column: %s"%key)
		try:
			self._db.cursor.execute("UPDATE %s SET %s = %%s WHERE %s = %%s"%(self._table, key, self._primary_key), (value, self._id))
			self._db.conn.commit()
		except lib.database.psycopg2.DatabaseError:
			self._db.conn.rollback()
			raise
		self._cache_populate(key)

	def __delitem__(self, key):
		raise Exception("Can't delete items")

	def keys(self):
		return list(self.COLUMNS - self.SECRET_COLUMNS)

	@classmethod
	def create(cls, params):
		param_order = params.keys()
		param_keys = set(param_order)
		for column in cls.REQURED_COLUMNS:
			if column not in param_keys:
				raise Exception("Missing column: %s"%column)
		sql = "INSERT INTO %s (%s) VALUES (%s) RETURNING %s"% \
			(cls._table, ', '.join(param_order), ', '.join(("%s",)*len(param_order)), cls._primary_key)
		values = [params[k] for k in param_order]
		try:
			cls._db.cursor.execute(sql, values)
			db_id = cls._db.cursor.fetchall()[0][0]
			cls._db.conn.commit()
		except lib.database.psycopg2.DatabaseError:
			cls._db.conn.rollback()
			raise
		return cls(db_id)
Python
0.000014
@@ -727,32 +727,33 @@ me))%0A%09%09%09cls.REQU +I RED_COLUMNS = se @@ -2317,16 +2317,17 @@ cls.REQU +I RED_COLU
53fbbe47ea1d9af09df20f884399148b80373d3e
Fix update result function
libcrowds_analyst/view.py
libcrowds_analyst/view.py
# -*- coding: utf8 -*-
"""View module for libcrowds-analyst."""

import json
import enki
from redis import Redis
from rq import Queue
from flask import render_template, request, abort, flash, redirect, url_for
from flask import current_app
from libcrowds_analyst import analysis, auth, forms


queue = Queue('libcrowds_analyst', connection=Redis())


def _get_first_result(project_id, **kwargs):
    """Return a result or abort an exception is thrown."""
    res = enki.pbclient.find_results(project_id, limit=1, all=1, **kwargs)
    if isinstance(res, dict) and 'status_code' in res:  # pragma: no cover
        abort(res['status_code'])
    return res[0] if res else None


def _update_result(result):
    """Update a result or abort if exception thrown."""
    res = enki.pbclient._update_result(result)
    if isinstance(res, dict) and 'status_code' in res:  # pragma: no cover
        abort(res.status_code)


def index():
    """Index view."""
    if request.method == 'GET':
        return render_template('index.html', title="LibCrowds Analyst")
    else:
        try:
            e = enki.Enki(current_app.config['API_KEY'],
                          current_app.config['ENDPOINT'],
                          request.json['project_short_name'])
        except enki.ProjectNotFound:  # pragma: no cover
            abort(404)
        analyst_func = analysis.get_analyst_func(e.project.category_id)
        if analyst_func:
            queue.enqueue(analyst_func, current_app.config['API_KEY'],
                          current_app.config['ENDPOINT'],
                          request.json['project_short_name'],
                          request.json['task_id'])
            return "OK"
        else:
            abort(404)


def analyse_empty_result(short_name):
    """View for analysing empty results."""
    try:
        e = enki.Enki(current_app.config['API_KEY'],
                      current_app.config['ENDPOINT'], short_name)
    except enki.ProjectNotFound:  # pragma: no cover
        abort(404)
    if request.method == 'POST':
        data = dict((k, request.form.getlist(k)) for k in request.form.keys())
        result_id = data.pop('result_id', None)
        data.pop('csrf_token', None)
        result = _get_first_result(e.project.id, id=result_id)
        if not result:  # pragma: no cover
            flash('That result does not exist!', 'danger')
            return redirect(url_for('.index'))
        result.info = json.dumps(data)
        _update_result(result)
    result = _get_first_result(e.project.id, info='Unanalysed')
    if not result:  # pragma: no cover
        flash('There are no unanlysed results to process!', 'success')
        return redirect(url_for('.index'))
    e.get_tasks(task_id=result.task_id)
    e.get_task_runs()
    task = e.tasks[0]
    task_runs = e.task_runs[task.id]
    url = 'category_{0}.html'.format(e.project.category_id)
    return render_template(url, project=e.project, result=result,
                           task=task, task_runs=task_runs,
                           title=e.project.name)


def edit_result(short_name, result_id):
    """View for directly editing a result."""
    try:
        e = enki.Enki(current_app.config['API_KEY'],
                      current_app.config['ENDPOINT'], short_name)
    except enki.ProjectNotFound:  # pragma: no cover
        abort(404)
    result = _get_first_result(e.project.id, id=result_id)
    if not result:  # pragma: no cover
        abort(404)
    title = "Editing result {0}".format(result.id)
    form = forms.EditResultForm(request.form)
    if request.method == 'POST' and form.validate():
        result.info = json.loads(form.info.data)
        _update_result(result)
        flash('Result updated.', 'success')
    elif request.method == 'POST' and not form.validate():  # pragma: no cover
        flash('Please correct the errors.', 'danger')
    form.info.data = json.dumps(result.info)
    return render_template('edit_result.html', form=form, title=title)


def reanalyse(short_name):
    """View for triggering reanalysis of all results."""
    try:
        e = enki.Enki(current_app.config['API_KEY'],
                      current_app.config['ENDPOINT'], short_name)
    except enki.ProjectNotFound:  # pragma: no cover
        abort(404)
    form = forms.ReanalysisForm(request.form)
    analyst_func = analysis.get_analyst_func(e.project.category_id)
    if not analyst_func:
        flash('No analyst configured for this category of project.', 'danger')
    elif request.method == 'POST' and form.validate():
        e.get_tasks()
        sleep = int(request.form.get('sleep', 2))  # To handle API rate limit
        for t in e.tasks:
            queue.enqueue(analyst_func, current_app.config['API_KEY'],
                          current_app.config['ENDPOINT'], short_name,
                          t.id, sleep=sleep)
        flash('''Results for {0} completed tasks will be reanalysed.
              '''.format(len(e.tasks)), 'success')
    return render_template('reanalyse.html', title="Reanalyse results",
                           project=e.project, form=form)
Python
0.000001
@@ -777,17 +777,16 @@ bclient. -_ update_r
daee45e358f61d2e9cfef109efd9f474f7e91a4d
Add viz import to top level __init__
pycroscopy/__init__.py
pycroscopy/__init__.py
""" The Pycroscopy package. Submodules ---------- .. autosummary:: :toctree: _autosummary core """ from . import core from .core import * from .io import translators from . import analysis from . import processing from .__version__ import version as __version__ from .__version__ import time as __time__ __all__ = core.__all__
Python
0
@@ -219,16 +219,34 @@ ocessing +%0Afrom . import viz %0A%0Afrom .
efa4aede4b9faa9f0fc8639e4495ca8e98127d15
Bump @graknlabs_verification
dependencies/graknlabs/dependencies.bzl
dependencies/graknlabs/dependencies.bzl
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

def graknlabs_common():
    git_repository(
        name = "graknlabs_common",
        remote = "https://github.com/graknlabs/common",
        tag = "0.2.2" # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_common
    )

def graknlabs_build_tools():
    git_repository(
        name = "graknlabs_build_tools",
        remote = "https://github.com/graknlabs/build-tools",
        commit = "04f9678403cdbde889b8e25cc74d16bf1751fd81", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
    )

def graknlabs_verification():
    git_repository(
        name = "graknlabs_verification",
        remote = "https://github.com/graknlabs/verification",
        commit = "7221428a2315b2ec26438b7c99363fd36052e380"
    )
Python
0
@@ -1614,48 +1614,48 @@ = %22 -7221428a2315b2ec26438b7c99363fd36052e380 +0f831029010385cf584cfd6787f574e9f210dfe2 %22%0A
dd83da792fbe1c90da855fe7d298f446a839f8ca
change for localhost in door.py
paulla.ircbot/src/paulla/ircbot/plugins/door.py
paulla.ircbot/src/paulla/ircbot/plugins/door.py
import irc3
from irc3.plugins.cron import cron
import requests
from datetime import datetime


@irc3.plugin
class Door:
    """ Door state plugin """

    def __init__(self, bot):
        self.bot = bot
        self.log = self.bot.log

    @irc3.event(irc3.rfc.MY_PRIVMSG)
    def question(self, mask, event, target, nick, data):
        #TODO
        pass

    @irc3.event(irc3.rfc.PRIVMSG)
    def question(self, mask, event, target, data):
        #TODO
        pass

    @cron('*/1 * * * *')
    def anoncement(self):
        r = requests.get('http://sd-36895.dedibox.fr:2222').json()
        last_change = datetime.strptime(r['lastchange'], "%d/%m/%Y %H:%M:%S")
        if (datetime.now() - last_change).seconds < 60:
            if "0" in r['state']:
                self.bot.privmsg('#test-mika','Le lab est ouvert')
            elif "1" in r['state']:
                self.bot.privmsg('#test-mika','Le lab viens de fermer')
Python
0.000001
@@ -560,27 +560,17 @@ p:// -sd-36895.dedibox.fr +localhost :222
d564d73622902f10f46ae53d2e72090b9f93cc7b
Fix for AC-694, we weren't saving the instance if using the cached user
awx/main/middleware.py
awx/main/middleware.py
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save
from django.utils.functional import curry

from awx.main.models import ActivityStream, AuthToken

import json
import uuid
import urllib2


class ActivityStreamMiddleware(object):

    def process_request(self, request):
        self.isActivityStreamEvent = False
        if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated():
            user = request.user
        else:
            user = None
        self.instances = []
        self.cached_user = None
        set_actor = curry(self.set_actor, user)
        self.disp_uid = str(uuid.uuid1())
        self.finished = False
        post_save.connect(set_actor, sender=ActivityStream, dispatch_uid=self.disp_uid, weak=False)

    def process_response(self, request, response):
        drf_request = getattr(request, 'drf_request', None)
        drf_user = getattr(drf_request, 'user', None)
        # FIXME: Associate the user above from Django REST framework with instances.
        post_save.disconnect(dispatch_uid=self.disp_uid)
        self.finished = True
        if self.isActivityStreamEvent:
            for instance in self.instances:
                if self.cached_user is not None:
                    instance.user = self.cached_user
                elif "current_user" in request.COOKIES and "id" in request.COOKIES["current_user"]:
                    userInfo = json.loads(urllib2.unquote(request.COOKIES['current_user']).decode('utf8'))
                    userActual = User.objects.get(id=int(userInfo['id']))
                    self.cached_user = userActual
                    instance.user = self.cached_user
                    instance.save()
                elif "HTTP_AUTHORIZATION" in request.META:
                    token_actual = request.META['HTTP_AUTHORIZATION']
                    token_actual = token_actual.split(" ")[1]
                    matching_tokens = AuthToken.objects.filter(key=token_actual)
                    if matching_tokens.exists():
                        self.cached_user = matching_tokens[0].user
                        instance.user = self.cached_user
                        instance.save()
                else:
                    obj1_type_actual = instance.object1_type.split(".")[-1]
                    if obj1_type_actual in ("InventoryUpdate", "ProjectUpdate", "JobEvent", "Job") and instance.id is not None:
                        instance.delete()
        return response

    def set_actor(self, user, sender, instance, **kwargs):
        if not self.finished:
            if sender == ActivityStream:
                if isinstance(user, User) and instance.user is None:
                    instance.user = user
                else:
                    if instance not in self.instances:
                        self.isActivityStreamEvent = True
                        self.instances.append(instance)
        else:
            self.isActivityStreamEvent = False
Python
0
@@ -1371,32 +1371,68 @@ elf.cached_user%0A + instance.save()%0A
f90f7bd226c42e900074f0c7bfcc5210e580b5ed
Fix error reporting to actually set the right response code.
obstaravania/serving.py
obstaravania/serving.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from data_model import Firma, Obstaravanie, Firma, Candidate, Session
import db
from utils import obstaravanieToJson, getEidForIco

from jinja2 import Template
import json
from paste import httpserver
import webapp2

db.connect(False)

def errorJSON(code, text):
    d = {"code": code, "message": "ERROR: " + text}
    return d

class MyServer(webapp2.RequestHandler):
    def returnJSON(self,j):
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(j, separators=(',',':')))

    def get(self):
        try:
            self.process()
        except:
            self.returnJSON(errorJSON(
                500, "Internal server error: sa mi neda vycentrovat!"))

class ServeObstaravanie(MyServer):
    def process(self):
        try:
            oid = int(self.request.GET["id"])
        except:
            self.returnJSON(errorJSON(400, "Incorrect id"))
            return
        session = Session()
        obstaravanie = session.query(Obstaravanie).filter_by(id=oid).first()
        if obstaravanie is None:
            self.returnJSON(errorJSON(400, "No matching id"))
            return
        j = obstaravanieToJson(obstaravanie, 20, 20)
        # TODO: before launching this, move this to load only once
        singleTemplate = Template(
            open("obstaravanie.tmpl").read().decode("utf8"))
        html = singleTemplate.render(obstaravanie=j)
        self.response.write(html.encode("utf8"))

class ServeCompany(MyServer):
    def process(self):
        try:
            company_id = int(self.request.GET["id"])
        except:
            self.returnJSON(errorJSON(400, "Incorrect id"))
            return
        session = Session()
        company = session.query(Firma).filter_by(id=company_id).first()
        result = {
            "name": company.name,
            "ico": company.ico,
            "eid": getEidForIco(company.ico)
        }
        candidates = []
        for candidate in session.query(Candidate). \
                filter_by(company_id=company_id). \
                order_by(-Candidate.score):
            candidates.append([
                candidate.score,
                obstaravanieToJson(candidate.obstaravanie, candidates=0,
                                   full_candidates=0),
                obstaravanieToJson(candidate.reason, candidates=0,
                                   full_candidates=0)
            ])
        result["obstaravania"] = candidates
        singleTemplate = Template(open("firma.tmpl").read().decode("utf8"))
        html = singleTemplate.render(firma=result)
        self.response.write(html.encode("utf8"))

def main():
    app = webapp2.WSGIApplication(
        [
            ('/obstaravanie', ServeObstaravanie),
            ('/obstaravanieFirma', ServeCompany)
        ], debug=False)
    httpserver.serve(
        app,
        host='127.0.0.1',
        port='8082')

if __name__ == '__main__':
    main()
Python
0
@@ -279,100 +279,8 @@ e)%0A%0A -def errorJSON(code, text):%0A d = %7B%22code%22: code, %22message%22: %22ERROR: %22 + text%7D%0A return d%0A %0Acla @@ -477,16 +477,170 @@ ':')))%0A%0A + def returnError(self, code, message):%0A self.response.set_status(code)%0A self.returnJSON(%7B'code': code, 'message': 'ERROR: ' + message%7D)%0A%0A def @@ -729,30 +729,21 @@ f.return -JSON(errorJSON +Error (%0A @@ -961,38 +961,29 @@ self.return -JSON(errorJSON +Error (400, %22Incor @@ -1170,38 +1170,29 @@ self.return -JSON(errorJSON +Error (400, %22No ma @@ -1692,22 +1692,13 @@ turn -JSON(errorJSON +Error (400
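Decoded, the diff above deletes the module-level errorJSON() helper and gives MyServer a returnError() method instead, so the HTTP status code is actually set on the response rather than only echoed inside the JSON body; the added method, reconstructed from the hunks, reads:

    def returnError(self, code, message):
        self.response.set_status(code)
        self.returnJSON({'code': code, 'message': 'ERROR: ' + message})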
a228f73ea12fa4fcfa29bc2249c74c9e94954cff
Test function name adjustment.
cnxrepo/tests.py
cnxrepo/tests.py
# -*- coding: utf-8 -*-
"""Application tests"""
import os
import transaction

from nose import with_setup
from pyramid import testing
from pyramid.paster import get_appsettings


HERE = os.path.abspath(os.path.dirname(__file__))
TEST_RESOURCE_FILENAME = 'test-resource.png'
with open(os.path.join(HERE, TEST_RESOURCE_FILENAME), 'rb') as f:
    TEST_RESOURCE_DATA = f.read()


def _acquire_sql_session():
    """Acquire a live SQL session to the actual database implementation."""
    # Grab the SQL DB settings to initialze the SQLAlchemy engine.
    # XXX Obtaining the configuration setting for the SQL DB is
    #     currently evil and MUST be changed in the future.
    try:
        config_file = os.environ['PYRAMID_INI']
    except KeyError:
        raise RuntimeError("Missing PYRAMID_INI environment variable.")
    settings = get_appsettings(config_file)
    # Initialize the Session.
    from sqlalchemy import engine_from_config
    from .models import DBSession, Base
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.create_all(engine)
    # Now the DBSession can be used as usual.


def test():
    _acquire_sql_session()
    for func in [f for n, f in globals().items() if n.startswith('check')]:
        with testing.testConfig() as config:
            yield func, config
        transaction.abort()


def check_contentadded_resource_subscriber(config):
    # Configure the event subscriber in question.
    from .models import catalog_resources_on_add
    config.add_subscriber(catalog_resources_on_add)
    # Create a DB session to work with.
    from .models import DBSession
    session = DBSession()
    # Make some content...
    from .models import Content, Resource
    resource = Resource(TEST_RESOURCE_FILENAME, TEST_RESOURCE_DATA)
    session.add(resource)
    session.flush()  # Flush to get an id for the resource.
    external_resource_uri = 'http://example.com/play-physics.swf'
    content_body = 'Content <img src="/resource/{}" /> Content' \
                   '<embed src="{}"></embed>'.format(resource.id,
                                                     external_resource_uri)
    content = Content('Content Title', content_body)
    session.add(content)
    session.flush()

    # Now verify the relationships were created using the relationship
    # properties on the objects.
    assert content in resource.used_in
    assert resource in content.internal_resources
    from .models import ExternalResource
    external_resource = session.query(ExternalResource).one()
    assert content in external_resource.used_in
    assert external_resource in content.external_resources


def check_contentadd_reference_subscriber(config):
    # Configure the event subscriber in question.
    from .models import catalog_content_references_on_add
    config.add_subscriber(catalog_content_references_on_add)
    # Create a DB session to work with.
    from .models import DBSession
    session = DBSession()
    # Make some content...
    from .models import Content
    content_one = Content('One', 'Content One')
    content_two = Content('Two', 'Content Two')
    session.add_all([content_one, content_two])
    session.flush()
    # And now add a piece of content that references other content.
    external_reference_uri = 'http://example.com/blah.html'
    content_body = '<a href="/content/{}">one</a>' \
                   '<a href="/content/{}">two</a>' \
                   '<a href="{}">blah</a>' \
                   .format(content_one.id, content_two.id,
                           external_reference_uri)
    content = Content('Three', content_body)
    session.add(content)
    session.flush()

    # Now verify the relationships were created using the relationship
    # properties on the objects.
    assert content_one in content.internal_references
    assert content_two in content.internal_references
    from .models import ExternalReference
    external_reference = session.query(ExternalReference).one()
    assert content in external_reference.used_in
    assert external_reference in content.external_references


# def check_race_condition_w_content_before_resource(config):
#     pass
Python
0
@@ -1388,16 +1388,17 @@ _content +_ added_re @@ -2686,19 +2686,22 @@ _content +_ add +ed _referen
1916f45ed5d6a77a585153a4daacc8a6ab48b3a3
fix conftest.py
dit/conftest.py
dit/conftest.py
""" Configuration for tests. """ from hypothesis import settings settings.default.deadline = None
Python
0.000102
@@ -73,24 +73,40 @@ ngs. -default. +register_profile(%22dit%22, deadline = N @@ -105,11 +105,40 @@ line - = += None +)%0Asettings.load_profile(%22dit%22)%0A
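Decoded, the patched conftest.py swaps the direct mutation of settings.default (which newer hypothesis releases reject) for a registered, named profile:

from hypothesis import settings

settings.register_profile("dit", deadline=None)
settings.load_profile("dit")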
920f5476cd9f63ff611567a63dd125cc6efaa228
Fix import
deepvoice/data/cmudict.py
deepvoice/data/cmudict.py
import numpy as np
import itertools
import re

from keras.utils.data_utils import get_file
from sklearn.model_selection import train_test_split

from deepvoice.util.data_util import CharacterTable


def get_cmudict(origin='https://raw.githubusercontent.com/cmusphinx/cmudict/master/cmudict.dict',
                test_size=0.33, verbose=False, maxlen_x=None, maxlen_y=None,
                blacklist='().0123456789', max_phonemes=np.inf, max_chars=np.inf, seed=42):
    """
    Process CMU pronounciation dictionary as one-hot encoded grapheme and phoneme data

    # Output
        (X_train, y_train): One-hot encoded graphemes and phonemes
        (X_test, y_test): Test data
        (xtable, ytable): Charecter en/decoding tables

    # Arguments
        seed: random seed for data split and shuffle
        test_size: fraction of data to set aside for testing, 0 will return empty test data.
        verbose: print messages about data processing
        maxlen_x: crop and pad grapheme sequences to this length
        maxlen_y: crop and pad phoneme sequences to this length
        max_phonemes: restrict data to this <=max_phonemes
        max_chars: restrict data to this <=max_charectors
        blacklist: remove words with these charectors e.g. HOUSE(2) for the second varient of house

    # Example
        (X_train, _), (X_test, ), (xtable, ytable) = get_cmudict(
            verbose=1,
            test_size=0.33
        )
        [''.join(i) for i in xtable.decode(X_train[:5])]
        [''.join(i) for i in ytable.decode(y_train[:5])]
    """
    cmudict_path = get_file("cmudict-py", origin=origin, untar=False)

    # load data
    X, y= [], []
    for line in open(cmudict_path,'r').readlines():
        word, pron = line.strip().split(' ',1)
        X.append(list(word))
        y.append(pron.split(' '))
    X = np.array(X)
    y = np.array(y)
    if verbose:
        print('loaded {} entries from cmu_dict'.format(len(X)))

    # compile blacklist
    p=re.compile('[%s]'%(blacklist))

    # filter out duplicate entries like 'HOUSE(2)'
    X, y = zip(*[(x,y) for x,y in zip(X,y) if not bool(p.findall(''.join(x)))])
    if verbose:
        print('removed duplicate entries leaving {}'.format(len(X)))

    # filter out complex entries
    X, y = zip(*[(x,y) for x,y in zip(X,y) if len(y)<=max_phonemes and len(x)<=max_chars])
    if verbose:
        print('restricted to less than {} phonemes leaving {} entries'.format(max_phonemes, len(X)))

    # split data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)

    # encode x and y and pad them
    xtable, X_train, X_test = _encode_chartable(X_train, X_test, maxlen_x)
    ytable, y_train, y_test = _encode_chartable(y_train, y_test, maxlen_y)

    if verbose:
        print('X_train shape:', X_train.shape)
        print('y_train shape:', y_train.shape)
        print('X_test shape:', X_test.shape)
        print('y_test shape:', y_test.shape)

    return (X_train, y_train), (X_test, y_test), (xtable, ytable)


def _encode_chartable(train, test, maxlen=None):
    load = train
    test_empty = len(test) == 0
    if not test_empty:
        load += test
    table = CharacterTable()
    table.fit(load)
    if maxlen:
        table.maxlen = maxlen
    return table, table.encode(train), np.empty(0) if test_empty else table.encode(test)
Python
0.000002
@@ -157,22 +157,28 @@ ice. -util.data_util +data.character_table imp
7d43e6fd794fa1ef942a39937a653d5b18e867de
reorganize automatic dashboard
.github/scripts/create_dashboard.py
.github/scripts/create_dashboard.py
import os
from glob import glob

statuses = glob("workflow_testing_indicator/notebooks/*/*/*.png")
user = "probml"
base_url = f"https://github.com/{user}/pyprobml/tree/"
get_url = lambda x: f'<img width="20" alt="image" src=https://raw.githubusercontent.com/{user}/pyprobml/{x}>'
get_nb_url = lambda x: os.path.join(base_url, "master", x.split("/", 1)[-1].replace(".png", ".ipynb"))

# sort statuses
def sort_key(x):
    parts = x.split("/")
    return (parts[-3], parts[-2])

statuses = sorted(statuses, key=sort_key)

# write an md file
log_counter = 0
file_counter = 0
with open("workflow_testing_indicator/README.md", "w") as f:
    f.write(f"# PyProbML status\n")
    f.write(f"\n")
    f.write(f"## Status\n")
    f.write(f"\n")
    f.write(f"| Job | Status | Log |\n")
    f.write(f"| --- | --- | --- |\n")
    for status in statuses:
        job = status.split("/", 2)[-1].split(".")[0]
        url = get_url(status)
        url_to_nb = get_nb_url(status)
        if os.path.exists(status.replace(".png", ".log")):
            log = os.path.join(base_url, status.replace(".png", ".log"))
            log_counter += 1
        else:
            log = "-"
        f.write(f"| [{job}]({url_to_nb}) | {url} | [log]({log}) |\n")
        file_counter += 1

    f.write(f"\n")
    f.write(f"## Summary\n")
    f.write(f"\n")
    final_log = f"In total, {file_counter} jobs were tested.\n{log_counter} jobs failed.\n"
    f.write(final_log)
    print(final_log)
Python
0.000073
@@ -804,24 +804,58 @@ %7C --- %7C%5Cn%22)%0A + passing = %5B%5D%0A failing = %5B%5D%0A for stat @@ -1124,16 +1124,97 @@ .log%22))%0A + failing.append(f%22%7C %5B%7Bjob%7D%5D(%7Burl_to_nb%7D) %7C %7Burl%7D %7C %5Blog%5D(%7Blog%7D) %7C%5Cn%22)%0A @@ -1274,31 +1274,42 @@ %22-%22%0A -f.write + passing.append (f%22%7C %5B%7Bjob%7D%5D @@ -1373,24 +1373,81 @@ ounter += 1%0A + for entry in passing+failing:%0A f.write(entry)%0A f.write(
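Decoded, the patch stops writing each markdown row the moment it is built: rows are buffered in passing and failing lists and flushed with passing first, so the generated README groups healthy notebooks above broken ones. A tiny self-contained sketch of that buffering pattern (the job names here are hypothetical, not from the record):

# Hypothetical jobs: (name, has_failure_log)
jobs = [("nb_a", False), ("nb_b", True), ("nb_c", False)]

passing, failing = [], []
for name, failed in jobs:
    row = f"| {name} | {'failed' if failed else 'ok'} |\n"
    (failing if failed else passing).append(row)

# Passing rows are emitted before failing ones, as in the patch.
for entry in passing + failing:
    print(entry, end="")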
2bac8c8df7a6f99fdc8a4efbdf2a094d3c6a7bae
fix link type data
product_template_multi_link/__manifest__.py
product_template_multi_link/__manifest__.py
# Copyright 2017-Today GRAP (http://www.grap.coop).
# @author Sylvain LE GAL <https://twitter.com/legalsylvain>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

{
    "name": "Product Multi Links (Template)",
    "version": "13.0.1.1.0",
    "category": "Generic Modules",
    "author": "GRAP, ACSONE SA/NV, Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/e-commerce",
    "license": "AGPL-3",
    "depends": ["sale"],
    "data": [
        "security/product_template_link_type.xml",
        "views/product_template_link_type.xml",
        "security/ir.model.access.csv",
        "views/action.xml",
        "views/product_template_view.xml",
        "views/product_template_link_view.xml",
        "views/menu.xml",
    ],
    "demo": [
        "data/product_template_link_type.xml",
        "demo/product_template_link_type.xml",
        "demo/product_template_link.xml",
    ],
    "installable": True,
}
Python
0.000012
@@ -464,24 +464,71 @@ %22data%22: %5B%0A + %22data/product_template_link_type.xml%22,%0A %22sec @@ -823,64 +823,8 @@ %22: %5B -%0A %22data/product_template_link_type.xml%22,%0A %22dem @@ -857,24 +857,16 @@ pe.xml%22, -%0A %22demo/p @@ -886,30 +886,24 @@ te_link.xml%22 -,%0A %5D,%0A %22inst
e19a99a555cd39cd380b7ede12da2190eb164eec
Make CSV errors into warnings
ingestors/tabular/csv.py
ingestors/tabular/csv.py
import io
import csv
import logging
from followthemoney import model

from ingestors.ingestor import Ingestor
from ingestors.support.encoding import EncodingSupport
from ingestors.support.table import TableSupport
from ingestors.exc import ProcessingException

log = logging.getLogger(__name__)


class CSVIngestor(Ingestor, EncodingSupport, TableSupport):
    """Decode and ingest a CSV file.

    This expects a properly formatted CSV file with a header in the
    first row.
    """
    MIME_TYPES = [
        'text/csv',
        'text/tsv',
        'text/tab-separated-values'
    ]
    EXTENSIONS = ['csv', 'tsv']
    SCORE = 7

    def ingest(self, file_path, entity):
        entity.schema = model.get('Table')
        with io.open(file_path, 'rb') as fh:
            encoding = self.detect_stream_encoding(fh)
            log.debug("Detected encoding [%r]: %s", entity, encoding)

        fh = io.open(file_path, 'r', encoding=encoding, errors='replace')
        try:
            sample = fh.read(4096 * 10)
            fh.seek(0)
            dialect = csv.Sniffer().sniff(sample)
            reader = csv.reader(fh, dialect=dialect)
            self.emit_row_tuples(entity, reader)
        except UnicodeDecodeError as ude:
            log.warning("Encoding error: %r", entity)
            raise ProcessingException("Could not decode CSV (%s)" % encoding) from ude  # noqa
        except (Exception, csv.Error) as err:
            log.exception("CSV error: %s", err)
            raise ProcessingException("Invalid CSV: %s" % err) from err
        finally:
            fh.close()
Python
0.998737
@@ -1195,16 +1195,28 @@ except +(Exception, UnicodeD @@ -1229,191 +1229,8 @@ rror - as ude:%0A log.warning(%22Encoding error: %25r%22, entity)%0A raise ProcessingException(%22Could not decode CSV (%25s)%22 %25 encoding) from ude # noqa%0A except (Exception , cs @@ -1266,17 +1266,15 @@ log. -exception +warning (%22CS
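Decoded, the patch collapses the dedicated UnicodeDecodeError branch into the generic handler and downgrades log.exception to log.warning, so a malformed upload no longer produces error-level noise. The patched handler, reconstructed from the hunks, reads:

        except (Exception, UnicodeDecodeError, csv.Error) as err:
            log.warning("CSV error: %s", err)
            raise ProcessingException("Invalid CSV: %s" % err) from err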
f7f37620a605954e1aeb2034f37be7ff35b0b4d6
rewrite api root view
djoser/views.py
djoser/views.py
from django.contrib.auth import get_user_model
from rest_framework import generics, permissions, status, response
from rest_framework.serializers import BaseSerializer
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.reverse import reverse
from django.contrib.auth.tokens import default_token_generator
from . import serializers, settings, utils

User = get_user_model()


@api_view(('GET',))
def api_root(request, format=None):
    """
    Root endpoint - use one of sub endpoints.
    """
    urls_mapping = {
        'me': 'user',
        'register': 'register',
        'login': 'login',
        'logout': 'logout',
        'activate': 'activate',
        'change-' + User.USERNAME_FIELD: 'set_username',
        'change-password': 'set_password',
        'password-reset': 'password_reset',
        'password-reset-confirm': 'password_reset_confirm',
    }
    return Response(
        dict([(key, reverse(url_name, request=request, format=format))
              for key, url_name in urls_mapping.items()])
    )


class RegistrationView(utils.SendEmailViewMixin, generics.CreateAPIView):
    """
    Use this endpoint to register new user.
    """
    permission_classes = (
        permissions.AllowAny,
    )
    token_generator = default_token_generator

    def get_serializer_class(self):
        if settings.get('LOGIN_AFTER_REGISTRATION'):
            return serializers.UserRegistrationWithAuthTokenSerializer
        return serializers.UserRegistrationSerializer

    def post_save(self, obj, created=False):
        if settings.get('LOGIN_AFTER_REGISTRATION'):
            Token.objects.get_or_create(user=obj)
        if settings.get('SEND_ACTIVATION_EMAIL'):
            self.send_email(**self.get_send_email_kwargs(obj))

    def perform_create(self, serializer):
        instance = serializer.save()
        self.post_save(obj=instance, created=True)

    def get_send_email_extras(self):
        return {
            'subject_template_name': 'activation_email_subject.txt',
            'plain_body_template_name': 'activation_email_body.txt',
            'html_body_template_name': 'activation_email_body.html',
        }

    def get_email_context(self, user):
        context = super(RegistrationView, self).get_email_context(user)
        context['url'] = settings.get('ACTIVATION_URL').format(**context)
        return context


class LoginView(utils.ActionViewMixin, generics.GenericAPIView):
    """
    Use this endpoint to obtain user authentication token.
    """
    serializer_class = serializers.UserLoginSerializer
    permission_classes = (
        permissions.AllowAny,
    )

    def action(self, serializer):
        token, _ = Token.objects.get_or_create(user=serializer.object)
        return Response(
            data=serializers.TokenSerializer(token).data,
            status=status.HTTP_200_OK,
        )


class LogoutView(generics.GenericAPIView):
    """
    Use this endpoint to logout user (remove user authentication token).
    """
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def post(self, request):
        Token.objects.filter(user=request.user).delete()
        return response.Response(status=status.HTTP_200_OK)


class PasswordResetView(utils.ActionViewMixin, utils.SendEmailViewMixin,
                        generics.GenericAPIView):
    """
    Use this endpoint to send email to user with password reset link.
    """
    serializer_class = serializers.PasswordResetSerializer
    permission_classes = (
        permissions.AllowAny,
    )
    token_generator = default_token_generator

    def action(self, serializer):
        for user in self.get_users(serializer.data['email']):
            self.send_email(**self.get_send_email_kwargs(user))
        return response.Response(status=status.HTTP_200_OK)

    def get_users(self, email):
        active_users = User._default_manager.filter(
            email__iexact=email,
            is_active=True,
        )
        return (u for u in active_users if u.has_usable_password())

    def get_send_email_extras(self):
        return {
            'subject_template_name': 'password_reset_email_subject.txt',
            'plain_body_template_name': 'password_reset_email_body.txt',
            'html_body_template_name': 'password_reset_email_body.html',
        }

    def get_email_context(self, user):
        context = super(PasswordResetView, self).get_email_context(user)
        context['url'] = settings.get('PASSWORD_RESET_CONFIRM_URL').format(**context)
        return context


class SetPasswordView(utils.ActionViewMixin, generics.GenericAPIView):
    """
    Use this endpoint to change user password.
    """
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def get_serializer_class(self):
        if settings.get('SET_PASSWORD_RETYPE'):
            return serializers.SetPasswordRetypeSerializer
        return serializers.SetPasswordSerializer

    def action(self, serializer):
        self.request.user.set_password(serializer.data['new_password'])
        self.request.user.save()
        return response.Response(status=status.HTTP_200_OK)


class PasswordResetConfirmView(utils.ActionViewMixin, generics.GenericAPIView):
    """
    Use this endpoint to finish reset password process.
    """
    permission_classes = (
        permissions.AllowAny,
    )
    token_generator = default_token_generator

    def get_serializer_class(self):
        if settings.get('PASSWORD_RESET_CONFIRM_RETYPE'):
            return serializers.PasswordResetConfirmRetypeSerializer
        return serializers.PasswordResetConfirmSerializer

    def action(self, serializer):
        serializer.user.set_password(serializer.data['new_password'])
        serializer.user.save()
        return response.Response(status=status.HTTP_200_OK)


class ActivationView(utils.ActionViewMixin, generics.GenericAPIView):
    """
    Use this endpoint to activate user account.
    """
    serializer_class = serializers.UidAndTokenSerializer
    permission_classes = (
        permissions.AllowAny,
    )
    token_generator = default_token_generator

    def action(self, serializer):
        serializer.user.is_active = True
        serializer.user.save()
        if settings.get('LOGIN_AFTER_ACTIVATION'):
            token, _ = Token.objects.get_or_create(user=serializer.user)
            data = serializers.TokenSerializer(token).data
        else:
            data = {}
        return Response(data=data, status=status.HTTP_200_OK)


class SetUsernameView(utils.ActionViewMixin, generics.GenericAPIView):
    """
    Use this endpoint to change user username.
    """
    serializer_class = serializers.SetUsernameSerializer
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def get_serializer_class(self):
        if settings.get('SET_USERNAME_RETYPE'):
            return serializers.SetUsernameRetypeSerializer
        return serializers.SetUsernameSerializer

    def action(self, serializer):
        setattr(self.request.user, User.USERNAME_FIELD, serializer.data['new_' + User.USERNAME_FIELD])
        self.request.user.save()
        return response.Response(status=status.HTTP_200_OK)


class UserView(generics.RetrieveUpdateAPIView):
    """
    Use this endpoint to retrieve/update user.
    """
    model = User
    serializer_class = serializers.UserSerializer
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def get_object(self, *args, **kwargs):
        return self.request.user
Python
0.000001
@@ -127,25 +127,24 @@ amework. -serialize +decorato rs impor @@ -149,22 +149,16 @@ ort -BaseSerializer +api_view %0Afro
3c0e18944c7ff712288ccb16e439e07d4db0b3c1
Fix init migration dependency
cmsplugin_date/migrations/0001_initial.py
cmsplugin_date/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Date',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('date', models.DateField(verbose_name='Date')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
Python
0.000003
@@ -158,24 +158,68 @@ dencies = %5B%0A + ('cms', '0003_auto_20140926_2347'),%0A %5D%0A%0A o
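Decoded, the patch fills the empty dependencies list so the migration graph orders this plugin's migration after the django-cms state it references through to='cms.CMSPlugin':

    dependencies = [
        ('cms', '0003_auto_20140926_2347'),
    ]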
890263898e23de06b6b864898d753362fd604c92
debug output for dupe=false
address_deduper/views/address.py
address_deduper/views/address.py
from address_deduper.views.base import *

from address_normalizer.deduping.near_duplicates import *
from address_normalizer.models.address import *


class AddressView(BaseView):
    blueprint = Blueprint('addresses', __name__, url_prefix='/addresses')

    @classmethod
    def address_from_params(cls, require_street=True, require_latlon=True):
        address = Address(request.args, strict=False)
        if require_street and not address.street:
            abort(400, 'street must be specified')
        if require_latlon and (not address.latitude or not address.longitude):
            abort(400, 'latitude and longitude must be specified')
        return address

    @classmethod
    def addresses_from_json(cls, require_street=True, require_latlon=True):
        key, batch = ('addresses', True) if 'batch' in request.args else ('address', False)

        try:
            data = request.json
        except Exception:
            abort(400, 'POST request was not JSON or could not be parsed')

        data = data.get(key)
        if not data:
            abort(400, 'The key "{}" could not be found in the request data'.format(key))

        addresses = [Address(a) for a in (data if batch else [data])]

        if not require_latlon and not require_street:
            return addresses

        for a in addresses:
            if require_street and not a.street:
                abort(400, 'All addresses must include "street" key')
            if require_latlon and (not a.latitude or not a.longitude):
                abort(400, 'All addresses must include "latitude" and "longitude" keys')

        return addresses

    @route('/normalize', methods=['GET'])
    def normalize_get(cls):
        address = cls.address_from_params(require_latlon=False)
        surface_forms = AddressNearDupe.expanded_street_address(address)
        return jsonify({'address': address.to_primitive(), 'normalized_expansions': list(surface_forms)})

    @route('/normalize', methods=['POST'])
    def normalize_post(cls):
        addresses = cls.addresses_from_json(require_latlon=False)
        return jsonify({'addresses': [{'address': address.to_primitive(),
                                       'normalized_expansions': list(AddressNearDupe.expanded_street_address(address))}
                                      for address in addresses]})

    @route('/dedupe', methods=['GET'])
    def exists(cls):
        address = cls.address_from_params()
        existence = AddressNearDupe.check([address], add=False)
        return_val = {}
        if existence:
            _, (guid, dupe) = existence[0]
            response = {'guid': guid, 'dupe': dupe}
            if 'debug' in request.values:
                existing_address = AddressNearDupe.storage.get(guid)
                if existing_address:
                    response['existing'] = json.loads(existing_address)
            return jsonify(response)
        else:
            abort(500, 'Unknown error')

    @route('/dedupe', methods=['POST'])
    def dedupe(cls):
        addresses = cls.addresses_from_json()
        created = AddressNearDupe.check(addresses, add=True)
        response = [{'guid': guid, 'dupe': dupe} for _, (guid, dupe) in created]
        if 'debug' in request.values:
            guids = [guid for _, (guid, dupe) in created if dupe]
            existing_addresses = AddressNearDupe.storage.multiget(guids)
            for r in response:
                existing_address = existing_addresses.get(r['guid'])
                if existing_address:
                    r['existing'] = json.loads(existing_address)
        return jsonify({'addresses': response})
Python
0.009255
@@ -2683,32 +2683,41 @@ n request.values + and dupe :%0A @@ -3526,32 +3526,46 @@ existing_address + and r%5B'dupe'%5D :%0A
43b992c09b092391e95b5a1893b6c19855482ff7
fix autoconf header
dist/tools/kconfiglib/riot_kconfig.py
dist/tools/kconfiglib/riot_kconfig.py
""" RIOT customization of Kconfig """ import argparse import sys from kconfiglib import Kconfig, KconfigError class RiotKconfig(Kconfig): """ RIOT adaption of Kconfig class """ def _parse_help(self, node): """ Parses the help section of a node, removing Doxygen markers """ doxygen_markers = ["@ref ", "@see "] # call default parsing super(RiotKconfig, self)._parse_help(node) # remove Doxygen markers for marker in doxygen_markers: node.help = node.help.replace(marker, "") def write_autoconf(self, filename=None, header=None): """ Override to convert - to _ when writing autoconf.h """ tmp_unique_defined_syms = self.unique_defined_syms.copy() for sym in self.unique_defined_syms: if not sym._write_to_conf: continue sym.name = sym.name.replace('-', '_') super(RiotKconfig, self).write_autoconf(filename, header) self.unique_defined_syms = tmp_unique_defined_syms def standard_riot_kconfig(description=None): """ Argument parsing helper for tools that take a single optional Kconfig file argument (default: Kconfig). Returns the RiotKconfig instance for the parsed configuration. Uses argparse internally. Exits with sys.exit() (which raises SystemExit) on errors. description (default: None): The 'description' passed to argparse.ArgumentParser(). argparse.RawDescriptionHelpFormatter is used, so formatting is preserved. """ parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=description) parser.add_argument( "kconfig", metavar="KCONFIG", default="Kconfig", nargs="?", help="Kconfig file (default: Kconfig)") args = parser.parse_args() # Suppress backtraces for expected exceptions try: return RiotKconfig(args.kconfig) except (EnvironmentError, KconfigError) as e: # Some long exception messages have extra newlines for better # formatting when reported as an unhandled exception. Strip them here. sys.exit(str(e).strip())
Python
0
@@ -593,20 +593,49 @@ header= -None +%22/* RIOT Configuration File */%5Cn%22 ):%0A
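Decoded, the patch only swaps the default value of write_autoconf's header parameter, so generated autoconf.h files open with a RIOT banner comment unless a caller overrides it; the patched signature reads:

    def write_autoconf(self, filename=None, header="/* RIOT Configuration File */\n"):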
a0598143062d0b33fdf59ef56d16d27d0bff2dd6
fix #108
hitcount/views.py
hitcount/views.py
import warnings

from collections import namedtuple

from django.http import Http404, JsonResponse, HttpResponseBadRequest
from django.conf import settings
from django.views.generic import View, DetailView

from hitcount.utils import get_ip
from hitcount.models import Hit, BlacklistIP, BlacklistUserAgent
from hitcount.utils import RemovedInHitCount13Warning, get_hitcount_model


class HitCountMixin:
    """
    Mixin to evaluate a HttpRequest and a HitCount and determine whether
    or not the HitCount should be incremented and the Hit recorded.
    """

    @classmethod
    def hit_count(self, request, hitcount):
        """
        Called with a HttpRequest and HitCount object it will return a
        namedtuple:

        UpdateHitCountResponse(hit_counted=Boolean, hit_message='Message').

        `hit_counted` will be True if the hit was counted and False if it was
        not. `'hit_message` will indicate by what means the Hit was either
        counted or ignored.
        """
        UpdateHitCountResponse = namedtuple(
            'UpdateHitCountResponse', 'hit_counted hit_message')

        # as of Django 1.8.4 empty sessions are not being saved
        # https://code.djangoproject.com/ticket/25489
        if request.session.session_key is None:
            request.session.save()

        user = request.user
        try:
            is_authenticated_user = user.is_authenticated()
        except:
            is_authenticated_user = user.is_authenticated

        session_key = request.session.session_key
        ip = get_ip(request)
        user_agent = request.headers.get('User-Agent', '')[:255]
        hits_per_ip_limit = getattr(settings, 'HITCOUNT_HITS_PER_IP_LIMIT', 0)
        exclude_user_group = getattr(settings, 'HITCOUNT_EXCLUDE_USER_GROUP', None)

        # first, check our request against the IP blacklist
        if BlacklistIP.objects.filter(ip__exact=ip):
            return UpdateHitCountResponse(
                False, 'Not counted: user IP has been blacklisted')

        # second, check our request against the user agent blacklist
        if BlacklistUserAgent.objects.filter(user_agent__exact=user_agent):
            return UpdateHitCountResponse(
                False, 'Not counted: user agent has been blacklisted')

        # third, see if we are excluding a specific user group or not
        if exclude_user_group and is_authenticated_user:
            if user.groups.filter(name__in=exclude_user_group):
                return UpdateHitCountResponse(
                    False, 'Not counted: user excluded by group')

        # eliminated first three possible exclusions, now on to checking our database of
        # active hits to see if we should count another one

        # start with a fresh active query set (HITCOUNT_KEEP_HIT_ACTIVE)
        qs = Hit.objects.filter_active()

        # check limit on hits from a unique ip address (HITCOUNT_HITS_PER_IP_LIMIT)
        if hits_per_ip_limit:
            if qs.filter(ip__exact=ip).count() >= hits_per_ip_limit:
                return UpdateHitCountResponse(
                    False, 'Not counted: hits per IP address limit reached')

        # create a generic Hit object with request data
        hit = Hit(session=session_key, hitcount=hitcount, ip=get_ip(request),
                  user_agent=request.headers.get('User-Agent', '')[:255],)

        # first, use a user's authentication to see if they made an earlier hit
        if is_authenticated_user:
            if not qs.filter(user=user, hitcount=hitcount):
                hit.user = user  # associate this hit with a user
                hit.save()
                response = UpdateHitCountResponse(
                    True, 'Hit counted: user authentication')
            else:
                response = UpdateHitCountResponse(
                    False, 'Not counted: authenticated user has active hit')

        # if not authenticated, see if we have a repeat session
        else:
            if not qs.filter(session=session_key, hitcount=hitcount):
                hit.save()
                response = UpdateHitCountResponse(
                    True, 'Hit counted: session key')
            else:
                response = UpdateHitCountResponse(
                    False, 'Not counted: session key has active hit')

        return response


class HitCountJSONView(View, HitCountMixin):
    """
    JSON response view to handle HitCount POST.
    """

    def dispatch(self, request, *args, **kwargs):
        if not request.is_ajax():
            raise Http404()
        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        msg = "Hits counted via POST only."
        return JsonResponse({'success': False, 'error_message': msg})

    def post(self, request, *args, **kwargs):
        hitcount_pk = request.POST.get('hitcountPK')

        try:
            hitcount = get_hitcount_model().objects.get(pk=hitcount_pk)
        except:
            return HttpResponseBadRequest("HitCount object_pk not working")

        hit_count_response = self.hit_count(request, hitcount)
        return JsonResponse(hit_count_response._asdict())


class HitCountDetailView(DetailView, HitCountMixin):
    """
    HitCountDetailView provides an inherited DetailView that will inject the
    template context with a `hitcount` variable giving you the number of
    Hits for an object without using a template tag.

    Optionally, by setting `count_hit = True` you can also do the business of
    counting the Hit for this object (in lieu of using JavaScript). It will
    then further inject the response from the attempt to count the Hit into
    the template context.
    """
    count_hit = False

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        if self.object:
            hit_count = get_hitcount_model().objects.get_for_object(self.object)
            hits = hit_count.hits
            context['hitcount'] = {'pk': hit_count.pk}

            if self.count_hit:
                hit_count_response = self.hit_count(self.request, hit_count)
                if hit_count_response.hit_counted:
                    hits = hits + 1
                context['hitcount']['hit_counted'] = hit_count_response.hit_counted
                context['hitcount']['hit_message'] = hit_count_response.hit_message

            context['hitcount']['total_hits'] = hits

        return context


def _update_hit_count(request, hitcount):
    """
    Deprecated in 1.2. Use hitcount.views.HitCountMixin.hit_count() instead.
    """
    warnings.warn(
        "hitcount.views._update_hit_count is deprecated. "
        "Use hitcount.views.HitCountMixin.hit_count() instead.",
        RemovedInHitCount13Warning
    )
    return HitCountMixin.hit_count(request, hitcount)


def update_hit_count_ajax(request, *args, **kwargs):
    """
    Deprecated in 1.2. Use hitcount.views.HitCountJSONView instead.
    """
    warnings.warn(
        "hitcount.views.update_hit_count_ajax is deprecated. "
        "Use hitcount.views.HitCountJSONView instead.",
        RemovedInHitCount13Warning
    )
    view = HitCountJSONView.as_view()
    return view(request, *args, **kwargs)
Python
0.000001
@@ -4519,17 +4519,59 @@ est. -is_ajax() +headers.get('x-requested-with') == 'XMLHttpRequest' :%0A
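The replacement here tracks a Django API change: HttpRequest.is_ajax() was deprecated in Django 3.1 and removed in 4.0, and it was implemented as exactly the header comparison the patch inlines. A standalone equivalent of the patched check:

def request_is_ajax(request):
    # Mirrors the removed HttpRequest.is_ajax(): jQuery and similar
    # libraries set this header on XHR calls; plain fetch() callers
    # must add it themselves.
    return request.headers.get('x-requested-with') == 'XMLHttpRequest'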
c9cd1497eee67af351bdc98ee43aa25330ab6d7a
Add another date
inspectors/peacecorps.py
inspectors/peacecorps.py
#!/usr/bin/env python

import datetime
import logging
import os

from bs4 import BeautifulSoup

from utils import utils, inspector

# http://www.peacecorps.gov/about/inspgen/
archive = 1989

# options:
#   standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#

REPORTS_URL = "http://www.peacecorps.gov/about/inspgen/reports/"

REPORT_PUBLISHED_MAPPING = {
    "Death_Inquiry_and_Assessment_of_Medical_Care_in_Peace_Corps_Morocco": datetime.datetime(2010,2,1),
    "Burkina_Faso_Medical_Supply_Management_Advisory_Report": datetime.datetime(2013, 3, 14),
    "PCIG_Final_MAR_Certification_of_Volunteer_Payments": datetime.datetime(2013, 9, 24),
    "MAR_Cost_Savings_Opportunity_on_Value_Added_Tax": datetime.datetime(2013, 2, 13),
    "Management_Advisory_Report-Peace_Corps_Drug_Free_Workplace_Program": datetime.datetime(2012, 8, 16),
    "PCIG_2014_Peace_Corps_OIG_Peer_Review_Final": datetime.datetime(2014, 3, 27),
    "MAR_Sierra_Leone": datetime.datetime(2013, 3, 14),
    "Capstone_Report_2012_Medical_Inventory_Issues_Final": datetime.datetime(2013, 8, 26),
    "PCIG_Capstone_Report_Billing_and_Collection_Process": datetime.datetime(2014, 9, 30),
    "PC_Morocco_Assessment_of_Medical_Care": datetime.datetime(2010, 2, 1),
    "PCIG_New_Country_Entries_Lessons_Learned_Final_Report": datetime.datetime(2014, 9, 30),
    "PC_Recurring_Issues_OIG_Post_Audits_Evaluations_FYs_2009-2011": datetime.datetime(2012, 4, 1),
    "PC_Vanuatu_SR_Advice_and_Assistance": datetime.datetime(2010, 5, 1),
    "PC_Gambia_SR_Grant_Activities": datetime.datetime(2010, 5, 14),
    "PC_Ecuador_Special_Review_IG1005SR": datetime.datetime(2010, 9, 1),
    "PCIG_Agency_Policies_Related_to_Volunteer_Sexual_Assault_Allegations": datetime.datetime(2014, 11, 21),
    "PCIG_Investigative_Review_of_a_Volunteer_Death_in_Peace_Corps_China": datetime.datetime(2014, 11, 1),
    "PCIG_Agency_Response_to_the_China_Investigative_Review_Nov_2014": datetime.datetime(2015, 1, 23),
}

REPORT_TYPE_MAP = {
    'Advisories': 'press',
    'Annual and Strategic Plans': 'other',
    'Semiannual Reports to Congress': 'semiannual_report',
    'Special Reports and Reviews': 'other',
    'Audit Reports': 'audit',
    'Program Evaluation Reports': 'evaluation',
}


def run(options):
    year_range = inspector.year_range(options, archive)

    # Pull the reports
    doc = BeautifulSoup(utils.download(REPORTS_URL))
    results = doc.select("li div li")
    if not results:
        raise inspector.NoReportsFoundError("Peace Corps")
    for result in results:
        report = report_from(result, year_range)
        if report:
            inspector.save_report(report)


def report_from(result, year_range):
    link = result.find("a")
    report_url = link.get('href')
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)
    title = link.text

    topic_text = result.find_previous("h2").text.strip()
    report_type = REPORT_TYPE_MAP.get(topic_text, 'other')
    section_title = result.find_previous("h3").text.strip()

    estimated_date = False
    if report_id in REPORT_PUBLISHED_MAPPING:
        published_on = REPORT_PUBLISHED_MAPPING[report_id]
    else:
        try:
            published_on_text = title.split("–")[-1].strip()
            published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
        except ValueError:
            # For reports where we can only find the year, set them to Nov 1st of that year
            published_on_year =int(section_title.lstrip("FY "))
            published_on = datetime.datetime(published_on_year, 11, 1)
            estimated_date = True

    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return

    report = {
        'inspector': 'peacecorps',
        'inspector_url': 'http://www.peacecorps.gov/about/inspgen/',
        'agency': 'peacecorps',
        'agency_name': 'Peace Corps',
        'type': report_type,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    if estimated_date:
        report['estimated_date'] = estimated_date
    return report


utils.run(run) if (__name__ == "__main__") else None
Python
0.000126
@@ -1955,16 +1955,104 @@ 1, 23),%0A + %22PCIG_MAR_Peace_Corps_Cloud_Computing_Pilot_Program%22: datetime.datetime(2015, 3, 17),%0A %7D%0A%0AREPOR
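The diff above appears to be diff-match-patch patch text with URL-encoded bodies (%0A is a newline, %22 a double quote, %7D a closing brace). Decoded, it adds a single entry to the end of REPORT_PUBLISHED_MAPPING; a sketch of the affected region after the change:

REPORT_PUBLISHED_MAPPING = {
  # ... earlier entries unchanged ...
  "PCIG_Agency_Response_to_the_China_Investigative_Review_Nov_2014": datetime.datetime(2015, 1, 23),
  "PCIG_MAR_Peace_Corps_Cloud_Computing_Pilot_Program": datetime.datetime(2015, 3, 17),  # added by this commit
}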
d7f734dfae48c43bf10ddcc4b1e2a3a9863f0807
stop assuming addresses that start with D are DOGE addresses
homepage/views.py
homepage/views.py
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _

from annoying.decorators import render_to

from blockexplorer.decorators import assert_valid_coin_symbol
from blockexplorer.settings import BLOCKCYPHER_PUBLIC_KEY, BLOCKCYPHER_API_KEY
from blockexplorer.walletname import lookup_wallet_name, is_valid_wallet_name

from homepage.forms import SearchForm

from blockcypher.api import get_transaction_details, get_block_overview, get_blocks_overview, get_latest_block_height, get_broadcast_transactions
from blockcypher.utils import is_valid_hash, is_valid_block_num, is_valid_sha_block_hash, is_valid_address
from blockcypher.constants import SHA_COINS, SCRYPT_COINS, COIN_SYMBOL_MAPPINGS

from operator import itemgetter


@render_to('home.html')
def home(request):
    form = SearchForm(initial={
        'search_string': '16Fg2yjwrbtC6fZp61EV9mNVKmwCzGasw5',
        'coin_symbol': 'btc',
    })

    if request.method == 'POST':
        form = SearchForm(data=request.POST)
        if form.is_valid():
            redirect_url = None
            search_string = form.cleaned_data['search_string']
            coin_symbol = form.cleaned_data['coin_symbol']
            kwargs = {'coin_symbol': coin_symbol}
            if is_valid_block_num(search_string):
                kwargs['block_representation'] = search_string
                redirect_url = reverse('block_overview', kwargs=kwargs)
            elif is_valid_hash(search_string):
                if coin_symbol in SHA_COINS:
                    if is_valid_sha_block_hash(search_string):
                        kwargs['block_representation'] = search_string
                        redirect_url = reverse('block_overview', kwargs=kwargs)
                    else:
                        kwargs['tx_hash'] = search_string
                        redirect_url = reverse('transaction_overview', kwargs=kwargs)
                elif coin_symbol in SCRYPT_COINS:
                    # Try to see if it's a valid TX hash
                    tx_details = get_transaction_details(
                        tx_hash=search_string,
                        coin_symbol=coin_symbol,
                        limit=1,
                        api_key=BLOCKCYPHER_API_KEY,
                    )
                    if 'error' in tx_details:
                        # Not a valid TX hash, see if it's a block hash by checking blockchain
                        block_details = get_block_overview(
                            block_representation=search_string,
                            coin_symbol=coin_symbol,
                            txn_limit=1,
                            api_key=BLOCKCYPHER_API_KEY,
                        )
                        if 'error' in block_details:
                            msg = _("Sorry, that's not a valid transaction or block hash for %(currency)s" % {'currency': coin_symbol})
                            messages.error(request, msg)
                        else:
                            kwargs['block_representation'] = search_string
                            redirect_url = reverse('block_overview', kwargs=kwargs)
                    else:
                        kwargs['tx_hash'] = search_string
                        redirect_url = reverse('transaction_overview', kwargs=kwargs)
            elif is_valid_address(search_string):
                # It's an address
                kwargs['address'] = search_string
                first_char = search_string[0]
                # Override coin_symbol if we can infer it from the blockchain
                # There is now generic constants in the python library (constants.py)
                # Not migrating because this is custom (those constants have overlap/ambiguity)
                if first_char in ('1', ):
                    # Do not force addresses starting with 3 to be BTC because that's also used by LTC
                    kwargs['coin_symbol'] = 'btc'
                elif first_char in ('m', 'n', '2'):
                    # Note that addresses starting in 2 can be LTC testnet, but since we don't support that it's okay to include
                    kwargs['coin_symbol'] = 'btc-testnet'
                elif first_char in ('D', '9', 'A'):
                    kwargs['coin_symbol'] = 'doge'
                elif first_char in ('L', ):
                    # Do not force addresses starting with 3 to be LTC because that's also used by BTC
                    kwargs['coin_symbol'] = 'ltc'
                elif first_char in ('U', ):
                    kwargs['coin_symbol'] = 'uro'
                elif first_char in ('B', 'C', 'D'):
                    kwargs['coin_symbol'] = 'bcy'
                redirect_url = reverse('address_overview', kwargs=kwargs)
            elif is_valid_wallet_name(search_string):
                addr = lookup_wallet_name(search_string, kwargs['coin_symbol'])
                if addr:
                    kwargs['address'] = addr
                    kwargs['wallet_name'] = search_string
                    redirect_url = reverse('address_overview', kwargs=kwargs)
                else:
                    msg = _("Sorry, that's not a valid wallet name")
                    messages.error(request, msg)
            if redirect_url:
                return HttpResponseRedirect(redirect_url)
            else:
                currency = COIN_SYMBOL_MAPPINGS[request.POST['coin_symbol']]['display_shortname']
                msg = _("Sorry, that's not a valid %(currency)s address, wallet name, transaction or block" % {
                    'currency': currency})
                messages.error(request, msg)

    return {
        'is_home': True,
        'form': form
    }


@assert_valid_coin_symbol
@render_to('coin_overview.html')
def coin_overview(request, coin_symbol):
    initial = {
        'coin_symbol': coin_symbol,
        'search_string': COIN_SYMBOL_MAPPINGS[coin_symbol]['example_address']
    }
    form = SearchForm(initial=initial)

    latest_bh = get_latest_block_height(coin_symbol=coin_symbol, api_key=BLOCKCYPHER_API_KEY)

    recent_blocks = get_blocks_overview(
        block_representation_list=list(reversed(range(latest_bh-4, latest_bh+1))),
        coin_symbol=coin_symbol,
        api_key=BLOCKCYPHER_API_KEY)
    recent_blocks = sorted(recent_blocks, key=lambda k: k['height'], reverse=True)
    #import pprint; pprint.pprint(recent_blocks, width=1)

    recent_txs = get_broadcast_transactions(coin_symbol=coin_symbol, api_key=BLOCKCYPHER_API_KEY, limit=10)

    recent_txs_filtered = []
    tx_hashes_seen = set([])
    for recent_tx in recent_txs:
        if recent_tx['hash'] in tx_hashes_seen:
            continue
        else:
            tx_hashes_seen.add(recent_tx['hash'])
            recent_txs_filtered.append(recent_tx)

    # sort recent txs by order (they're not always returning in order)
    recent_txs_filtered = sorted(recent_txs_filtered, key=itemgetter('received'), reverse=True)

    return {
        'coin_symbol': coin_symbol,
        'form': form,
        'recent_blocks': recent_blocks,
        'recent_txs': recent_txs_filtered,
        'BLOCKCYPHER_PUBLIC_KEY': BLOCKCYPHER_PUBLIC_KEY,
    }


@render_to('highlights.html')
def highlights(request):
    return {}


def fail500(request):
    raise Exception('IntentionalFail: This Was On Purpose')
Python
0.003528
@@ -4375,13 +4375,8 @@ in ( -'D', '9',
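Decoded, this diff deletes 'D', from one prefix tuple, matching the commit subject: a leading D no longer forces the DOGE chain, so such addresses now fall through to the later ('B', 'C', 'D') branch that selects 'bcy'. A sketch of the change:

# before
elif first_char in ('D', '9', 'A'):
    kwargs['coin_symbol'] = 'doge'
# after
elif first_char in ('9', 'A'):
    kwargs['coin_symbol'] = 'doge'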
6d7c21979a741e60053faf6d4e444ad4bf01dcde
Fix unittests
backward/backends/session.py
backward/backends/session.py
try:
    import cPickle as pickle
except ImportError:
    import pickle

from .base import Backend
from backward import settings


class SessionBackend(Backend):
    def get_url_redirect(self, request):
        return request.session.get(settings.URL_REDIRECT_NAME, None)

    def save_url_redirect(self, request, response, url_redirect):
        request.session[settings.URL_REDIRECT_NAME] = url_redirect

    def get_next_action(self, request):
        if settings.NEXT_ACTION_NAME in request.session:
            return pickle.loads(request.session[settings.NEXT_ACTION_NAME])
        return {}

    def save_next_action(self, request, response, data):
        request.session[settings.NEXT_ACTION_NAME] = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)

    def delete_next_action(self, request):
        try:
            del request.session[settings.NEXT_ACTION_NAME]
        except KeyError:
            return False
        return True
Python
0.000005
@@ -781,32 +781,42 @@ on(self, request +, response ):%0A try:%0A
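Decoded, the diff adds a response argument to delete_next_action (the trailing context %0A try:%0A pins the change to that method, the only one followed by a try block), presumably aligning the signature with how the unit tests call it:

# before
def delete_next_action(self, request):
# after
def delete_next_action(self, request, response):
    try:
        del request.session[settings.NEXT_ACTION_NAME]
    except KeyError:
        return False
    return True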
81cfcd62dacebac895fd819ccf0640597cc2822f
define a one-to-many relation from PostState to Post. post.state will be a lazy backref.
models/blog.py
models/blog.py
from datetime import datetime

from flask_misaka import markdown

from extensions import db

posts_to_tags = db.Table('posts_to_tags',
                         db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
                         db.Column('post_id', db.Integer, db.ForeignKey('post.id')))


class Post(db.Model):
    def render_markdown(markdown_text):
        """Render markdown_text into HTML with code blocks and math rendering."""
        html = markdown(markdown_text, fenced_code=True, math=True)
        return html

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.Text)
    content = db.Column(db.Text)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    url = db.Column(db.Text)
    created_at = db.Column(db.DateTime)
    published_at = db.Column(db.DateTime)
    edited_at = db.Column(db.DateTime)
    view_count = db.Column(db.Integer, default=0)
    state_id = db.Column(db.Integer, db.ForeignKey('post_state.id'))
    tags = db.relationship('Tag', secondary=posts_to_tags,
                           backref=db.backref('posts', lazy='dynamic'))

    def __init__(self, title, content, author_id):
        self.title = title
        self.url = 'random_url_' + generate_random_string()
        self.content = content
        self.author_id = author_id
        self.created_at = datetime.utcnow()
        self.state_id = 1

    def render_content(self):
        """Render the content of the post written in markdown to HTML.

        :return: html render of markdown
        """
        return Post.render_markdown(self.content)

    def set_url_from_title(self):
        self.url = self.title.lower().replace(' ', '_')


def generate_random_string(size=32):
    import random
    import string
    choices = string.ascii_letters + string.digits
    random_characters = [random.choice(choices) for _ in range(size)]
    return ''.join(random_characters)


class Tag(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True)

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def serialize(self):
        """Return object data in a jsonable format."""
        return {'id': self.id, 'name': self.name}


class PostState(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True)

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name
Python
0.00001
@@ -2351,32 +2351,101 @@ xt, unique=True) +%0A posts = db.relationship('Post', backref='state', lazy='dynamic') %0A%0A def __init
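Decoded, the diff inserts the relationship named in the subject into PostState; a sketch of the resulting class:

class PostState(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True)
    # one-to-many PostState -> Post: state.posts is a dynamic query,
    # and each Post gains a lazily loaded post.state backref
    posts = db.relationship('Post', backref='state', lazy='dynamic')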
fdbaaa6c1f20a48d0891106455c91d600c8236f7
Change client.skia.fyi ports
masters/master.client.skia.fyi/master_site_config.py
masters/master.client.skia.fyi/master_site_config.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""ActiveMaster definition."""

from config_bootstrap import Master


class SkiaFYI(Master.Master3):
  project_name = 'SkiaFYI'
  master_port = 8094
  slave_port = 8194
  master_port_alt = 8294
  repo_url = 'https://skia.googlesource.com/skia.git'
  buildbot_url = 'http://build.chromium.org/p/client.skia.fyi/'
  code_review_site = 'https://codereview.chromium.org'
Python
0.000001
@@ -304,17 +304,17 @@ rt = 809 -4 +8 %0A slave @@ -324,17 +324,17 @@ rt = 819 -4 +8 %0A maste @@ -349,17 +349,17 @@ lt = 829 -4 +8 %0A repo_
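Decoded, the diff changes the final digit of all three ports from 4 to 8:

class SkiaFYI(Master.Master3):
  project_name = 'SkiaFYI'
  master_port = 8098      # was 8094
  slave_port = 8198       # was 8194
  master_port_alt = 8298  # was 8294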
9a688a0311cbf802bc541e267afac968bcf7ae2c
break down sample with a little bit extra info and show each step along the way
ocv_image_artihmetic.py
ocv_image_artihmetic.py
import numpy as np
import cv2 as cv


blue = np.zeros((300,512,3), np.uint8)
img = cv.imread('messi5.jpg')

cv.imshow('b', blue)

k = 0
while k != ord('q'):
    k = cv.waitKey(100)

cv.destroyAllWindows()
Python
0
@@ -31,16 +31,209 @@ as cv%0A%0A%0A +def ims(img, key, windows_name='dummy'):%0A cv.destroyAllWindows()%0A cv.imshow(windows_name, img)%0A k = 0%0A while k != ord(key):%0A k = cv.waitKey(10)%0A cv.destroyAllWindows()%0A%0A# blue = n @@ -263,16 +263,18 @@ .uint8)%0A +# img = cv @@ -300,100 +300,1237 @@ ')%0A%0A -cv.imshow('b', blue)%0A%0Ak = 0%0Awhile k != ord('q'):%0A k = cv.waitKey(100)%0A%0Acv.destroyAllWindows() +# main image%0Aimg = cv.imread('messi5.jpg')%0Aims(img, 'q', 'img')%0A%0A# overlay image%0Aovl = cv.imread('logo.png')%0Aims(ovl, 'q', 'ovl')%0A%0A# create ROI (region of image) on top left corner at the size of the overlay%0Arows, cols,channels = ovl.shape%0Aroi = img%5B0:rows, 0:cols%5D%0Aims(roi, 'q', 'roi of img')%0A%0A# Now create a mask of logo and create its inverse mask also%0A# get overlay grayscale%0Aovlgray = cv.cvtColor(ovl,cv.COLOR_BGR2GRAY)%0Aims(ovlgray, 'q', 'ovl in grayscale')%0A# get bi-level image out of the grayscale%0Aret, ovlmask = cv.threshold(ovlgray, 10, 255, cv.THRESH_BINARY)%0Aims(ovlmask, 'q', 'mask after thresholding')%0A# invert and get the actual overlay mask%0Aovlmask_inv = cv.bitwise_not(ovlmask)%0Aims(ovlmask_inv, 'q', 'inverted mask')%0A%0A# Now black-out the area of overlay in ROI%0Aroi_bg = cv.bitwise_and(roi, roi, mask=ovlmask_inv)%0Aims(roi_bg, 'q', 'roi bitwise with inverted mask')%0A%0A# Take only region of logo from logo image.%0Aovl_fg = cv.bitwise_and(ovl, ovl, mask=ovlmask)%0Aims(ovl_fg, 'q', 'overlay bitwise with mask')%0A%0A%0A# Put overlay in ROI and modify the main image%0Adst_roi = cv.add(roi_bg, ovl_fg)%0Aims(dst_roi, 'q', 'roi bg with ovl fg')%0A%0A# and put it back on the img%0Aimg%5B0:rows, 0:cols%5D = dst_roi%0A%0Aims(img, 'q', 'final result')%0A%0A%0A
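Decoded, the diff turns the stub into a step-by-step logo-overlay demo: a new ims() helper shows each intermediate image, the old lines are commented out, and the masking pipeline is appended. An abridged sketch of the result (the full script calls ims() after every step):

def ims(img, key, windows_name='dummy'):
    cv.destroyAllWindows()
    cv.imshow(windows_name, img)
    k = 0
    while k != ord(key):
        k = cv.waitKey(10)
    cv.destroyAllWindows()

img = cv.imread('messi5.jpg')                        # main image
ovl = cv.imread('logo.png')                          # overlay image
rows, cols, channels = ovl.shape
roi = img[0:rows, 0:cols]                            # top-left ROI sized to the overlay

ovlgray = cv.cvtColor(ovl, cv.COLOR_BGR2GRAY)        # overlay in grayscale
ret, ovlmask = cv.threshold(ovlgray, 10, 255, cv.THRESH_BINARY)
ovlmask_inv = cv.bitwise_not(ovlmask)                # inverted mask

roi_bg = cv.bitwise_and(roi, roi, mask=ovlmask_inv)  # black out the logo area
ovl_fg = cv.bitwise_and(ovl, ovl, mask=ovlmask)      # keep only the logo pixels
img[0:rows, 0:cols] = cv.add(roi_bg, ovl_fg)         # composite back into the image
ims(img, 'q', 'final result')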
4a12f00012b1a49d5a3b6876c563a58ab4583b26
Add comments and MSVS settings
lib/node_modules/@stdlib/math/base/blas/dasum/binding.gyp
lib/node_modules/@stdlib/math/base/blas/dasum/binding.gyp
{ "targets": [ { "target_name": "addon", "link_settings": { "libraries": [ "<(module_root_dir)/src/c_dasum.o", "<(module_root_dir)/src/dasum.o", "<(module_root_dir)/src/dasumsub.o" ] }, "include_dirs": [ "<!(node -e \"require('nan')\")", "include" ], "sources": [ "./src/addon.cpp" ] } ] }
Python
0
@@ -1,17 +1,17 @@ %7B%0A -%22 +' targets -%22 +' : %5B%0A @@ -22,17 +22,78 @@ %7B%0A -%22 +# The target name should match the add-on export name:%0A ' target_n @@ -99,27 +99,231 @@ name -%22: %22 +': ' addon -%22 +' ,%0A +%0A -%22 +# Allow developer to choose whether to build a static or shared library:%0A 'type': '%3C(library)',%0A%0A # Settings that should be applied when a target's object files are used as linker input:%0A ' link @@ -335,21 +335,22 @@ ings -%22 +' : %7B%0A +%0A %22lib @@ -345,17 +345,23 @@ -%22 +# List librarie @@ -365,24 +365,203 @@ ries -%22: %5B%0A %22 + (object files, etc) which should be linked during linking:%0A 'libraries': %5B%0A # Note: %60module_root_dir%60 is provided by %60node-gyp%60based on the current working directory.%0A ' %3C(mo @@ -584,25 +584,25 @@ rc/c_dasum.o -%22 +' ,%0A @@ -593,33 +593,33 @@ m.o',%0A -%22 +' %3C(module_root_di @@ -632,17 +632,17 @@ /dasum.o -%22 +' ,%0A @@ -645,17 +645,17 @@ -%22 +' %3C(module @@ -679,17 +679,17 @@ sumsub.o -%22 +' %0A @@ -700,23 +700,91 @@ %7D,%0A +%0A -%22 +# Define directories which contain relevant include headers:%0A ' include_ @@ -787,17 +787,17 @@ ude_dirs -%22 +' : %5B%0A @@ -800,17 +800,17 @@ -%22 +' %3C!(node @@ -817,17 +817,17 @@ -e %5C -%22 +' require( 'nan @@ -826,18 +826,18 @@ ire( -'nan')%5C%22)%22 +%22nan%22)%5C')' ,%0A @@ -846,17 +846,17 @@ -%22 +' include -%22 +' %0A @@ -865,23 +865,66 @@ %5D,%0A +%0A -%22sources%22 +# List the source files to compile:%0A 'sources' : %5B%0A @@ -931,17 +931,17 @@ -%22 +' ./src/ad @@ -951,26 +951,1245 @@ .cpp -%22%0A %5D%0A +'%0A %5D,%0A%0A # C compiler flags:%0A 'cflags': %5B%0A # Generate platform-independent code:%0A '-fPIC'%0A %5D,%0A%0A # Apply conditions based on the runtime environment:%0A 'conditions': %5B%0A %5B%0A 'OS==%22win%22',%0A %7B%0A 'msvs_settings': %7B%0A 'VCCLCompilerTool': %7B%0A 'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG%0A 'OmitFramePointers': 'true',%0A 'EnableFunctionLevelLinking': 'true',%0A 'EnableIntrinsicFunctions': 'true',%0A 'RuntimeTypeInfo': 'false',%0A 'ExceptionHandling': '1',%0A %7D,%0A 'VCLibrarianTool': %7B%0A 'AdditionalOptions': %5B%0A '/LTCG', # link time code generation%0A %5D,%0A %7D +, %0A -%5D + 'VCLinkerTool': %7B%0A 'LinkTimeCodeGeneration': 1, # link-time code generation%0A 'OptimizeReferences': 2, # /OPT:REF%0A 'EnableCOMDATFolding': 2, # /OPT:ICF%0A 'LinkIncremental': 1, # disable incremental linking%0A %7D,%0A %7D%0A %7D%0A %5D # end OS==%22win%22%0A %5D # end conditions%0A %7D%0A %5D # end targets %0A%7D%0A
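Decoded, the diff rewrites the gyp file with single quotes, explanatory comments, a configurable 'type': '<(library)', a -fPIC cflag, and a Windows-only conditions block. The new MSVS portion, as a sketch:

'conditions': [
  ['OS=="win"', {
    'msvs_settings': {
      'VCCLCompilerTool': {
        'WholeProgramOptimization': 'true',  # /GL, needed for LTCG
        'OmitFramePointers': 'true',
        'EnableFunctionLevelLinking': 'true',
        'EnableIntrinsicFunctions': 'true',
        'RuntimeTypeInfo': 'false',
        'ExceptionHandling': '1',
      },
      'VCLibrarianTool': {
        'AdditionalOptions': ['/LTCG'],  # link-time code generation
      },
      'VCLinkerTool': {
        'LinkTimeCodeGeneration': 1,  # link-time code generation
        'OptimizeReferences': 2,      # /OPT:REF
        'EnableCOMDATFolding': 2,     # /OPT:ICF
        'LinkIncremental': 1,         # disable incremental linking
      },
    },
  }],  # end OS=="win"
]  # end conditions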
139cfb7756aa6c01d547c0a88cac939c6e88e926
Print where file is saved to.
util/tsne.py
util/tsne.py
#!/usr/bin/env python2

import numpy as np
import pandas as pd

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')

import os
import sys
import argparse

print("""
Note: This example assumes that `name i` corresponds to `label i` in `labels.csv`.
""")

parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()

y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()

target_names = np.array(args.names)
colors = cm.gnuplot2(np.linspace(0, 0.7, len(target_names)))

X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)

for c, i, target_name in zip(colors,
                             list(range(1, len(target_names) + 1)),
                             target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
                c=c, label=target_name)
plt.legend()
plt.savefig("{}/tsne.pdf".format(args.workDir))
Python
0
@@ -1192,20 +1192,15 @@ d()%0A -plt.savefig( +%0Aout = %22%7B%7D/ @@ -1229,10 +1229,60 @@ workDir) +%0Aplt.savefig(out)%0Aprint(%22Saved to: %7B%7D%22.format(out) )%0A
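Decoded, the diff binds the output path to a variable so the script can both save the figure and report where it went:

plt.legend()

out = "{}/tsne.pdf".format(args.workDir)
plt.savefig(out)
print("Saved to: {}".format(out))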
4a064569802edb08084458be3decdac9528d9a57
no need for semicolon
odo/backends/sql_csv.py
odo/backends/sql_csv.py
from __future__ import absolute_import, division, print_function

import os
import re
import subprocess
import uuid
import mmap

from contextlib import closing
from functools import partial
from distutils.spawn import find_executable

import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import Executable, ClauseElement

from toolz import merge
from multipledispatch import MDNotImplementedError

from ..append import append
from ..convert import convert
from .csv import CSV, infer_header
from ..temp import Temp
from .aws import S3
from .sql import fullname


class CopyFromCSV(Executable, ClauseElement):
    def __init__(self, element, csv, delimiter=',', header=None, na_value='',
                 lineterminator='\n', quotechar='"', escapechar='\\',
                 encoding='utf8', skiprows=0, **kwargs):
        if not isinstance(element, sa.Table):
            raise TypeError('element must be a sqlalchemy.Table instance')
        self.element = element
        self.csv = csv
        self.delimiter = delimiter
        self.header = (header if header is not None else
                       (csv.has_header
                        if csv.has_header is not None else infer_header(csv)))
        self.na_value = na_value
        self.lineterminator = lineterminator
        self.quotechar = quotechar
        self.escapechar = escapechar
        self.encoding = encoding
        self.skiprows = int(skiprows or self.header)

        for k, v in kwargs.items():
            setattr(self, k, v)

    @property
    def bind(self):
        return self.element.bind


@compiles(CopyFromCSV, 'sqlite')
def compile_from_csv_sqlite(element, compiler, **kwargs):
    if not find_executable('sqlite3'):
        raise MDNotImplementedError("Could not find sqlite executable")

    t = element.element
    if not element.header:
        csv = element.csv
    else:
        csv = Temp(CSV)('.%s' % uuid.uuid1())
        assert csv.has_header, \
            'SQLAlchemy element.header is True but CSV inferred no header'

        # write to a temporary file after skipping the first line
        chunksize = 1 << 24  # 16 MiB
        lineterminator = element.lineterminator.encode(element.encoding)
        with open(element.csv.path, 'rb') as f:
            with closing(mmap.mmap(f.fileno(), 0,
                                   access=mmap.ACCESS_READ)) as mf:
                index = mf.find(lineterminator)
                if index == -1:
                    raise ValueError("'%s' not found" % lineterminator)
                mf.seek(index + len(lineterminator))  # len because \r\n
                with open(csv.path, 'wb') as g:
                    for chunk in iter(partial(mf.read, chunksize), b''):
                        g.write(chunk)

    fullpath = os.path.abspath(csv.path).encode('unicode-escape').decode()
    cmd = ['sqlite3',
           '-nullvalue', repr(element.na_value),
           '-separator', element.delimiter,
           '-cmd', '.import "%s" %s' % (fullpath, t.name),
           element.bind.url.database]
    stdout, stderr = subprocess.Popen(cmd,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      stdin=subprocess.PIPE).communicate()
    assert not stdout, \
        'error: %s from command: %s' % (stdout, ' '.join(cmd))
    return ''


@compiles(CopyFromCSV, 'mysql')
def compile_from_csv_mysql(element, compiler, **kwargs):
    if element.na_value:
        raise ValueError('MySQL does not support custom NULL values')
    encoding = {'utf-8': 'utf8'}.get(element.encoding.lower(),
                                     element.encoding or 'utf8')
    escapechar = element.escapechar.encode('unicode-escape').decode()
    lineterminator = element.lineterminator.encode('unicode-escape').decode()
    result = r"""
        LOAD DATA {local} INFILE '{path}'
        INTO TABLE {0.element.name}
        CHARACTER SET {encoding}
        FIELDS
            TERMINATED BY '{0.delimiter}'
            ENCLOSED BY '{0.quotechar}'
            ESCAPED BY '{escapechar}'
        LINES TERMINATED BY '{0.lineterminator}'
        IGNORE {0.skiprows} LINES;
    """.format(element,
               path=os.path.abspath(element.csv.path),
               local=getattr(element, 'local', ''),
               encoding=encoding,
               lineterminator=lineterminator,
               escapechar=escapechar).strip()
    return result


@compiles(CopyFromCSV, 'postgresql')
def compile_from_csv_postgres(element, compiler, **kwargs):
    encoding = {'utf8': 'utf-8'}.get(element.encoding.lower(),
                                     element.encoding or 'utf8')
    if len(element.escapechar) != 1:
        raise ValueError('postgres does not allow escapechar longer than 1 '
                         'byte')
    statement = """
        COPY {fullname} FROM '{path}'
            (FORMAT CSV,
             DELIMITER E'{0.delimiter}',
             NULL '{0.na_value}',
             QUOTE '{0.quotechar}',
             ESCAPE '{0.escapechar}',
             HEADER {header},
             ENCODING '{encoding}');"""
    return statement.format(element,
                            fullname=fullname(element.element, compiler),
                            path=os.path.abspath(element.csv.path),
                            header=str(element.header).upper(),
                            encoding=encoding).strip()


try:
    import boto
    from odo.backends.aws import S3
    from redshift_sqlalchemy.dialect import CopyCommand
    import sqlalchemy as sa
except ImportError:
    pass
else:
    @compiles(CopyFromCSV, 'redshift')
    def compile_from_csv_redshift(element, compiler, **kwargs):
        assert isinstance(element.csv, S3(CSV))
        assert element.csv.path.startswith('s3://')

        cfg = boto.Config()

        aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
        aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')

        options = dict(delimiter=element.delimiter,
                       ignore_header=int(element.header),
                       empty_as_null=True,
                       blanks_as_null=False,
                       compression=getattr(element, 'compression', ''))

        if getattr(element, 'schema_name', None) is None:
            # 'public' by default, this is a postgres convention
            schema_name = (element.element.schema or
                           sa.inspect(element.bind).default_schema_name)

        cmd = CopyCommand(schema_name=schema_name,
                          table_name=element.element.name,
                          data_location=element.csv.path,
                          access_key=aws_access_key_id,
                          secret_key=aws_secret_access_key,
                          options=options,
                          format='CSV')
        return re.sub(r'\s+(;)', r'\1', re.sub(r'\s+', ' ', str(cmd))).strip()


@append.register(sa.Table, CSV)
def append_csv_to_sql_table(tbl, csv, **kwargs):
    dialect = tbl.bind.dialect.name

    # move things to a temporary S3 bucket if we're using redshift and we
    # aren't already in S3
    if dialect == 'redshift' and not isinstance(csv, S3(CSV)):
        csv = convert(Temp(S3(CSV)), csv, **kwargs)
    elif dialect != 'redshift' and isinstance(csv, S3(CSV)):
        csv = convert(Temp(CSV), csv, has_header=csv.has_header, **kwargs)
    elif dialect == 'hive':
        from .ssh import SSH
        return append(tbl, convert(Temp(SSH(CSV)), csv, **kwargs), **kwargs)

    kwargs = merge(csv.dialect, kwargs)
    stmt = CopyFromCSV(tbl, csv, **kwargs)
    with tbl.bind.begin() as conn:
        conn.execute(stmt)
    return tbl
Python
0.676492
@@ -5108,17 +5108,16 @@ oding%7D') -; %22%22%22%0A
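Decoded, the diff deletes exactly one character: the semicolon terminating the generated COPY statement, presumably because the statement is executed directly and needs no terminator:

# before
             ENCODING '{encoding}');"""
# after
             ENCODING '{encoding}')"""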
7f05631528a80adec1375b9e26364b0fa8fb05e0
fix getting game winner
backgammon/model/utils.py
backgammon/model/utils.py
# Copyright (c) 2015, Bartlomiej Puget <larhard@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
#   contributors may be used to endorse or promote products derived from this
#   software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import itertools as it
import random

from copy import copy
from utils.math import signum


def roll_dice():
    return random.randint(1, 6)


def players():
    return ['w', 'b']


def player_modifier(player):
    return 1 if player == 'w' else -1


def player_from_number(count):
    modifier = signum(count)
    if modifier == 0:
        return None
    return 'w' if modifier == 1 else 'b'


def jail_field(player):
    return 0 if player == 'w' else 25


def non_home_fields(player):
    return slice(1, 19) if player == 'w' else slice(7, 25)


def goes_offboard(player, k):
    return k > 24 if player == 'w' else k < 1


def enemy(player):
    return 'b' if player == 'w' else 'w'


def valid_distance(player):
    return range(0, 7) if player == 'w' else range(-6, 1)


def board_range():
    return range(1, 25)


def board_slice():
    return slice(1, 25)


def get_winner(board):
    white_checkers = sum(k for k in board if k < 0)
    black_checkers = sum(k for k in board if k > 0)

    if white_checkers == 0:
        return 'w'

    if black_checkers == 0:
        return 'b'

    return None


def verify_move(board, position, distance, player):
    distance = distance * player_modifier(player)
    new_position = position + distance

    if position not in board_range() \
            and position != jail_field(player):
        return False

    if distance not in valid_distance(player):
        return False

    if position in board_range() \
            and signum(board[position]) != player_modifier(player):
        return False

    if position == jail_field(player) \
            and board[jail_field(player)] == 0:
        return False

    if board[jail_field(player)] and position != jail_field(player):
        return False

    if goes_offboard(player, new_position):
        for field in board[non_home_fields(player)]:
            if player_from_number(field) == player:
                return False

    if not goes_offboard(player, new_position) \
            and board[new_position] * player_modifier(enemy(player)) > 1:
        return False

    return True


def make_move(board, position, distance, player):
    if not verify_move(board, position, distance, player):
        return None

    new_position = position + distance * player_modifier(player)

    board = board.copy()
    checker = player_modifier(player)

    board[position] -= checker

    if new_position in board_range():
        if signum(board[new_position] == player_modifier(enemy(player))):
            board[jail_field(enemy(player))] += board[new_position]
            board[new_position] = 0
        board[new_position] += checker

    return board


def is_any_legal_move(board, dice, player):
    if board[jail_field(player)]:
        for distance in dice:
            if verify_move(board, jail_field(player), distance, player):
                return True
    else:
        for position in board_range():
            for distance in dice:
                if verify_move(board, position, distance, player):
                    return True
    return False


def player_fields(board, player):
    for i, field in enumerate(board):
        if player_from_number(player) == player:
            yield i


def available_moves(board, dices, player, history=[]):
    yielded = False

    if not dices:
        yielded = True
        yield copy(history), copy(board)

    for dice in dices:
        new_dices = list(dices)
        new_dices.remove(dice)

        new_history = list(history)

        for position in range(0, 26):
            new_history.append((position, dice))
            new_board = make_move(board, position, dice, player)
            if new_board is not None:
                for h, b in available_moves(new_board, new_dices, player,
                                            new_history):
                    yielded = True
                    yield h, b
            new_history.pop()

    if not yielded:
        yield copy(history), copy(board)
Python
0.000001
@@ -2494,17 +2494,17 @@ rd if k -%3C +%3E 0)%0A @@ -2542,25 +2542,25 @@ board if k -%3E +%3C 0)%0A%0A if
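Decoded, the diff swaps the two comparisons so each side counts its own checkers (white's are stored as positive numbers, per player_modifier, and black's as negative); the player whose count reaches zero has borne everything off and wins:

def get_winner(board):
    white_checkers = sum(k for k in board if k > 0)  # was k < 0
    black_checkers = sum(k for k in board if k < 0)  # was k > 0

    if white_checkers == 0:
        return 'w'

    if black_checkers == 0:
        return 'b'

    return None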
fa6db1cd5d58393f7abd2603979cb20f706f592e
fix sorting
base/views/api/api_strain.py
base/views/api/api_strain.py
from base.models2 import strain_m
from base.application import app
from base.utils.decorators import jsonify_request
from sqlalchemy import or_
from flask import request
from logzero import logger


@app.route('/api/strain/query/<string:query>')
@jsonify_request
def search_strains(query):
    base_query = strain_m.query.filter(strain_m.isotype != None)
    query = query.upper()
    results = base_query.filter(or_(strain_m.isotype == query,
                                    strain_m.isotype.like(f"{query}%"),
                                    strain_m.strain == query,
                                    strain_m.strain.like(f"{query}%"),
                                    strain_m.previous_names.like(f"%{query}|%"),
                                    strain_m.previous_names.like(f"%,{query}|"),
                                    strain_m.previous_names.like(f"%{query}"),
                                    strain_m.previous_names == query))
    results = list([x.to_json() for x in results])
    return results


@app.route('/api/strain/')
@app.route('/api/strain/<string:strain_name>')
@app.route('/api/strain/isotype/<string:isotype_name>')
@jsonify_request
def query_strains(strain_name=None, isotype_name=None, release=None, all_strain_names=False, resolve_isotype=False):
    """
    Return the full strain database set

    strain_name - Returns data for only one strain
    isotype_name - Returns data for all strains of an isotype
    release - Filters results released prior to release data
    all_strain_names - Return list of all possible strain names (internal use).
    resolve_isotype - Use to search for strains and return their isotype
    """
    base_query = strain_m.query
    if release:
        base_query = base_query.filter(strain_m.release <= release)

    if strain_name or resolve_isotype:
        results = base_query.filter(or_(strain_m.previous_names.like(f"%{strain_name}|%"),
                                        strain_m.previous_names.like(f"%,{strain_name}|"),
                                        strain_m.previous_names.like(f"%{strain_name}"),
                                        strain_m.previous_names == strain_name,
                                        strain_m.strain == strain_name)).first()
    elif isotype_name:
        results = base_query.filter(strain_m.isotype == isotype_name).all()
    else:
        results = base_query.all()

    if all_strain_names:
        previous_strain_names = sum([x.previous_names.split("|") for x in results if x.previous_names], [])
        results = [x.strain for x in results] + previous_strain_names
    if resolve_isotype:
        if results:
            # LSJ1/LSJ2 prev. N2; So N2 needs to be specific.
            if strain_name == 'N2':
                return 'N2'
            return results.isotype
    return results


def get_strains(known_origin=False):
    """
    Returns a list of strains;

    Represents all strains

    Args:
        known_origin: Returns only strains with a known origin
        list_only: Returns a list of isotypes (internal use)
    """
    ref_strain_list = strain_m.query.filter(strain_m.reference_strain == True).all()
    ref_strain_list = {x.isotype: x.strain for x in ref_strain_list}
    result = strain_m.query
    if known_origin or 'origin' in request.path:
        result = result.filter(strain_m.latitude != None)
    result = result.all()
    for strain in result:
        strain.reference_strain = ref_strain_list[strain.isotype]
        logger.error(strain.reference_strain)
    return result


@app.route('/api/isotype')
@app.route('/api/isotype/origin')
@jsonify_request
def get_isotypes(known_origin=False, list_only=False):
    """
    Returns a list of strains when reference_strain == True;

    Represents ONE strain per isotype. This is the reference strain.

    Args:
        known_origin: Returns only strains with a known origin
        list_only: Returns a list of isotypes (internal use)
    """
    result = strain_m.query.filter(strain_m.reference_strain == True) \
                           .order_by(strain_m.reference_strain)
    if known_origin or 'origin' in request.path:
        result = result.filter(strain_m.latitude != None)
    result = result.all()
    if list_only:
        result = [x.isotype for x in result]
    return result
Python
0.000011
@@ -4156,32 +4156,23 @@ train_m. -reference_strain +isotype )%0A if
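Decoded, the diff replaces the ORDER BY column in get_isotypes: ordering by reference_strain does nothing useful there because the filter already pins it to True, so results are now sorted by isotype:

result = strain_m.query.filter(strain_m.reference_strain == True) \
                       .order_by(strain_m.isotype)  # was strain_m.reference_strain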
680d71646773737b4c41543df55d292b9f4f388a
add doc
banana/views/mixins.py
banana/views/mixins.py
from django.db import models

from banana.db import check_database


class MultiDbMixin(object):
    """
    This mxin makes a Django class based views support multiple databases.
    It requires a db variable in your request.
    """
    def get_queryset(self):
        self.db_name = self.kwargs.get('db', 'default')
        check_database(self.db_name)
        return super(MultiDbMixin, self).get_queryset().using(self.db_name)

    def get_context_data(self, **kwargs):
        context = super(MultiDbMixin, self).get_context_data(**kwargs)
        context['db_name'] = self.db_name
        return context


class HybridTemplateMixin(object):
    def get_template_names(self):
        format = self.request.GET.get('format', 'html')
        if format == 'json':
            self.content_type = 'application/json'
            extension = format
        elif format == 'csv':
            self.content_type = 'text/csv'
            extension = format
        else:
            extension = 'html'

        if hasattr(self, 'object') and \
                isinstance(self.object, models.Model) and \
                hasattr(self.object, 'model'):
            opts = self.object.model._meta
        elif hasattr(self, 'model') and self.model is not None and \
                issubclass(self.model, models.Model):
            opts = self.model._meta
        else:
            return []
        return ["%s/%s%s.%s" % (opts.app_label, opts.object_name.lower(),
                                self.template_name_suffix, extension)]

    def render_to_response(self, context, **response_kwargs):
        format = self.request.GET.get('format', 'html')
        if format == 'json':
            response_kwargs['content_type'] = 'application/json'
        elif format == 'csv':
            response_kwargs['content_type'] = 'text/csv'
        return super(HybridTemplateMixin, self).render_to_response(context,
                                                                   **response_kwargs)


class SortListMixin(object):
    """
    View mixin which provides sorting for ListView.
    """
    default_order = 'id'

    def get_order(self):
        return self.request.GET.get('order', self.default_order)

    def get_queryset(self):
        order = self.get_order()
        # TODO: this does not work with annotated fields
        #if order not in self.model._meta.get_all_field_names():
        #    raise Http404
        qs = super(SortListMixin, self).get_queryset()
        return qs.order_by(order)

    def get_context_data(self, *args, **kwargs):
        context = super(SortListMixin, self).get_context_data(*args, **kwargs)
        order = self.get_order()
        context.update({
            'order': order,
        })
        return context


class DatasetMixin(object):
    """
    mixin view that checks for a dataset request variable and adds it to
    the context
    """
    def get_dataset_id(self):
        return self.request.GET.get("dataset", None)

    def get_context_data(self, *args, **kwargs):
        context = super(DatasetMixin, self).get_context_data(*args, **kwargs)
        context['dataset'] = self.get_dataset_id()
        return context
Python
0
@@ -641,16 +641,155 @@ bject):%0A + %22%22%22%0A Checks the request for a format variable. If it is json or csv, will%0A set the content_type and template accordingly.%0A %22%22%22 %0A def @@ -2065,24 +2065,45 @@ mplateMixin, +%0A self).rende
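This last diff is cut off in the source, so only the first hunk decodes cleanly: it adds the docstring promised by the subject to HybridTemplateMixin (the visible start of the second hunk appears to merely re-wrap the long super() call in render_to_response; the rest is left as-is since it is truncated):

class HybridTemplateMixin(object):
    """
    Checks the request for a format variable. If it is json or csv, will
    set the content_type and template accordingly.
    """
    def get_template_names(self):
        ...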