column        type     range / values
commit        string   length 40 to 40
subject       string   length 1 to 3.25k
old_file      string   length 4 to 311
new_file      string   length 4 to 311
old_contents  string   length 0 to 26.3k
lang          string   3 classes
proba         float64  0 to 1
diff          string   length 0 to 7.82k
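Each record below lists these eight fields in schema order: the commit hash, the commit subject, the old and new file paths, the pre-commit file contents, the language, a probability score, and the change itself as a patch. As a minimal sketch of how such a table could be iterated, assuming it is published on the Hugging Face Hub; "user/commit-diffs" is a hypothetical placeholder, since the real dataset id is not given here:

# A minimal sketch, assuming the table is hosted as a Hugging Face dataset.
# "user/commit-diffs" is a hypothetical placeholder for the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/commit-diffs", split="train")
for row in ds.select(range(3)):
    # Each row carries the eight columns from the schema above.
    print(row["commit"], row["lang"], row["proba"])
    print(row["subject"], "->", row["new_file"])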
35c3fa719b57186a63bdf1110e76fc78b620c818
order presentations
pyconca2017/pycon_schedule/models.py
pyconca2017/pycon_schedule/models.py
from datetime import datetime

from django.db import models


""" Presentation """


class Speaker(models.Model):
    """ Who """
    email = models.EmailField(unique=True)
    full_name = models.CharField(max_length=255)
    bio = models.TextField(default='')
    twitter_username = models.CharField(max_length=255, null=True, blank=True)
    company_name = models.CharField(max_length=255, null=True, blank=True)
    url = models.URLField(max_length=2048, null=True, blank=True)
    shirt_size = models.CharField(max_length=255)
    location = models.CharField(max_length=255, null=True, blank=True)
    is_keynote = models.BooleanField(default=False)

    def __str__(self):
        return self.full_name

    @property
    def twitter_url(self):
        if not self.twitter_username:
            return None
        return 'https://twitter.com/{}'.format(self.twitter_username)


class Presentation(models.Model):
    """ What """
    papercall_id = models.IntegerField(null=True, blank=True, unique=True)
    title = models.CharField(max_length=255)
    description = models.TextField(default='')
    notes = models.TextField(default='')
    abstract = models.TextField(default='')
    audience_level = models.CharField(max_length=255)
    presentation_format = models.CharField(max_length=255)
    speaker = models.ForeignKey(Speaker)

    def __str__(self):
        return self.title


""" Schedule """


class Schedule(models.Model):
    """ When (what day) """
    day = models.DateField(unique=True)

    def __str__(self):
        return self.day.strftime('%b %d')


class Location(models.Model):
    """ Where """
    name = models.CharField(max_length=255)
    order = models.PositiveIntegerField(default=0)
    capacity = models.PositiveIntegerField(default=0)
    notes = models.TextField(default='', blank=True)

    def __str__(self):
        return self.name


class ScheduleSlot(models.Model):
    """ When (what time) """
    schedule = models.ForeignKey(Schedule, related_name='slots')
    start_time = models.TimeField()
    end_time = models.TimeField()

    def __str__(self):
        return '{} - {} ({})'.format(self.start_time, self.end_time, self.schedule)

    class Meta:
        unique_together = (('schedule', 'start_time', 'end_time'),)
        ordering = ('schedule', 'start_time', 'end_time')

    @property
    def duration(self):
        return datetime.combine(self.schedule.day, self.end_time) - datetime.combine(self.schedule.day, self.start_time)

    @property
    def start_events(self):
        return SlotEvent.objects.filter(slot__schedule=self.schedule, slot__start_time=self.start_time)


class SlotEvent(models.Model):
    """ Glue what with when and where """
    slot = models.ForeignKey(ScheduleSlot, related_name='events')
    location = models.ForeignKey(Location, null=True, blank=True)
    content = models.TextField(blank=True)
    presentation = models.OneToOneField(Presentation, null=True, blank=True)

    def __str__(self):
        return self.title

    class Meta:
        unique_together = (
            ('slot', 'location'),
        )

    @property
    def title(self):
        if self.presentation:
            return self.presentation.title
        return self.content

    @property
    def is_presentation(self):
        return bool(self.presentation)

    @property
    def duration(self):
        return self.slot.duration

    @property
    def duration_str(self):
        return ':'.join(str(self.duration).split(':')[:2])

    @property
    def presenter(self):
        if self.presentation:
            return self.presentation.speaker
Python
0.000001
@@ -1388,16 +1388,63 @@ .title%0A%0A + class Meta:%0A ordering = ('title',)%0A%0A %0A%22%22%22 Sch
6d4038653bf237a285f99e68288454ce9ebdfc92
Add allowed hosts
cinderella/cinderella/settings/production.py
cinderella/cinderella/settings/production.py
from .base import *

DEBUG = False

ALLOWED_HOSTS = ['cinderella.io']

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ['DB_NAME'],
        'USER': os.environ['DB_USER'],
        'PASSWORD': os.environ['DB_PASSWORD'],
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
Python
0
@@ -46,16 +46,34 @@ OSTS = %5B +'188.226.249.33', 'cindere @@ -80,10 +80,10 @@ lla. +l i -o '%5D%0A%0A
c0787c468e1b71d7e9db93b5f5990ae9bb506d82
FIX other two sample data load for Windows
pystruct/datasets/dataset_loaders.py
pystruct/datasets/dataset_loaders.py
import cPickle
from os.path import dirname
from os.path import join

import numpy as np


def load_letters():
    """Load the OCR letters dataset.

    This is a chain classification task.
    Each example consists of a word, segmented into letters.
    The first letter of each word is ommited from the data,
    as it was a capital letter (in contrast to all other letters).
    """
    module_path = dirname(__file__)
    data_file = open(join(module_path, 'letters.pickle'),'rb')
    data = cPickle.load(data_file)
    # we add an easy to use image representation:
    data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
                      for word in data['data']]
    return data


def load_scene():
    module_path = dirname(__file__)
    data_file = open(join(module_path, 'scene.pickle'))
    return cPickle.load(data_file)


def load_snakes():
    module_path = dirname(__file__)
    data_file = open(join(module_path, 'snakes.pickle'))
    return cPickle.load(data_file)
Python
0
@@ -794,32 +794,37 @@ 'scene.pickle') +,'rb' )%0A return cPi @@ -956,16 +956,21 @@ pickle') +,'rb' )%0A re
19a9465424400ca1a194e5516c44ca77a0f5591a
add Alpha Vantage API key to config file
moneywatch/moneywatchconfig.py
moneywatch/moneywatchconfig.py
#!/usr/bin/python
#===============================================================================
# Copyright (c) 2016, James Ottinger. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# MoneyWatch - https://github.com/jamesottinger/moneywatch
#=======================================================+=======================

db_creds = {
    'host':'xxx.xxx.xxx.xxx',
    'user':'youruser',
    'passwd':'yourpassword',
    'db':'thedbname'
}

dirlogs = '/log/'
direrrors = '/log/errors/'

weather = {
    'latitude':'39.7344443',
    'longitude':'-75.0072787',
    'title':'Chipotle - Sicklerville, NJ'
}

uilinks = [
    [
        ('Google Drive', 'https://drive.google.com/#my-drive')
    ],
    [
        ('Capital One 360', 'https://home.capitalone360.com'),
        ('Discover Bank', 'https://www.discover.com/online-banking/')
    ],
    [
        ('Fidelity', 'https://www.fidelity.com'),
        ('Vanguard', 'https://www.vanguard.com/')
    ]
]
Python
0
@@ -651,16 +651,45 @@ e, NJ' %7D +%0Aalphavantage_apikey = %22----%22 %0A%0Auilink
1b1578cbfbe0e03f1341017f20027d164c61a6ce
bump version
pb_job_manager/__init__.py
pb_job_manager/__init__.py
# coding: utf-8
from __future__ import print_function
from __future__ import unicode_literals

__version__ = "0.2.1"

import os
import sys
import time
import binascii
import collections
import multiprocessing as mp

PY2 = sys.version_info[0] == 2

if PY2:
    iteritems = lambda d: d.iteritems()
else:
    iteritems = lambda d: iter(d.items())

DEFAULT_MAX_PROCS = mp.cpu_count()
DEFAULT_POLL_INTERVAL = 0.001
MAX_POLL_INTERVAL = 3


class PBJobManager(object):

    def __init__(
            self, max_procs=DEFAULT_MAX_PROCS,
            verbose=False, job_timeout=None):
        self._verbose = verbose
        self._job_timeout = job_timeout
        self.max_procs = max(1, int(max_procs))
        self.clear()
        self._poll_interval = DEFAULT_POLL_INTERVAL
        # plumbum is imported here so we can to run
        # setup.py without any dependencies
        import plumbum
        self.pb = plumbum

    def clear(self):
        self._jobs = collections.OrderedDict()
        # Since job_ids are handed out linearly by the manager,
        # we can safely assume that there are no circular dependencies
        self._deps = {}
        self._start_times = {}
        self._futures = {}
        self._done = collections.OrderedDict()
        self._failed = {}

    def mk_job_id(self):
        return binascii.hexlify(os.urandom(8)).decode('ascii')

    def add_job(self, job, dep_job_id=None):
        # list implies a serial dependency among the elements
        if isinstance(job, list):
            subjob_iter = iter(job)
            job_id = self.add_job(next(subjob_iter))
            for sub_job in subjob_iter:
                # job_id of the last job is returned
                job_id = self.add_job(sub_job, dep_job_id=job_id)
        else:
            job_id = self.mk_job_id()
            if dep_job_id:
                self._deps[job_id] = dep_job_id
            self._jobs[job_id] = job
        return job_id

    def _get_next_leaf_job(self):
        for job_id, job in iteritems(self._jobs):
            dep_job_id = self._deps.get(job_id)
            if dep_job_id and dep_job_id not in self._done:
                # wait for dep job to finish
                continue
            if isinstance(job, self.pb.commands.base.BaseCommand):
                return job_id

    def _update_branch_jobs(self):
        for job_id, job in iteritems(self._jobs):
            dep_job_id = self._deps.get(job_id)
            if dep_job_id and dep_job_id not in self._done:
                # wait for dep job to finish
                continue
            # TODO: see if this breaks with remote commands
            if not isinstance(job, self.pb.commands.base.BaseCommand):
                del self._jobs[job_id]
                self.add_job(job())
                return True

    def _get_next_job(self):
        self._update_branch_jobs()
        return self._get_next_leaf_job()

    def _postproc_done_futures(self):
        for job_id, job_future in iteritems(self._futures):
            exit_code = job_future.proc.poll()
            timeout_exceeded = (
                self._job_timeout and
                (time.time() - self._start_times[job_id]) >= self._job_timeout
            )
            job_is_running = exit_code is None and not timeout_exceeded
            if job_is_running:
                continue

            if self._verbose:
                if timeout_exceeded:
                    print("aborting", job_id)
                else:
                    print("finished", job_id)

            # Yeah, yeah, dangerous modification during iteration, but
            # we're finished iterating by now and exit immediatly, so :P
            del self._futures[job_id]
            self._done[job_id] = job_future

            if timeout_exceeded:
                try:
                    job_future.proc.kill()
                except OSError:
                    # job may have finshed after all
                    pass

            # poll may say we are done, but we still need to call wait()
            # so that file handles of popen get closed
            try:
                job_future.wait()
            except (OSError, self.pb.ProcessExecutionError) as err:
                self._failed[job_id] = (job_future, err)
            return

    def _increase_poll_interval(self):
        self._poll_interval = min(
            2 * self._poll_interval, MAX_POLL_INTERVAL
        )

    def _wait_on_running(self, max_procs):
        max_procs = max(max_procs, 0)
        self._poll_interval = DEFAULT_POLL_INTERVAL
        while True:
            # first check if any jobs are done
            self._postproc_done_futures()
            next_job_id = self._get_next_job()
            if self._jobs and next_job_id is None:
                # there are more jobs,
                # but they have unfinished dependencies
                time.sleep(self._poll_interval)
                self._increase_poll_interval()
                continue
            if len(self._futures) <= max_procs:
                return
            time.sleep(self._poll_interval)
            self._increase_poll_interval()

    def dispatch(self):
        self._wait_on_running(self.max_procs)
        job_id = self._get_next_job()
        if job_id is None:
            return
        if self._verbose:
            print("starting", job_id)
        job = self._jobs.pop(job_id)
        job_future = job & self.pb.BG
        self._start_times[job_id] = time.time()
        self._futures[job_id] = job_future
        return job_id

    def wait(self):
        self._wait_on_running(max_procs=0)

    def run(self):
        while len(self._jobs) > 0:
            self.dispatch()
        self.wait()

    def run_and_iter(self):
        yielded_job_ids = set()
        while True:
            if len(self._jobs) > 0:
                self.dispatch()

            self._postproc_done_futures()
            if len(self._futures) >= self.max_procs:
                # wait until one is finished
                self._wait_on_running(self.max_procs - 1)

            done_job_ids = set(self._done)
            unyielded_job_ids = done_job_ids.difference(yielded_job_ids)
            for job_id, job in iteritems(self._done):
                if job_id in unyielded_job_ids:
                    yield self._done[job_id]
            yielded_job_ids.update(unyielded_job_ids)

            unfinished_jobs = len(self._jobs) + len(self._futures)
            if unfinished_jobs == 0 and len(unyielded_job_ids) == 0:
                return

    def __getitem__(self, job_id):
        return self._done[job_id]

    def __iter__(self):
        """Iterates over the futures as they are finished.

        Note: Ordering is not guaranteed.
        """
        return self.run_and_iter()
Python
0
@@ -107,17 +107,17 @@ = %220.2. -1 +2 %22%0A%0Aimpor @@ -2848,17 +2848,16 @@ (self):%0A -%0A
87a14ba6b2a2fff0ab80ff204119795a8e4173f8
Update mongodb_atlas.py
mongodb_atlas/mongodb_atlas.py
mongodb_atlas/mongodb_atlas.py
#!/usr/bin/python
import json
import urllib
import urllib.request as urlconnection
from urllib.error import URLError, HTTPError
from urllib.request import ProxyHandler

plugin_version = 1
heartbeat_required = "true"
resultjson={}

metrics_units={
    "disksize":"GB"
}

public_key = ""
private_key = ""
group_id= ""

def metrics_collector():
    resultjson={}
    try:
        url = "https://cloud.mongodb.com/api/atlas/v1.0/groups/"+group_id+"/clusters/Cluster0?pretty=true"
        if public_key and private_key:
            password_mgr = urlconnection.HTTPPasswordMgrWithDefaultRealm()
            password_mgr.add_password(None,url, public_key ,private_key)
            auth_handler = urlconnection.HTTPDigestAuthHandler(password_mgr)
            if auth_handler is not None :
                opener = urlconnection.build_opener(auth_handler)
                urlconnection.install_opener(opener)
        data = urlconnection.urlopen(url).read()
        data=json.loads(data)
        new_data = {}
        new_data["clustertype"]=data["clusterType"]
        new_data["disksize"]=data["diskSizeGB"]
        new_data["mongodb.majorversion"]=data["mongoDBMajorVersion"]
        new_data["mongodb.version"]=data["mongoDBVersion"]
        new_data["mongo.uri.updated"]=data["mongoURIUpdated"]
        new_data["name"]=data["name"]
        new_data["numshards"]=data["numShards"]
        new_data["pitenabled"]=data["pitEnabled"]
        new_data["provider.backup.enabled"]=data["providerBackupEnabled"]
        new_data["providername"]=data["providerSettings"]["providerName"]
        new_data["maxinstance.size"]=data["providerSettings"]["autoScaling"]["compute"]["maxInstanceSize"]
        new_data["mininstance.size"]=data["providerSettings"]["autoScaling"]["compute"]["minInstanceSize"]
        new_data["replication.factor"]=data["replicationFactor"]
        new_data["analytics.nodes"]=data["replicationSpec"]["ASIA_SOUTH_1"]["analyticsNodes"]
        new_data["electable.nodes"]=data["replicationSpec"]["ASIA_SOUTH_1"]["electableNodes"]
        new_data["priority"]=data["replicationSpec"]["ASIA_SOUTH_1"]["priority"]
        new_data["readonly.nodes"]=data["replicationSpec"]["ASIA_SOUTH_1"]["readOnlyNodes"]
        new_data["zonename"]=data["replicationSpecs"][0]["zoneName"]
        new_data["rootcert.type"]=data["rootCertType"]
        new_data["srvaddress"]=data["srvAddress"]
        new_data["statename"]=data["stateName"]
        return new_data
    except Exception as e:
        resultjson["msg"]=str(e)
        resultjson["status"]=0
        return resultjson

if __name__ == '__main__':
    import argparse
    parser=argparse.ArgumentParser()
    parser.add_argument('--group_id',help="group ID of mongodb_atlas_cluster",type=str)
    parser.add_argument('--public_key',help="public key of mongodb_atlas",type=str)
    parser.add_argument('--private_key',help="Private key for mongodb_atlas",type=str)
    args=parser.parse_args()
    if args.group_id:
        group_id=args.group_id
    if args.public_key:
        public_key=args.public_key
    if args.private_key:
        private_key=args.private_key
    resultjson=metrics_collector()
    resultjson['plugin_version'] = plugin_version
    resultjson['heartbeat_required'] = heartbeat_required
    resultjson['units'] = metrics_units
    print(json.dumps(resultjson, indent=4, sort_keys=True))
Python
0.000001
@@ -1143,17 +1143,17 @@ %22mongodb -. +_ majorver @@ -1212,17 +1212,17 @@ %22mongodb -. +_ version%22 @@ -1273,13 +1273,13 @@ ongo -.uri. +_uri_ upda @@ -1474,16 +1474,16 @@ ider -. +_ backup -. +_ enab @@ -1613,33 +1613,33 @@ ata%5B%22maxinstance -. +_ size%22%5D=data%5B%22pro @@ -1728,17 +1728,17 @@ instance -. +_ size%22%5D=d @@ -1835,17 +1835,17 @@ lication -. +_ factor%22%5D @@ -1898,17 +1898,17 @@ nalytics -. +_ nodes%22%5D= @@ -1992,17 +1992,17 @@ lectable -. +- nodes%22%5D= @@ -2166,17 +2166,17 @@ readonly -. +_ nodes%22%5D= @@ -2327,17 +2327,17 @@ rootcert -. +_ type%22%5D=d
04c67e99af363cd8eea4414f59a9294a84faaa6d
Fix test layout
tests/api/test_views.py
tests/api/test_views.py
# -*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.utils.encoding import smart_str
from bakery.auth.models import BakeryUser
from bakery.cookies.models import Cookie
from bakery.utils.test import read
import httpretty


class TestApi(TestCase):

    def test_cookies_list_empty(self):
        resp = self.client.get('/api/v1/cookies/list/')
        self.assertEqual(resp.content, b'[]')

    def test_cookies_list(self):
        BakeryUser.objects.create_user('user')
        user = BakeryUser.objects.get(username='user')
        cookie = Cookie.objects.create(
            name='test',
            owner_name='test',
            url='http://example.com/unique',
            owner=user,
            backend='github'
        )
        resp = self.client.get('/api/v1/cookies/list/')
        data = json.loads(smart_str(resp.content))
        self.assertEqual(
            data,
            [{
                "url": "http://example.com/unique",
                "description": "",
                "name": "test",
                "last_change": None
            }]
        )

    @httpretty.activate
    def test_cookies_list(self):
        httpretty.register_uri(httpretty.GET,
            'https://api.github.com/repos/muffins-on-dope/bakery',
            body=read(__file__, '..', '_replay_data', 'bakery-repository'),
            content_type='application/json; charset=utf-8'
        )
        self.client.post('/api/v1/cookies/new/',
            json.dumps({'url': 'git@github.com:muffins-on-dope/bakery.git'}),
            content_type='application/json',
        )
        self.assertEqual(Cookie.objects.count(), 1)
Python
0.000001
@@ -16,16 +16,34 @@ tf-8 -*- +%0A%0Aimport httpretty %0Aimport @@ -47,16 +47,17 @@ rt json%0A +%0A from dja @@ -167,16 +167,17 @@ eryUser%0A +%0A from bak @@ -244,33 +244,16 @@ rt read%0A -import httpretty%0A %0A%0Aclass @@ -560,17 +560,8 @@ - cookie = Coo @@ -1127,38 +1127,41 @@ def test_cookie -s_l +_reg ist +er (self):%0A @@ -1400,16 +1400,16 @@ =utf-8'%0A + @@ -1411,17 +1411,16 @@ )%0A%0A -%0A
376ba2cf694dec62fc784ee2346b5cff2c7a7d93
change debug level
python/erase-pdf-wartermark-image.py
python/erase-pdf-wartermark-image.py
#!/usr/bin/env python3
#ref: https://github.com/pymupdf/PyMuPDF-Utilities/blob/master/image-replacement/remover.py
#required pkgs on Fedora: mupdf python3-PyMuPDF

import fitz  #python3 pymupdf module
import io,os,sys
from PIL import Image

usage = f"Usage: {sys.argv[0]} <pdf-file-with-wartermark-image> [-h] [-d|-debug] [-n<page-number>] [-c<cmp-index>]"
path = None
base_page_idx = 0
cmpidx = -1
debug = 0
for arg in sys.argv[1:]:
    if (arg[0] != '-'):
        if (path == None):
            path = arg
    else:
        if (arg[:2] == "-h"):
            print(usage); exit(0)
        elif (arg[:2] == "-d"):
            debug += 1
        elif (arg[:2] == "-n"):
            base_page_idx = int(arg[2:])
        elif (arg[:2] == "-c"):
            cmpidx = int(arg[2:])
if (path == None):
    print(usage)
    exit(1)
if (not os.path.isfile(path)):
    print(f"[ERROR] file {path} not exist or is not a file.")
    exit(1)
pdfname = path.split(".")[0]

def imginfo_cmp(img1, img2):
    if (img1[2] == img2[2] and img1[3] == img2[3] and
        img1[4] == img2[4] and img1[5] == img2[5] and img1[8] == img2[8]):
        return True
    else:
        return False

def imgfobj_cmp(imgf_obj1, imgf_obj2):
    if (len(imgf_obj1['image']) == len(imgf_obj2['image'])):
        return True
    else:
        return False

def percent_of(num_a, num_b):
    return round(((num_a / num_b) * 100.0), 2)

from collections import namedtuple
WMImage = namedtuple('WMImage', ['info', 'imgf', 'pct'])

pdf = fitz.open(path)
npages = len(pdf)
min_occurrence = 99.90
base_page = pdf[base_page_idx]
base_imgs = base_page.get_images()
if (debug):
    print(f"[DEBUG] base_page_imgs {base_imgs}")

#scan and detect the wartermark image. here we assume:
#if an image appears in all pages, intend it's wartermark image
#Note: if this assumption does not hold, more code & options need
#to be added to allow the user to specify the 'base_page_idx' and
#frequency of occurrence of warter-mark in pages 'min_occurrence'
print(f"[INFO] scanning and detecting wartermark image in pdf {path} ...")
wm_images = []  #yes, I found there are more than 1 image match, don't know why
for baseimg in base_imgs:
    baseimg_obj = pdf.extract_image(baseimg[0])
    nimg = 0
    for index in range(npages):
        page = pdf[index]
        for img in page.get_images():
            if (imginfo_cmp(img, baseimg)):
                imgf_obj = pdf.extract_image(img[0])
                if (imgfobj_cmp(imgf_obj, baseimg_obj)):
                    nimg += 1
    pct = percent_of(nimg, npages)
    if (pct >= min_occurrence):
        wm_images.append(WMImage(baseimg, baseimg_obj, pct))

if (len(wm_images) == 0):
    print(f"[WARN] did not find wartermark image in {path}")
    exit(1)
nwmimg = len(wm_images)
if (cmpidx < 0):
    cmpidx += nwmimg
print(f"[INFO] detected {nwmimg} wartermark images:")
for i in range(nwmimg):
    info, imgf, pct = wm_images[i].info, wm_images[i].imgf, wm_images[i].pct
    print(f"[INFO] wartermark image {i}:\n[INFO] |-> info: {info}\n[INFO] |-> data-size: {len(imgf['image'])}\n[INFO] `-> occurrence: {pct}")

#generate wartermark image file[s] for debug
if (debug):
    for i in range(nwmimg):
        info, imgf = wm_images[i].info, wm_images[i].imgf
        wmimg_bytes = imgf["image"]  #get wm-image data/bytes
        wmimg_ext = imgf["ext"]  #get wm-image extension/type
        wmimgf_path = f"{path.replace('.pdf','')}-wartermark-image{i}.{wmimg_ext}"
        print(f"[INFO] generate warter makr image file: {wmimgf_path} ...")
        wmimgf = Image.open(io.BytesIO(wmimg_bytes))
        wmimgf.save(wmimgf_path)

print(f"[INFO] erase wartermark image {cmpidx} from pages ...")
# make a small 100% transparent pixmap (of just any dimension)
pix = fitz.Pixmap(fitz.csGRAY, (0, 0, 1, 1), 1)
pix.clear_with()  # clear all samples bytes to 0x00
wm_image, wm_imagef_obj = wm_images[cmpidx].info, wm_images[cmpidx].imgf
for index in range(npages):
    page = pdf[index]
    imgs = page.get_images()
    if (debug and index == base_page_idx):
        print(f"[DEBUG] base_page_imgs {imgs}")
    if (debug > 1):
        print(f"page{index}: {imgs}")
    page.clean_contents()  # unify page's /Contents into one
    wmimg_xrefs = []
    for img in imgs:
        if (imginfo_cmp(img, wm_image)):
            imgf_obj = pdf.extract_image(img[0])
            if (imgfobj_cmp(imgf_obj, wm_imagef_obj)):
                wmimg_xrefs.append(img[0])
    # insert new image just anywhere
    new_xref = page.insert_image(page.rect, pixmap=pix)
    # copy over definition and stream of new image
    for img_xref in wmimg_xrefs:
        pdf.xref_copy(new_xref, img_xref)
    # there now is a second /Contents object, showing new image
    cont_xrefs = page.get_contents()
    # make sure that new /Contents(cont_xrefs[1]) is forgotten
    page.set_contents(cont_xrefs[0])
    page.clean_contents()  # unify page's /Contents into one again

new_path = f"{path.replace('.pdf','')}-no-wartermark.pdf"
print(f"[INFO] generate new pdf: {new_path} ...")
pdf.ez_save(new_path, garbage=4)
import subprocess
subprocess.run(["ls", "-lh", new_path, path])
Python
0
@@ -1608,24 +1608,28 @@ ()%0Aif (debug + %3E 1 ):%0A print @@ -3995,16 +3995,20 @@ (debug +%3E 1 and inde @@ -4088,25 +4088,25 @@ if (debug %3E -1 +2 ):%0A p
6940035d7827a6a2aa719e537f122c07a91bd7c1
support werkzeug==1.0.0
tests/apps/multi/app.py
tests/apps/multi/app.py
import os

from flask import Flask, render_template
from werkzeug.wsgi import SharedDataMiddleware

app = Flask(__name__)


@app.route('/')
def hello_world():
    return render_template('index.html')

app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
    '/': os.path.join(os.path.dirname(__file__), 'static')
})

app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
    '/': os.path.join(os.path.dirname(__file__), 'static/.tmp')
})

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
Python
0.000028
@@ -44,16 +44,25 @@ emplate%0A +try:%0A from wer @@ -99,16 +99,105 @@ ddleware +%0Aexcept ImportError:%0A from werkzeug.middleware.shared_data import SharedDataMiddleware %0A%0Aapp =
d45bbe102efec23656d1329b5c3e6a785c69acee
switch base test cases of pickle backend from tests.backend.ini to tests.backend.common
tests/backend/pickle.py
tests/backend/pickle.py
#
# Copyright (C) 2017 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring
from __future__ import absolute_import

try:
    import anyconfig.backend.pickle as TT
except ImportError:
    TT = None

import tests.backend.ini
from tests.common import dicts_equal


CNF_0 = dict(a=0, b="bbb", c=5, sect0=dict(d=["x", "y", "z"]))


class Test10(tests.backend.ini.Test10):

    cnf = CNF_0
    cnf_s = TT.pickle.dumps(cnf)
    load_options = dump_options = dict(protocol=TT.pickle.HIGHEST_PROTOCOL)
    is_order_kept = False

    def setUp(self):
        self.psr = TT.Parser()


class Test20(tests.backend.ini.Test20):

    psr_cls = TT.Parser
    cnf = CNF_0
    cnf_s = TT.pickle.dumps(cnf, protocol=TT.pickle.HIGHEST_PROTOCOL)
    cnf_fn = "conf0.pkl"

    def test_22_dump__w_special_option(self):
        self.psr.dump(self.cnf, self.cpath,
                      protocol=TT.pickle.HIGHEST_PROTOCOL)
        cnf = self.psr.load(self.cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

# vim:sw=4:ts=4:et:
Python
0.000001
@@ -50,16 +50,51 @@ at.com%3E%0A +# Copyright (C) 2017 Red Hat, Inc.%0A # Licens @@ -137,16 +137,52 @@ ocstring +,invalid-name,too-few-public-methods %0Afrom __ @@ -218,17 +218,8 @@ rt%0A%0A -try:%0A impo @@ -256,456 +256,84 @@ TT%0A -except ImportError:%0A TT = None%0A%0Aimport tests.backend.ini%0Afrom tests.common import dicts_equal%0A%0A%0ACNF_0 = dict(a=0, b=%22bbb%22, c=5, sect0=dict(d=%5B%22x%22, %22y%22, %22z%22%5D))%0A%0A%0Aclass Test10(tests.backend.ini.Test10):%0A%0A cnf = CNF_0%0A cnf_s = TT.pickle.dumps(cnf)%0A load_options = dump_options = dict(protocol=TT.pickle.HIGHEST_PROTOCOL)%0A is_order_kept = False%0A%0A def setUp(self):%0A self.psr = TT.Parser()%0A%0A%0Aclass Test20(tests.backend.ini.Test20 +import tests.backend.common as TBC%0A%0A%0Aclass HasParserTrait(TBC.HasParserTrait ):%0A%0A @@ -343,12 +343,8 @@ psr -_cls = T @@ -351,16 +351,18 @@ T.Parser +() %0A cnf @@ -368,13 +368,17 @@ f = +TBC. CNF_ -0 +1 %0A @@ -409,323 +409,218 @@ (cnf -, protocol=TT.pickle.HIGHEST_PROTOCOL)%0A cnf_fn = %22conf0.pkl%22%0A%0A def t +)%0A%0A%0Aclass Test_10(TBC.T est_ -22 +10 _dump -__w_special_option(self):%0A self.psr.dump(self.cnf, self.cpath,%0A protocol=TT.pickle.HIGHEST_PROTOCOL)%0A cnf = self.psr.load(self.cpath)%0A self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf)) +s_and_loads, HasParserTrait):%0A%0A load_options = dump_options = dict(protocol=TT.pickle.HIGHEST_PROTOCOL)%0A%0A%0Aclass Test_20(TBC.Test_20_dump_and_load, HasParserTrait):%0A%0A pass %0A%0A#
779e74593b40f0e5e5c50c684dd250f771918b77
Fix tests
pendulum/locales/locale.py
pendulum/locales/locale.py
# -*- coding: utf-8 -*-
import os
import re

from typing import Union, Optional, Any
from importlib import import_module

from pendulum.utils._compat import basestring
from pendulum.utils._compat import decode


class Locale:
    """
    Represent a specific locale.
    """

    _cache = {}

    def __init__(self, locale, data):  # type: (str, Any) -> None
        self._locale = locale
        self._data = data
        self._key_cache = {}

    @classmethod
    def load(cls, locale):  # type: (Union[str, Locale]) -> Locale
        if isinstance(locale, Locale):
            return locale

        locale = cls.normalize_locale(locale)
        if locale in cls._cache:
            return cls._cache[locale]

        # Checking locale existence
        actual_locale = locale
        locale_path = os.path.join(os.path.dirname(__file__), actual_locale)
        while not os.path.exists(locale_path):
            if actual_locale == locale:
                raise ValueError("Locale [{}] does not exist.".format(locale))

            actual_locale = actual_locale.split("_")[0]

        m = import_module("pendulum.locales.{}.locale".format(actual_locale))

        cls._cache[locale] = cls(locale, m.locale)

        return cls._cache[locale]

    @classmethod
    def normalize_locale(cls, locale):  # type: (str) -> str
        m = re.match("([a-z]{2})[-_]([a-z]{2})", locale, re.I)
        if m:
            return "{}_{}".format(m.group(1).lower(), m.group(2).lower())
        else:
            return locale.lower()

    def get(self, key, default=None):  # type: (str, Optional[Any]) -> Any
        if key in self._key_cache:
            return self._key_cache[key]

        parts = key.split(".")

        try:
            result = self._data[parts[0]]
            for part in parts[1:]:
                result = result[part]
        except KeyError:
            result = default

        if isinstance(result, basestring):
            result = decode(result)

        self._key_cache[key] = result

        return self._key_cache[key]

    def translation(self, key):  # type: (str) -> Any
        return self.get("translations.{}".format(key))

    def plural(self, number):  # type: (int) -> str
        return decode(self._data["plural"](number))

    def ordinal(self, number):  # type: (int) -> str
        return decode(self._data["ordinal"](number))

    def ordinalize(self, number):  # type: (int) -> str
        ordinal = self.get("custom.ordinal.{}".format(self.ordinal(number)))

        if not ordinal:
            return decode("{}".format(number))

        return decode("{}{}".format(number, ordinal))

    def match_translation(self, key, value):
        translations = self.translation(key)
        if value not in translations.values():
            return None

        return {v: k for k, v in translations.items()}[value]

    def __repr__(self):
        return "{}('{}')".format(self.__class__.__name__, self._locale)
Python
0.000003
@@ -16,16 +16,56 @@ tf-8 -*- +%0Afrom __future__ import unicode_literals %0A%0Aimport
5ee67f15bff1111a9ae40bb8972e4bca4987102b
fix code style #9660
python/paddle/dataset/uci_housing.py
python/paddle/dataset/uci_housing.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.

This module will download dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators.
"""

import os
import numpy as np
import tempfile
import tarfile
import os
import paddle.dataset.common

__all__ = ['train', 'test']

URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
    'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
    'PTRATIO', 'B', 'LSTAT', 'convert'
]

UCI_TRAIN_DATA = None
UCI_TEST_DATA = None

FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'
FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'


def feature_range(maximums, minimums):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    feature_num = len(maximums)
    ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
    ax.set_title('feature scale')
    plt.xticks(range(feature_num), feature_names)
    plt.xlim([-1, feature_num])
    fig.set_figheight(6)
    fig.set_figwidth(10)
    if not os.path.exists('./image'):
        os.makedirs('./image')
    fig.savefig('image/ranges.png', dpi=48)
    plt.close(fig)


def load_data(filename, feature_num=14, ratio=0.8):
    global UCI_TRAIN_DATA, UCI_TEST_DATA
    if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
        return

    data = np.fromfile(filename, sep=' ')
    data = data.reshape(data.shape[0] / feature_num, feature_num)
    maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
        axis=0) / data.shape[0]
    feature_range(maximums[:-1], minimums[:-1])
    for i in xrange(feature_num - 1):
        data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
    offset = int(data.shape[0] * ratio)
    UCI_TRAIN_DATA = data[:offset]
    UCI_TEST_DATA = data[offset:]


def train():
    """
    UCI_HOUSING training set creator.

    It returns a reader creator, each sample in the reader is features after
    normalization and price number.

    :return: Training reader creator
    :rtype: callable
    """
    global UCI_TRAIN_DATA
    load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))

    def reader():
        for d in UCI_TRAIN_DATA:
            yield d[:-1], d[-1:]

    return reader


def test():
    """
    UCI_HOUSING test set creator.

    It returns a reader creator, each sample in the reader is features after
    normalization and price number.

    :return: Test reader creator
    :rtype: callable
    """
    global UCI_TEST_DATA
    load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))

    def reader():
        for d in UCI_TEST_DATA:
            yield d[:-1], d[-1:]

    return reader


def fluid_model():
    parameter_tar = paddle.dataset.common.download(
        FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL,
        'fit_a_line.fluid.tar')

    tar = tarfile.TarFile(parameter_tar, mode='r')
    dirpath = tempfile.mkdtemp()
    tar.extractall(path=dirpath)

    return dirpath


def predict_reader():
    """
    It returns just one tuple data to do inference.

    :return: one tuple data
    :rtype: tuple
    """
    global UCI_TEST_DATA
    load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
    return (UCI_TEST_DATA[0][:-1],)

def fetch():
    paddle.dataset.common.download(URL, 'uci_housing', MD5)


def convert(path):
    """
    Converts dataset to recordio format
    """
    paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
    paddle.dataset.common.convert(path, test(), 1000, "uci_houseing_test")
Python
0.000001
@@ -4059,16 +4059,17 @@ :-1%5D,)%0A%0A +%0A def fetc
f0ef4f5e269d7f2d7fd347e8f458c1c9ce1ffb34
Fix bug in redis hook
mqueue/hooks/redis/__init__.py
mqueue/hooks/redis/__init__.py
import redis
import time
from mqueue.conf import DOMAIN
from mqueue.hooks.redis import serializer
from mqueue.conf import HOOKS

conf = HOOKS["redis"]
R = redis.StrictRedis(host=conf["host"], port=conf["port"], db=conf["db"])
event_num = int(time.time())


def save(event, conf):
    name = DOMAIN+"_event"+str(event_num)
    event.request = event.request.replace("\n", "//")
    data = serializer.Pack(event)
    R.set(name, data)
Python
0
@@ -247,17 +247,17 @@ .time()) - +%0A %0A%0Adef sa @@ -273,16 +273,50 @@ conf):%0A + global event_num%0A global R%0A name @@ -328,17 +328,19 @@ MAIN -+ + + %22_event%22 +str @@ -335,17 +335,19 @@ %22_event%22 -+ + + str(even @@ -440,18 +440,16 @@ k(event) - %0A R.s @@ -462,8 +462,28 @@ e, data) +%0A event_num += 1%0A
54035774d3b9aece86e68f047e9ff4a270d344cb
add mountain climbing emoji #2001
c2corg_ui/format/emoji_databases/c2c_activities.py
c2corg_ui/format/emoji_databases/c2c_activities.py
SVG_CDN = "/static/img/documents/activities/"

emoji = {
    ":rock_climbing:": {
        "category": "activitiy",
        "name": "rock climbing",
        "svg_name": "rock_climbing",
        "unicode": "1f9d7",
    },
    ":skitouring:": {
        "category": "activitiy",
        "name": "ski touring",
        "svg_name": "skitouring",
        "unicode": "26f7"
    },
    ":hiking:": {
        "category": "activitiy",
        "name": "hiking",
        "svg_name": "hiking",
    },
    ":ice_climbing:": {
        "category": "activitiy",
        "name": "ice climbing",
        "svg_name": "ice_climbing",
    },
    ":mountain_biking:": {
        "category": "activitiy",
        "name": "mountain biking",
        "svg_name": "mountain_biking",
    },
    ":paragliding:": {
        "category": "activitiy",
        "name": "paragliding",
        "svg_name": "paragliding",
    },
    ":slacklining:": {
        "category": "activitiy",
        "name": "slacklining",
        "svg_name": "slacklining",
    },
    ":snow_ice_mixed:": {
        "category": "activitiy",
        "name": "snow ice mixed",
        "svg_name": "snow_ice_mixed",
    },
    ":snowshoeing:": {
        "category": "activitiy",
        "name": "snowshoeing",
        "svg_name": "snowshoeing",
    },
    ":via_ferrata:": {
        "category": "activitiy",
        "name": "via ferrata",
        "svg_name": "via_ferrata",
    },
}

aliases = {}
Python
0.999999
@@ -1407,16 +1407,162 @@ %0A %7D,%0A + %22:mountain_climbing:%22: %7B%0A %22category%22: %22activitiy%22,%0A %22name%22: %22mountain climbing%22,%0A %22svg_name%22: %22mountain_climbing%22,%0A %7D%0A %7D%0A%0Aalias
f58d82173b7defbed651a1eaec2c318e7bc17911
add giant queue test
qiita_db/test/test_sql_connection.py
qiita_db/test/test_sql_connection.py
from unittest import TestCase, main

from qiita_db.sql_connection import SQLConnectionHandler
from qiita_db.exceptions import QiitaDBExecutionError
from qiita_core.util import qiita_test_checker


@qiita_test_checker()
class TestConnHandler(TestCase):
    def test_create_queue(self):
        self.conn_handler.create_queue("toy_queue")
        self.assertEqual(self.conn_handler.list_queues(), ["toy_queue"])

    def test_run_queue_placeholders(self):
        self.conn_handler.create_queue("toy_queue")
        self.conn_handler.add_to_queue(
            "toy_queue", "INSERT INTO qiita.qiita_user (email, name, password,"
            "phone) VALUES (%s, %s, %s, %s) RETURNING email, password",
            ['insert@foo.bar', 'Toy', 'pass', '111-111-11112'])
        self.conn_handler.add_to_queue(
            "toy_queue", "UPDATE qiita.qiita_user SET user_level_id = 1, "
            "phone = '222-222-2221' WHERE email = %s AND password = %s",
            ['{0}', '{1}'])
        self.conn_handler.execute_queue("toy_queue")
        obs = self.conn_handler.execute_fetchall(
            "SELECT * from qiita.qiita_user WHERE email = %s",
            ['insert@foo.bar'])
        exp = [['insert@foo.bar', 1, 'pass', 'Toy', None, None,
                '222-222-2221', None, None, None]]
        self.assertEqual(obs, exp)

    def test_queue_fail(self):
        """Fail if no results data exists for substitution"""
        with self.assertRaises(QiitaDBExecutionError):
            self.conn_handler = SQLConnectionHandler()
            self.conn_handler.create_queue("toy_queue")
            self.conn_handler.add_to_queue(
                "toy_queue",
                "INSERT INTO qiita.qiita_user (email, name, password) VALUES "
                "(%s, %s, %s)",
                ['somebody@foo.bar', 'Toy', 'pass'])
            self.conn_handler.add_to_queue(
                "toy_queue", "UPDATE qiita.qiita_user SET user_level_id = 1 "
                "WHERE email = %s and password = %s", [{0}, {1}])
            self.conn_handler.execute_queue("toy_queue")

        # make sure roll back correctly
        obs = self.conn_handler.execute_fetchall(
            "SELECT * from qiita.qiita_user WHERE email = %s",
            ['somebody@foo.bar'])
        self.assertEqual(obs, [])


if __name__ == "__main__":
    main()
Python
0
@@ -1417,67 +1417,8 @@ %22%22%22%0A - with self.assertRaises(QiitaDBExecutionError):%0A @@ -1464,20 +1464,16 @@ ndler()%0A - @@ -1516,36 +1516,32 @@ queue%22)%0A - - self.conn_handle @@ -1560,36 +1560,32 @@ ue(%0A - %22toy_queue%22,%0A @@ -1593,20 +1593,16 @@ - - %22INSERT @@ -1656,20 +1656,16 @@ ALUES %22%0A - @@ -1721,36 +1721,32 @@ pass'%5D)%0A - self.conn_handle @@ -1765,36 +1765,32 @@ ue(%0A - - %22toy_queue%22, %22UP @@ -1847,20 +1847,16 @@ - %22WHERE e @@ -1896,16 +1896,72 @@ %7D, %7B1%7D%5D) +%0A%0A with self.assertRaises(QiitaDBExecutionError): %0A @@ -2002,32 +2002,33 @@ ue(%22toy_queue%22)%0A +%0A # make s @@ -2233,16 +2233,1224 @@ s, %5B%5D)%0A%0A + def test_huge_queue(self):%0A self.conn_handler = SQLConnectionHandler()%0A self.conn_handler.create_queue(%22toy_queue%22)%0A # add tons of inserts to queue%0A for x in range(120):%0A self.conn_handler.add_to_queue(%0A %22toy_queue%22,%0A %22INSERT INTO qiita.qiita_user (email, name, password) VALUES %22%0A %22(%25s, %25s, %25s)%22, %5B'%25dsomebody@foo.bar' %25 x, 'Toy', 'pass'%5D)%0A # add failing insert as final item in queue%0A self.conn_handler.add_to_queue(%0A %22toy_queue%22,%0A %22INSERT INTO qiita.qiita_BADTABLE (email, name, password) VALUES %22%0A %22(%25s, %25s, %25s)%22, %5B'%25dsomebody@foo.bar' %25 x, 'Toy', 'pass'%5D)%0A self.conn_handler.add_to_queue(%0A %22toy_queue%22, %22UPDATE qiita.qiita_user SET user_level_id = 1 %22%0A %22WHERE email = %25s and password = %25s%22, %5B%7B0%7D, %7B1%7D%5D)%0A with self.assertRaises(QiitaDBExecutionError):%0A self.conn_handler.execute_queue(%22toy_queue%22)%0A%0A # make sure roll back correctly%0A obs = self.conn_handler.execute_fetchall(%0A %22SELECT * from qiita.qiita_user WHERE email LIKE %22%0A %22'%25somebody@foo.bar%25'%22)%0A self.assertEqual(obs, %5B%5D)%0A if __nam
d5167d8ba1b3107e5ce121eca76b5496bf8d6448
Truncate a long log message.
qipipe/registration/ants/template.py
qipipe/registration/ants/template.py
import os
import logging
import envoy
from .ants_error import ANTSError


def create_template(metric, files):
    """
    Builds a template from the given image files.

    :param metric: the similarity metric
    :param files: the image files
    :return: the template file name
    """
    CMD = "buildtemplateparallel.sh -d 2 -c 2 -j 4 -d 2 -s {metric} -o {output} {files}"
    PREFIX = 'reg_'
    SUFFIX = 'template.nii.gz'
    tmpl = PREFIX + SUFFIX
    if os.path.exists(tmpl):
        logging.info("Registration template already exists: %s" % tmpl)
        return tmpl
    cmd = CMD.format(metric=metric.name, output=PREFIX, files=' '.join(files))
    logging.info("Building the %s registration template with the following command:" % tmpl)
    logging.info(cmd)
    r = envoy.run(cmd)
    if r.status_code:
        logging.error("Build registration template failed with error code %d" % r.status_code)
        logging.error(r.std_err)
        raise ANTSError("Build registration template unsuccessful; see the log for details")
    if not os.path.exists(tmpl):
        logging.error("Build registration template was not created.")
        raise ANTSError("Build registration template unsuccessful; see the log for details")
    logging.info("Built the registration template %s." % tmpl)
    return tmpl
Python
0.000003
@@ -761,24 +761,29 @@ ing.info(cmd +%5B:80%5D )%0A r = en
dddcc1574b3468d777b2a2f70cbc3410b8b955e0
Fix docstring in dilated_convolution_2d
chainer/links/connection/dilated_convolution_2d.py
chainer/links/connection/dilated_convolution_2d.py
import numpy

from chainer.functions.connection import dilated_convolution_2d
from chainer import initializers
from chainer import link


class DilatedConvolution2D(link.Link):

    """Two-dimensional dilated convolutional layer.

    This link wraps the :func:`~chainer.functions.dilated_convolution_2d`
    function and holds the filter weight and bias vector as parameters.

    Args:
        in_channels (int): Number of channels of input arrays.
            If it is ``None`` or omitted, parameter initialization will be
            deferred until the first forward data pass at which time the size
            will be determined.
        out_channels (int): Number of channels of output arrays.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        dilate (int or pair of ints): Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
        nobias (bool): If ``True``, then this link does not use the bias term.
        initialW (4-D array): Initial weight value. If ``None``, the defaul
            initializer is used. May also be a callable that takes
            ``numpy.ndarray`` or ``cupy.ndarray`` and edits its value.
        initial_bias (1-D array): Initial bias value. If ``None``, the default
            initializer is used. May also be a callable that takes
            ``numpy.ndarray`` or ``cupy.ndarray`` and edits its value.

    .. seealso::
       See :func:`chainer.functions.dilated_convolution_2d` for the definition
       of two-dimensional dilated convolution.

    Attributes:
        W (~chainer.Variable): Weight parameter.
        b (~chainer.Variable): Bias parameter.

    .. admonition:: Example

        There are several ways to make a DilatedConvolution2D link.

        Let an input vector ``x`` be:

        >>> x = np.arange(1 * 3 * 10 * 10, dtype='f').reshape(1, 3, 10, 10)

        1. Give the first three arguments explicitly:

            >>> l = L.DilatedConvolution2D(3, 7, 5)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 6, 6)

        2. Omit ``in_channels`` or fill it with ``None``:

            The below two cases are the same.

            >>> l = L.DilatedConvolution2D(7, 5)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 6, 6)

            >>> l = L.DilatedConvolution2D(None, 7, 5)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 6, 6)

        When you omit the first argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword auguments. So the
        below two cases are the same.

        >>> l = L.DilatedConvolution2D(None, 7, 5, 1, 0, 2)
        >>> y = l(x)
        >>> y.shape
        >>> (1, 7, 2, 2)

        >>> l = L.DilatedConvolution2D(7, 5, stride=1, pad=0, dilate=2)
        >>> y = l(x)
        >>> y.shape
        >>> (1, 7, 2, 2)

    """

    def __init__(self, in_channels, out_channels, ksize=None, stride=1,
                 pad=0, dilate=1, nobias=False, initialW=None,
                 initial_bias=None):
        super(DilatedConvolution2D, self).__init__()
        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None
        self.ksize = ksize
        self.stride = _pair(stride)
        self.pad = _pair(pad)
        self.dilate = _pair(dilate)
        self.out_channels = out_channels
        if initialW is None:
            self.initialW = initializers.HeNormal(1.0 / numpy.sqrt(2))
        else:
            self.initialW = initialW
        # For backward compatibility, the scale of weights is proportional to
        # the square root of wscale.
        self.add_param('W', initializer=initializers._get_initializer(
            initialW))
        if in_channels is not None:
            self._initialize_params(in_channels)

        if nobias:
            self.b = None
        else:
            if initial_bias is None:
                initial_bias = 0
            initial_bias = initializers._get_initializer(initial_bias)
            self.add_param('b', out_channels, initializer=initial_bias)

    def _initialize_params(self, in_channels):
        kh, kw = _pair(self.ksize)
        W_shape = (self.out_channels, in_channels, kh, kw)
        self.W.initialize(W_shape)

    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])
        return dilated_convolution_2d.dilated_convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.dilate)


def _pair(x):
    if hasattr(x, '__getitem__'):
        return x
    return x, x
Python
0.000279
@@ -3068,36 +3068,32 @@ hape%0A - %3E%3E%3E (1, 7, 2, 2)%0A%0A @@ -3227,20 +3227,16 @@ - %3E%3E%3E (1, 7,
682864af6827c47a2c2d814a656af0f44ffe8495
Fix resource used items call
nodeconductor/billing/tasks.py
nodeconductor/billing/tasks.py
import logging

from datetime import timedelta, datetime

from celery import shared_task
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.utils import timezone

from nodeconductor.billing.backend import BillingBackend, BillingBackendError
from nodeconductor.billing.models import PaidResource
from nodeconductor.cost_tracking import CostTrackingRegister
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.structure import SupportedServices
from nodeconductor.structure.models import Customer, Resource


logger = logging.getLogger(__name__)


@shared_task(name='nodeconductor.billing.debit_customers')
def debit_customers():
    """ Fetch a list of shared services (services based on shared settings).
        Calculate the amount of consumed resources "yesterday" (make sure
        this task executed only once a day)
        Reduce customer's balance accordingly
        Stop online resource if needed
    """
    date = datetime.now() - timedelta(days=1)
    start_date = date.replace(hour=0, minute=0, second=0, microsecond=0)
    end_date = start_date + timedelta(days=1, microseconds=-1)

    # XXX: it's just a placeholder, it doesn't work properly now nor implemented anyhow
    #      perhaps it should merely use price estimates..
    for model in SupportedServices.get_resource_models().keys():
        resources = model.objects.filter(
            service_project_link__service__settings__shared=True)

        for resource in resources:
            try:
                data = resource.get_cost(start_date, end_date)
            except NotImplementedError:
                continue
            else:
                resource.customer.debit_account(data['total_amount'])


@shared_task(name='nodeconductor.billing.sync_pricelist')
def sync_pricelist():
    backend = BillingBackend()
    try:
        backend.propagate_pricelist()
    except BillingBackendError as e:
        logger.error("Can't propagade pricelist to %s: %s", backend, e)


@shared_task(name='nodeconductor.billing.sync_billing_customers')
def sync_billing_customers(customer_uuids=None):
    if not isinstance(customer_uuids, (list, tuple)):
        customer_uuids = Customer.objects.all().values_list('uuid', flat=True)

    map(sync_billing_customer.delay, customer_uuids)


@shared_task
def sync_billing_customer(customer_uuid):
    customer = Customer.objects.get(uuid=customer_uuid)
    backend = customer.get_billing_backend()
    backend.sync_customer()
    backend.sync_invoices()


@shared_task(name='nodeconductor.billing.update_today_usage')
def update_today_usage():
    """
    Calculate usage for all paid resources.

    Task counts exact usage amount for numerical options
    and boolean value for the rest.
    Example:
        2015-08-20 13:00    storage-1Gb          20
        2015-08-20 13:00    flavor-g1.small1     1
        2015-08-20 13:00    license-os-centos7   1
        2015-08-20 13:00    support-basic        1
    """
    nc_settings = getattr(settings, 'NODECONDUCTOR', {})
    if not nc_settings.get('ENABLE_ORDER_PROCESSING'):
        return

    for model in PaidResource.get_all_models():
        for resource in model.objects.all():
            update_today_usage_of_resource.delay(resource.to_string())


@shared_task
def update_today_usage_of_resource(resource_str):
    # XXX: this method does ignores cases then VM was offline or online for small periods of time.
    #      It could to be rewritten if more accurate calculation will be needed
    with transaction.atomic():
        resource = next(Resource.from_string(resource_str))
        backend = CostTrackingRegister.get_resource_backend(resource)
        used_items = backend.get_used_items()

        numerical = ['storage', 'users']  # XXX: use consistent method for usage calculation
        content_type = ContentType.objects.get_for_model(resource)
        units = {
            (item.item_type, None if item.item_type in numerical else item.key): item.units
            for item in DefaultPriceListItem.objects.filter(resource_content_type=content_type)}

        now = timezone.now()
        last_update_time = resource.last_usage_update_time or resource.created
        minutes_from_last_usage_update = (now - last_update_time).total_seconds() / 60

        usage = {}
        for item_type, key, val in used_items:
            if val:
                try:
                    unit = units[item_type, None if item_type in numerical else key]
                    usage_per_min = int(round(val * minutes_from_last_usage_update))
                    if usage_per_min:
                        usage[unit] = usage_per_min
                except KeyError:
                    logger.error("Can't find price for usage item %s:%s", key, val)

        resource.order.add_usage(usage)
        resource.last_usage_update_time = timezone.now()
        resource.save(update_fields=['last_usage_update_time'])
Python
0
@@ -3773,16 +3773,24 @@ d_items( +resource )%0A%0A
52b98755a8b26fb50d90b7988ee8ee16053e5c11
Update lint.py to automatically find .py files
lint.py
lint.py
# coding: utf-8
from __future__ import unicode_literals

import os

from pylint.lint import Run


cur_dir = os.path.dirname(__file__)
rc_path = os.path.join(cur_dir, './.pylintrc')

print('Running pylint...')

files = [
    '__init__.py',
    '_osx_ctypes.py',
    '_osx_public_key.py',
    '_osx_symmetric.py',
    '_osx_util.py',
    '_win_util.py',
    'errors.py',
    'kdf.py',
    'keys.py',
    'public_key.py',
    'symmetric.py',
    'util.py',
]

args = ['--rcfile=%s' % rc_path]
args += ['oscrypto/' + f for f in files]

Run(args)
Python
0
@@ -216,319 +216,250 @@ = %5B -%0A '__init__.py',%0A '_osx_ctypes.py',%0A '_osx_public_key.py',%0A '_osx_symmetric.py',%0A '_osx_util.py',%0A '_win_util.py',%0A 'errors +%5D%0Afor root, dirnames, filenames in os.walk('oscrypto/'):%0A for filename in filenames:%0A if not filename.endswith(' .py' -, +): %0A -'kdf.py',%0A 'keys.py',%0A 'public_key.py',%0A 'symmetric.py',%0A 'util.py',%0A%5D%0A%0Aargs = %5B'--rcfile=%25s' %25 rc_path%5D%0Aargs += %5B'oscrypto/' + f for f in + continue%0A files.append(os.path.join(root, filename))%0A%0Aargs = %5B'--rcfile=%25s' %25 rc_path%5D + files -%5D %0A%0ARu
566c97f99668691b19dad7f0cb737157338ec57b
Add language attribute to dataset holder.
load.py
load.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
Load CoNLL16st/CoNLL15st dataset.
"""
__author__ = "GW [http://gw.tnode.com/] <gw.2016@tnode.com>"
__license__ = "GPLv3+"

from files import load_parses, load_raws, load_relations_gold
from words import get_words, get_pos_tags, get_word_metas
from dependencies import get_dependencies
from parsetrees import get_parsetrees
from relations import get_rel_parts, get_rel_types, get_rel_senses, add_relation_tags


def load_all(dataset_dir, doc_ids=None, filter_types=None, filter_senses=None, filter_fn=None):
    """Load whole CoNLL16st dataset by document id."""

    # load all provided files untouched
    parses = load_parses(dataset_dir, doc_ids=doc_ids)
    doc_ids = sorted(parses.keys())
    raws = load_raws(dataset_dir, doc_ids=doc_ids)
    relations_gold = load_relations_gold(dataset_dir, doc_ids=doc_ids, with_senses=True, filter_types=filter_types, filter_senses=filter_senses, filter_fn=filter_fn)
    if relations_gold:
        relationsnos_gold = relations_gold
    else:
        relationsnos_gold = load_relations_gold(dataset_dir, doc_ids=doc_ids, with_senses=False, filter_types=filter_types, filter_senses=filter_senses, filter_fn=filter_fn)

    # extract data by document id and token id
    words = get_words(parses)
    pos_tags = get_pos_tags(parses)
    word_metas = get_word_metas(parses, raws)

    # extract data by document id and token id pairs
    dependencies = get_dependencies(parses)

    # extract data by document id
    parsetrees = get_parsetrees(parses)

    # extract data by relation id
    rel_parts = get_rel_parts(relationsnos_gold)
    rel_ids = sorted(rel_parts.keys())
    rel_types = get_rel_types(relations_gold)
    rel_senses = get_rel_senses(relations_gold)

    # add extra fields
    add_relation_tags(word_metas, rel_types, rel_senses)

    return doc_ids, words, word_metas, pos_tags, dependencies, parsetrees, rel_ids, rel_parts, rel_types, rel_senses, relations_gold


class Conll16stDataset(dict):
    """CoNLL16st dataset holder as dict."""

    def __init__(self, dataset_dir, doc_ids=None, filter_types=None, filter_senses=None, filter_fn=None):
        self.dataset_dir = dataset_dir
        self.filter_types = filter_types
        self.filter_senses = filter_senses
        self.filter_fn = filter_fn

        self['doc_ids'], self['words'], self['word_metas'], self['pos_tags'], self['dependencies'], self['parsetrees'], self['rel_ids'], self['rel_parts'], self['rel_types'], self['rel_senses'], self['relations_gold'] = load_all(dataset_dir, doc_ids, filter_types, filter_senses, filter_fn)

        if not self['doc_ids']:
            raise IOError("Failed to load dataset ({})!".format(dataset_dir))

    def summary(self):
        return "  {}: doc_ids: {}, words: {}, rel_ids: {}, relation tokens: {}".format(self.dataset_dir, len(self['doc_ids']), sum([ len(s) for s in self['words'].itervalues() ]), len(self['rel_ids']), sum([ self['rel_parts'][rel_id]['TokenCount'] for rel_id in self['rel_parts'] ]))
Python
0
@@ -2097,32 +2097,42 @@ lf, dataset_dir, + lang='?', doc_ids=None, f @@ -2345,16 +2345,44 @@ ter_fn%0A%0A + self%5B'lang'%5D = lang%0A @@ -2819,16 +2819,26 @@ n %22 %7B%7D: + lang: %7B%7D, doc_ids @@ -2912,16 +2912,30 @@ set_dir, + self%5B'lang'%5D, len(sel
851d53a68b4c9d8a7ea926d031c9e136b069a820
add abstraction to message handler
examples/accounts/app.py
examples/accounts/app.py
import json
import asyncio

from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers

# TODO ADD possible actions list!
# TODO ADD abstractions to Message Handler!

class ServiceBroker:

    def __init__(self, io_loop, **settings):
        self.io_loop = io_loop
        self.nc = NATS()
        self.logger = settings.get('logger')

    __services = {}

    def create_service(self, service):
        for action_name, action_method in service.actions.items():
            service_name = f"{service.name}.{action_name}"
            self.__services.setdefault(service_name, action_method)

    def available_services(self):
        return self.__services

    async def call_service(self, name, *args, **kwargs):
        try:
            result = await self.__services.get(name)(*args, **kwargs)
        except Exception as err:
            print(f'error {err}')
        return result

    async def message_handler(self, msg):
        subject = msg.subject
        reply = msg.reply
        data = msg.data.decode()
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=subject, reply=reply, data=data))
        data = json.loads(data)
        result = await self.call_service(subject, **data)
        dumped = json.dumps({"result": result})
        await self.nc.publish(reply, dumped.encode())

    async def start(self):
        await self.nc.connect(io_loop=self.io_loop)
        for service_key in self.__services.keys():
            await self.nc.subscribe(f"{service_key}", cb=self.message_handler)
        data = {
            'user_id': 1
        }
        response = await self.nc.timed_request("users.get", json.dumps(data).encode(), 0.050)
        print('response:', response.data.decode())


class Service:
    name = None
    actions = {}


class Users(Service):

    def __init__(self):
        self.name = "users"
        self.actions = {
            'get': self.get_user_name
        }

    async def get_user_name(self, user_id: int) -> dict:
        users = {
            1: {
                'firstname': 'Antonio',
                'lastname': 'Rodrigas'
            }
        }
        user_obj = users.get(user_id, {})
        return user_obj


async def main(loop):
    settings = {'logger': 'console'}
    broker = ServiceBroker(io_loop=loop, **settings)
    broker.create_service(Users())
    print(broker.available_services())
    await broker.start()


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.run_forever()
    loop.close()
Python
0.000001
@@ -215,16 +215,1119 @@ Handler! +%0A# MessageHandler must be able to call methods of Service and control requests%0A%0Aclass MessageHandler:%0A%0A def __init__(self, io_loop, **settings):%0A self.io_loop = io_loop%0A self.nc = NATS()%0A self.logger = settings.get('logger')%0A%0A async def message_handler(self, msg):%0A subject = msg.subject%0A reply = msg.reply%0A data = msg.data.decode()%0A print(%22Received a message on '%7Bsubject%7D %7Breply%7D': %7Bdata%7D%22.format(%0A subject=subject, reply=reply, data=data))%0A data = json.loads(data)%0A result = await self.call_service(subject, **data)%0A dumped = json.dumps(%7B%22result%22: result%7D)%0A await self.nc.publish(reply, dumped.encode())%0A%0A async def start(self):%0A await self.nc.connect(io_loop=self.io_loop)%0A for service_key in self.__services.keys():%0A await self.nc.subscribe(f%22%7Bservice_key%7D%22, cb=self.message_handler)%0A data = %7B%0A 'user_id': 1%0A %7D%0A response = await self.nc.timed_request(%22users.get%22, json.dumps(data).encode(), 0.050)%0A print('response:', response.data.decode()) %0A%0Aclass
10a685d69d6866f86c3db7997e3fcf8b837470e4
add posible actions list
examples/accounts/app.py
examples/accounts/app.py
import json
import asyncio

from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers

class ServiceBroker:

    def __init__(self, io_loop, **settings):
        self.io_loop = io_loop
        self.nc = NATS()
        self.logger = settings.get('logger')

    __services = {}

    def create_service(self, service):
        for action_name, action_method in service.actions.items():
            service_name = f"{service.name}.{action_name}"
            self.__services.setdefault(service_name, action_method)

    def available_services(self):
        return self.__services

    async def call_service(self, name, *args, **kwargs):
        try:
            result = await self.__services.get(name)(*args, **kwargs)
        except Exception as err:
            print(f'error {err}')
        return result

    async def message_handler(self, msg):
        subject = msg.subject
        reply = msg.reply
        data = msg.data.decode()
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=subject, reply=reply, data=data))
        data = json.loads(data)
        result = await self.call_service(subject, **data)
        dumped = json.dumps({"result": result})
        await self.nc.publish(reply, dumped.encode())

    async def start(self):
        await self.nc.connect(io_loop=self.io_loop)
        sid = await self.nc.subscribe("users.*", cb=self.message_handler)
        data = {
            'user_id': 1
        }
        response = await self.nc.timed_request("users.get", json.dumps(data).encode(), 0.050)
        print('response:', response.data.decode())


class Service:
    name = None
    actions = {}


class Users(Service):

    def __init__(self):
        self.name = "users"
        self.actions = {
            'get': self.get_user_name
        }

    async def get_user_name(self, user_id: int) -> dict:
        users = {
            1: {
                'firstname': 'Antonio',
                'lastname': 'Rodrigas'
            }
        }
        user_obj = users.get(user_id, {})
        return user_obj


async def main(loop):
    settings = {'logger': 'console'}
    broker = ServiceBroker(io_loop=loop, **settings)
    broker.create_service(Users())
    print(broker.available_services())
    await broker.start()


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.run_forever()
    loop.close()
Python
0.000019
@@ -138,16 +138,51 @@ ervers%0A%0A +# TODO ADD possible actions list!%0A%0A class Se @@ -1432,13 +1432,62 @@ -sid = +for service_key in self.__services.keys():%0A awa @@ -1511,16 +1511,23 @@ ibe( -%22users.* +f%22%7Bservice_key%7D %22, c
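This change drops the single "users.*" wildcard subscription in favour of one explicit subscription per registered action key. The subject-building step in isolation (service and action names invented for the call):

def subjects_for(service_name, action_names):
    # One explicit NATS subject per action, mirroring the loop the diff adds.
    return [f"{service_name}.{name}" for name in action_names]

print(subjects_for("users", ["get", "create"]))  # ['users.get', 'users.create']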
671ff419731084681edaf3fdc826c1139383c325
add Raises to orthogonal_procrustes docstring
scipy/linalg/_procrustes.py
scipy/linalg/_procrustes.py
""" Solve the orthogonal Procrustes problem. """ from __future__ import division, print_function, absolute_import import numpy as np from .decomp_svd import svd __all__ = ['orthogonal_procrustes'] def orthogonal_procrustes(A, B, compute_scale=False, check_finite=True): """ Compute the matrix solution of the orthogonal Procrustes problem. Given matrices A and B of equal shape, find an orthogonal matrix R that most closely maps A to B [1]_. Note that unlike higher level Procrustes analyses of spatial data, this function only uses orthogonal transformations like rotations and reflections, and it does not use scaling or translation. Parameters ---------- A : (M, N) array_like Matrix to be mapped. B : (M, N) array_like Target matrix. compute_scale : bool, optional True to return the sum of singular values of an intermediate matrix. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : (N, N) ndarray The matrix solution of the orthogonal Procrustes problem. Minimizes the Frobenius norm of dot(A, R) - B, subject to dot(R.T, R) == I. scale : float, optional The sum of singular values of an intermediate matrix. This value is not returned unless specifically requested. References ---------- .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal Procrustes problem", Psychometrica -- Vol. 31, No. 1, March, 1996. """ if check_finite: A = np.asarray_chkfinite(A) B = np.asarray_chkfinite(B) else: A = np.asanyarray(A) B = np.asanyarray(B) if A.ndim != 2: raise ValueError('expected ndim to be 2, but observed %s' % A.ndim) if A.shape != B.shape: raise ValueError('the shapes of A and B differ (%s vs %s)' % ( A.shape, B.shape)) # Be clever with transposes, with the intention to save memory. u, w, vt = svd(B.T.dot(A).T) R = u.dot(vt) # Always return R, and maybe return a scaling factor. if compute_scale: scale = w.sum() return R, scale else: return R
Python
0
@@ -1541,16 +1541,273 @@ ested.%0A%0A + Raises%0A ------%0A ValueError%0A If the input arrays are incompatibly shaped.%0A This may also be raised if matrix A or B contains an inf or nan%0A and check_finite is True, or if the matrix product AB contains%0A an inf or nan.%0A%0A Refe
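For context, current SciPy ships this solver publicly as scipy.linalg.orthogonal_procrustes, where both R and scale are always returned. A short worked example, including the ValueError case the new Raises section documents (assumes a SciPy recent enough to expose the public wrapper):

import numpy as np
from scipy.linalg import orthogonal_procrustes

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 3))
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
B = A @ R_true

R, scale = orthogonal_procrustes(A, B)  # scale is the singular-value sum from the docstring
print(np.allclose(R, R_true))           # True: the rotation is recovered

try:
    orthogonal_procrustes(A, B[:-1])    # incompatibly shaped inputs
except ValueError as err:
    print(err)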
d8b4dbfed17be90846ea4bc47b5f7b39ad944c24
Remove raw SQL from oscar_calculate_scores
oscar/apps/analytics/scores.py
oscar/apps/analytics/scores.py
from django.db import connection, transaction from oscar.core.loading import get_model ProductRecord = get_model('analytics', 'ProductRecord') Product = get_model('catalogue', 'Product') class Calculator(object): # Map of field name to weight weights = {'num_views': 1, 'num_basket_additions': 3, 'num_purchases': 5} def __init__(self, logger): self.logger = logger self.cursor = connection.cursor() def run(self): self.calculate_scores() self.update_product_models() def calculate_scores(self): self.logger.info("Calculating product scores") # Build the "SET ..." part of the SQL statement weighted_sum = " + ".join( ['%s*"%s"' % (weight, field) for field, weight in self.weights.items()]) ctx = {'table': ProductRecord._meta.db_table, 'weighted_total': weighted_sum, 'total_weight': sum(self.weights.values())} sql = '''UPDATE "%(table)s" SET score = %(weighted_total)s / %(total_weight)s''' % ctx self.logger.debug(sql) self.cursor.execute(sql) transaction.commit_unless_managed() def update_product_models(self): self.logger.info("Updating product records") qs = ProductRecord.objects.all() for record in qs: record.product.score = record.score record.product.save() self.logger.info("Updated scores for %d products" % qs.count())
Python
0
@@ -7,24 +7,31 @@ jango.db +.models import connecti @@ -26,31 +26,9 @@ ort -connection, transaction +F %0Afro @@ -244,16 +244,25 @@ ghts = %7B +%0A 'num_vie @@ -273,31 +273,24 @@ 1,%0A - - 'num_basket_ @@ -312,23 +312,16 @@ - 'num_pur @@ -330,16 +330,21 @@ ases': 5 +%0A %7D%0A%0A d @@ -402,50 +402,8 @@ gger -%0A self.cursor = connection.cursor() %0A%0A @@ -576,25 +576,24 @@ cores%22)%0A -%0A # Build @@ -588,162 +588,134 @@ -# Build the %22SET ...%22 part of the SQL statement%0A weighted_sum = %22 + %22.join(%0A %5B'%25s*%22%25s%22' %25 (weight, field) for field, weight%0A +total_weight = float(sum(self.weights.values()))%0A weighted_fields = %5B%0A self.weights%5Bname%5D * F(name) for name in @@ -731,18 +731,15 @@ hts. -item +key s()%5D -)%0A %0A @@ -747,226 +747,59 @@ -ctx = %7B'table': ProductRecord._meta.db_table,%0A 'weighted_total': weighted_sum,%0A 'total_weight': sum(self.weights.values())%7D%0A sql = '''UPDATE %22%25(table)s%22%0A SET +ProductRecord.objects.update(%0A score - = %25 +=sum (wei @@ -808,20 +808,16 @@ ted_ -total)s / %25( +fields)/ tota @@ -829,127 +829,8 @@ ght) -s''' %25 ctx%0A%0A self.logger.debug(sql)%0A self.cursor.execute(sql)%0A transaction.commit_unless_managed() %0A%0A @@ -925,17 +925,22 @@ -q +record s = Prod @@ -961,12 +961,32 @@ cts. -all( +select_related('product' )%0A @@ -1005,17 +1005,22 @@ cord in -q +record s:%0A @@ -1160,16 +1160,18 @@ %22 %25 -qs.count( +len(records ))%0A
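The replacement leans on Django F() expressions, so the weighted average is computed inside the database by a single UPDATE, with no cursor or raw SQL. The pattern the diff lands on, shown by itself (not self-contained: it assumes a configured Django project in which ProductRecord defines the three counter fields):

from django.db.models import F

weights = {"num_views": 1, "num_basket_additions": 3, "num_purchases": 5}
total_weight = float(sum(weights.values()))
weighted_fields = [w * F(name) for name, w in weights.items()]

# One UPDATE statement, evaluated server-side:
ProductRecord.objects.update(score=sum(weighted_fields) / total_weight)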
a66cba1f1c41ae371c752dc7b4b0f4609c8e8161
Fix Houzz usufy wrapper
osrframework/wrappers/houzz.py
osrframework/wrappers/houzz.py
# !/usr/bin/python # -*- coding: cp1252 -*- # ################################################################################## # # Copyright 2016-2018 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com) # # This program is part of OSRFramework. You can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################## __author__ = "i3visio <contacto@i3visio.com>" __version__ = "1.1" import argparse import json import re import sys import urllib2 import osrframework.utils.browser as browser from osrframework.utils.platforms import Platform class Houzz(Platform): """ A <Platform> object for Houzz. """ def __init__(self): """ Constructor... """ self.platformName = "Houzz" self.tags = ["jobs"] ######################## # Defining valid modes # ######################## self.isValidMode = {} self.isValidMode["phonefy"] = False self.isValidMode["usufy"] = True self.isValidMode["searchfy"] = False ###################################### # Search URL for the different modes # ###################################### # Strings with the URL for each and every mode self.url = {} #self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>" self.url["usufy"] = "https://www.houzz.com/pro/" + "<usufy>" #self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>" ###################################### # Whether the user needs credentials # ###################################### self.needsCredentials = {} #self.needsCredentials["phonefy"] = False self.needsCredentials["usufy"] = False #self.needsCredentials["searchfy"] = False ################# # Valid queries # ################# # Strings that will imply that the query number is not appearing self.validQuery = {} # The regular expression '.+' will match any query #self.validQuery["phonefy"] = ".*" self.validQuery["usufy"] = ".+" #self.validQuery["searchfy"] = ".*" ################### # Not_found clues # ################### # Strings that will imply that the query number is not appearing self.notFoundText = {} #self.notFoundText["phonefy"] = [] self.notFoundText["usufy"] = ["<title>Best Home Improvement Professionals - Reviews, Past Projects & Photos | Houzz</title>"] #self.notFoundText["searchfy"] = [] ######################### # Fields to be searched # ######################### self.fieldsRegExp = {} # Definition of regular expressions to be searched in phonefy mode #self.fieldsRegExp["phonefy"] = {} # Example of fields: #self.fieldsRegExp["phonefy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in usufy mode self.fieldsRegExp["usufy"] = {} # Example of fields: #self.fieldsRegExp["usufy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in searchfy mode #self.fieldsRegExp["searchfy"] = {} # Example of fields: #self.fieldsRegExp["searchfy"]["i3visio.location"] = "" ################ # Fields found # ################ # This attribute will be feeded when running the program. 
self.foundFields = {}
Python
0
@@ -2017,19 +2017,20 @@ uzz.com/ -pro +user /%22 + %22%3Cu @@ -3078,102 +3078,72 @@ = %5B -%22%3Ctitle%3EBest Home Improvement Professionals - Reviews, Past Projects & Photos %7C Houzz%3C/title%3E%22 +%0A '%3Cmeta name=%22rating%22 content=%22General%22 /%3E'%0A %5D%0A
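The fix changes both the profile URL (/pro/ becomes /user/) and the not-found marker, because usufy detection works by scanning the fetched page for strings that appear only when the username does not exist. A framework-free illustration of that check (the HTML snippets are fabricated around the diff's marker):

def profile_missing(page_html, markers):
    # Any marker present means the username was not found.
    return any(marker in page_html for marker in markers)

markers = ['<meta name="rating" content="General" />']
print(profile_missing('<html><meta name="rating" content="General" /></html>', markers))  # True
print(profile_missing('<html><h1>Some user profile</h1></html>', markers))                # False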
52b1448c6eb91ca9d59c7f2be5fa60f93262d52b
Bump version to 2.2.0
pip_check_reqs/__init__.py
pip_check_reqs/__init__.py
__version__ = '2.1.1'
Python
0
@@ -14,9 +14,9 @@ '2. -1.1 +2.0 '%0A
695ea0a9b9768c713e03f9d3ec3ca4a702de0347
Update repeaterBotScript.py
script/repeaterBotScript.py
script/repeaterBotScript.py
# For the Repeater Bot import telepot # your bot key bot = telepot.Bot("228412441:AAH0hKP-WOlcFGsZRaSCETVKIFBZf7C4gXc") def handle(msg): chat_id = msg['chat']['id'] chat_msg = msg['text'] bot.sendMessage(chat_id, chat_msg) bot.message_loop(handle)
Python
0.000001
@@ -71,53 +71,20 @@ ot(%22 -228412441:AAH0hKP-WOlcFGsZRaSCETVKIFBZf7C4gXc +%5BYOUR_TOKEN%5D %22)%0A%0A
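The diff scrubs a hard-coded bot token, leaving a [YOUR_TOKEN] placeholder. A common follow-up, suggested here rather than taken from the record, is to read the token from the environment so it never lands in version control:

import os

# TELEGRAM_BOT_TOKEN is a hypothetical variable name chosen for this sketch.
token = os.environ["TELEGRAM_BOT_TOKEN"]
# bot = telepot.Bot(token)   # then construct the bot exactly as before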
2033166b57ec566c763a9ff70df9a4123243160d
Bump version to 1.17.4
platformio_api/__init__.py
platformio_api/__init__.py
# Copyright 2014-present Ivan Kravets <me@ikravets.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging.config import os from time import tzset VERSION = (1, 17, 3) __version__ = ".".join([str(s) for s in VERSION]) __title__ = "platformio-api" __description__ = ("An API for PlatformIO") __url__ = "https://github.com/ivankravets/platformio-api" __author__ = "Ivan Kravets" __email__ = "me@ikravets.com" __license__ = "MIT License" __copyright__ = "Copyright (C) 2014-2015 Ivan Kravets" config = dict( SQLALCHEMY_DATABASE_URI=None, GITHUB_LOGIN=None, GITHUB_PASSWORD=None, DL_PIO_DIR=None, DL_PIO_URL=None, MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb # Fuzzy search will not be applied to words shorter than the value below SOLR_FUZZY_MIN_WORD_LENGTH=3, LOGGING=dict(version=1) ) assert "PIOAPI_CONFIG_PATH" in os.environ with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f: config.update(json.load(f)) # configure logging for packages logging.basicConfig() logging.config.dictConfig(config['LOGGING']) # setup time zone to UTC globally os.environ['TZ'] = "+00:00" tzset()
Python
0
@@ -682,17 +682,17 @@ (1, 17, -3 +4 )%0A__vers
1ce9101f4ac12e400e0ba09a2221c16f3b02f0ab
Add iter_plugins_dirs() test
podoc/tests/test_plugin.py
podoc/tests/test_plugin.py
# -*- coding: utf-8 -*- """Test plugin system.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os.path as op from ..core import save_text from ..plugin import IPluginRegistry, IPlugin, discover_plugins #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ def setup(): IPluginRegistry.plugins = [] def teardown(): IPluginRegistry.plugins = [] #------------------------------------------------------------------------------ # Tests #------------------------------------------------------------------------------ def test_plugin_registration(): class MyPlugin(IPlugin): pass assert IPluginRegistry.plugins == [MyPlugin] def test_discover_plugins(tempdir): path = op.join(tempdir, 'my_plugin.py') contents = '''from podoc import IPlugin\nclass MyPlugin(IPlugin): pass''' save_text(path, contents) plugins = discover_plugins([tempdir]) assert plugins assert plugins[0].__name__ == 'MyPlugin'
Python
0.000001
@@ -285,24 +285,25 @@ ugin import +( IPluginRegis @@ -332,16 +332,58 @@ _plugins +,%0A iter_plugins_dirs) %0A%0A%0A#---- @@ -1237,8 +1237,147 @@ Plugin'%0A +%0A%0Adef test_iter_plugins_dirs():%0A assert 'json' in %5Bop.basename(plugin_dir)%0A for plugin_dir in iter_plugins_dirs()%5D%0A
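The existing test writes a plugin module into a temp directory and asserts that discovery finds it; the added test only checks that 'json' is among the built-in plugin dirs. podoc's own loader is not reproduced here, but the underlying write-then-import trick looks roughly like this generic importlib version:

import importlib.util
import os
import tempfile

plugin_dir = tempfile.mkdtemp()
path = os.path.join(plugin_dir, "my_plugin.py")
with open(path, "w") as f:
    f.write("class MyPlugin:\n    pass\n")

spec = importlib.util.spec_from_file_location("my_plugin", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
print(module.MyPlugin.__name__)  # MyPlugin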
9c73bba34609226d065ed75603d15abafc616072
make a plot too
scripts/make-orbitfit-w0.py
scripts/make-orbitfit-w0.py
# coding: utf-8 """ Make w0.npy files from orbitfit results Call like: TODO python fitorbit.py --output-path=../output/orbitfits/ --potential=barred_mw \ -v --nsteps=256 --nwalkers=64 --mpi --fixtime """ from __future__ import division, print_function __author__ = "adrn <adrn@astro.columbia.edu>" # Standard library import cPickle as pickle import os # Third-party from astropy import log as logger import numpy as np # This project from ophiuchus.data import OphiuchusData def main(top_output_path, split_ix=256, potential_name=None, overwrite=False): all_ophdata = OphiuchusData() # top-level output path where orbitfit saved top_output_path = os.path.abspath(os.path.expanduser(top_output_path)) output_path = os.path.join(top_output_path, "orbitfit") if potential_name is not None: paths = [potential_name] else: paths = os.listdir(output_path) for potential_name in paths: if potential_name.startswith("."): continue this_path = os.path.join(output_path, potential_name) w0_filename = os.path.join(this_path, "w0.npy") if os.path.exists(w0_filename) and overwrite: os.remove(w0_filename) if os.path.exists(w0_filename): logger.debug("File {} exists".format(w0_filename)) continue with open(os.path.join(this_path, "sampler.pickle")) as f: sampler = pickle.load(f) _x0 = np.vstack(sampler.chain[:,split_ix:,:5]) w0 = all_ophdata._mcmc_sample_to_w0(_x0.T).T # convert to w0 and save np.save(w0_filename, w0) if __name__ == "__main__": from argparse import ArgumentParser import logging parser = ArgumentParser(description="") parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Be chatty! (default = False)") parser.add_argument("-q", "--quiet", action="store_true", dest="quiet", default=False, help="Be quiet! (default = False)") parser.add_argument("-o", "--overwrite", dest="overwrite", default=False, action="store_true", help="Overwrite any existing data.") parser.add_argument("--output-path", dest="output_path", required=True, help="Path to save the output file.") parser.add_argument("--potential", dest="potential_name", default=None, help="Name of the potential YAML file.") parser.add_argument("--ix", dest="ix", type=int, default=None, help="Chain split.") args = parser.parse_args() if args.verbose: logger.setLevel(logging.DEBUG) elif args.quiet: logger.setLevel(logging.ERROR) else: logger.setLevel(logging.INFO) main(args.output_path, potential_name=args.potential_name, split_ix=args.ix, overwrite=args.overwrite)
Python
0.000026
@@ -476,16 +476,146 @@ chusData +%0Afrom ophiuchus.util import integrate_forward_backward%0Afrom ophiuchus.plot import plot_data_orbit%0Aimport ophiuchus.potential as op %0A%0Adef ma @@ -1726,16 +1726,663 @@ e, w0)%0A%0A + potential = op.load_potential(potential_name)%0A%0A ix = np.random.randint(len(sampler.flatchain), size=64)%0A fig = plot_data_orbit(all_ophdata)%0A for sample in sampler.flatchain%5Bix%5D:%0A sample_w0 = all_ophdata._mcmc_sample_to_w0(sample%5B:5%5D)%5B:,0%5D%0A tf,tb = (3.,-3.)%0A w = integrate_forward_backward(potential, sample_w0, t_forw=tf, t_back=tb)%0A fig = plot_data_orbit(all_ophdata, orbit_w=w, data_style=dict(marker=None),%0A orbit_style=dict(color='#2166AC', alpha=0.1), fig=fig)%0A fig.savefig(os.path.join(this_path, %22orbits-split.png%22), dpi=300)%0A%0A if __nam
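The added block draws 64 random rows from sampler.flatchain and overplots the orbit integrated from each. plot_data_orbit and integrate_forward_backward are ophiuchus-specific, so this sketch isolates only the posterior-subsampling step with a synthetic chain:

import numpy as np

flatchain = np.random.rand(4096, 7)              # stand-in for sampler.flatchain
ix = np.random.randint(len(flatchain), size=64)  # 64 random posterior draws
samples = flatchain[ix]
print(samples.shape)                             # (64, 7); sample[:5] would feed _mcmc_sample_to_w0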
5a865e7f9fd8745006c581519802dfc321acfeea
Fix LZ4 Compression for Python-LZ4
_lz4.py
_lz4.py
try: # Try to import the python-lz4 package import lz4.block except ImportError: # If python-lz4 isn't present, fallback to using pure python from io import BytesIO try: from six import byte2int from six.moves import xrange except ImportError: xrange = range def byte2int(_bytes): return ord(_bytes[0]) class CorruptError(Exception): pass __support_mode__ = 'pure Python' def uncompress(src, offset=4): """uncompress a block of lz4 data. :param bytes src: lz4 compressed data (LZ4 Blocks) :param int offset: offset that the uncompressed data starts at (Used to implicitly read the uncompressed data size) :returns: uncompressed data :rtype: bytearray .. seealso:: http://cyan4973.github.io/lz4/lz4_Block_format.html """ src = BytesIO(src) if offset > 0: src.read(offset) # if we have the original size, we could pre-allocate the buffer with # bytearray(original_size), but then we would have to use indexing # instad of .append() and .extend() dst = bytearray() min_match_len = 4 def get_length(src, length): """get the length of a lz4 variable length integer.""" if length != 0x0f: return length while True: read_buf = src.read(1) if len(read_buf) != 1: raise CorruptError("EOF at length read") len_part = byte2int(read_buf) length += len_part if len_part != 0xff: break return length while True: # decode a block read_buf = src.read(1) if not read_buf: raise CorruptError("EOF at reading literal-len") token = byte2int(read_buf) literal_len = get_length(src, (token >> 4) & 0x0f) # copy the literal to the output buffer read_buf = src.read(literal_len) if len(read_buf) != literal_len: raise CorruptError("not literal data") dst.extend(read_buf) read_buf = src.read(2) if not read_buf: if token & 0x0f != 0: raise CorruptError( "EOF, but match-len > 0: %u" % (token % 0x0f, )) break if len(read_buf) != 2: raise CorruptError("premature EOF") offset = byte2int([read_buf[0]]) | (byte2int([read_buf[1]]) << 8) if offset == 0: raise CorruptError("offset can't be 0") match_len = get_length(src, (token >> 0) & 0x0f) match_len += min_match_len # append the sliding window of the previous literals for _ in xrange(match_len): dst.append(dst[-offset]) return dst def compress(data): ''' Accepts a byte array as input - returns a LZ4 compatible (uncompressed) byte array ''' length = len(data) if length > 15: result = [15 << 4 | 0] # Add the token # Add the literal size bytes result.extend([255] * (int)((length - 15) / 255)) result.append((int)((length - 15) % 255)) else: # length <= 15 result = [length << 4 | 0] # Add the token if length == 15: result.append(0) # Add the empty length byte result.extend(data) return bytearray(result) else: # Use python-lz4 if present __support_mode__ = 'python-lz4' compress = lz4.block.compress uncompress = lz4.block.decompress support_info = 'LZ4: Using %s' % __support_mode__
Python
0.010003
@@ -3724,20 +3724,25 @@ z4'%0A +%0A +def compress = l @@ -3741,29 +3741,74 @@ ress - = lz4.block.compress +(data):%0A return lz4.block.compress(data, store_size=False)%0A %0A
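python-lz4's block compressor prepends the uncompressed length as a 4-byte header by default, while the pure-Python fallback in this file never writes that header, so the two paths produced incompatible output until store_size=False aligned them. With the header off, decompression has to be told the original size explicitly:

import lz4.block

data = b"x" * 100
compressed = lz4.block.compress(data, store_size=False)   # no 4-byte size prefix
restored = lz4.block.decompress(compressed, uncompressed_size=len(data))
assert restored == data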
7fa19f13df92a3ddea6f69519539ead6cdeab3af
implement list --version option
reno/lister.py
reno/lister.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function from reno import scanner from reno import utils def list_cmd(args): "List notes files based on query arguments" reporoot = args.reporoot.rstrip('/') + '/' notesdir = utils.get_notes_dir(args) notes = scanner.get_notes_by_version(reporoot, notesdir) for version, notefiles in notes.items(): print(version) for n in notefiles: if n.startswith(reporoot): n = n[len(reporoot):] print('\t%s' % n) return
Python
0.000105
@@ -853,48 +853,162 @@ -for version, notefiles in notes.items(): +if args.version:%0A versions = args.version%0A else:%0A versions = notes.keys()%0A for version in versions:%0A notefiles = notes%5Bversion%5D %0A
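The feature is a small fallback: use the versions passed via --version when present, otherwise list every scanned version. The same selection logic reduced to a pure function (sample data invented for the calls):

def select_versions(notes, requested=None):
    # Fall back to every scanned version when --version is not given.
    return requested if requested else list(notes.keys())

notes = {"1.0.0": ["note-a.yaml"], "1.1.0": ["note-b.yaml"]}
print(select_versions(notes))             # ['1.0.0', '1.1.0']
print(select_versions(notes, ["1.1.0"]))  # ['1.1.0']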
090dd65c051b3cb9e9f1bf8274f1d75276ed274f
Fix build after swig_bot_lib changes.
scripts/prepare_bindings.py
scripts/prepare_bindings.py
#!/usr/bin/env python """ The LLVM Compiler Infrastructure This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. Prepares language bindings for LLDB build process. Run with --help to see a description of the supported command line arguments. """ # Python modules: import argparse import logging import os import platform import sys # LLDB modules: import use_lldb_suite from lldbsuite.support import fs def prepare_binding_for_language(scripts_dir, script_lang, options): """Prepares the binding for a specific language. @param scripts_dir the full path to the scripts source directory. @param script_lang the name of the script language. Should be a child directory within the scripts dir, and should contain a prepare_scripts_{script_lang}.py script file in it. @param options the dictionary of parsed command line options. There is no return value. If it returns, the process succeeded; otherwise, the process will exit where it fails. """ # Ensure the language-specific prepare module exists. script_name = "prepare_binding_{}.py".format(script_lang) lang_path = os.path.join(scripts_dir, script_lang) script_path = os.path.join(lang_path, script_name) if not os.path.exists(script_path): logging.error( "failed to find prepare script for language '%s' at '%s'", script_lang, script_path) sys.exit(-9) # Include this language-specific directory in the Python search # path. sys.path.append(os.path.normcase(lang_path)) # Execute the specific language script module_name = os.path.splitext(script_name)[0] module = __import__(module_name) module.main(options) # Remove the language-specific directory from the Python search path. sys.path.remove(os.path.normcase(lang_path)) def prepare_all_bindings(options): """Prepares bindings for each of the languages supported. @param options the parsed arguments from the command line @return the exit value for the program. 0 is success, all othes indicate some kind of failure. """ # Check for the existence of the SWIG scripts folder scripts_dir = os.path.join(options.src_root, "scripts") if not os.path.exists(scripts_dir): logging.error("failed to find scripts dir: '%s'", scripts_dir) sys.exit(-8) # Collect list of child directories. We expect there to be one # for each supported script language. child_dirs = [f for f in os.listdir(scripts_dir) if os.path.isdir(os.path.join(scripts_dir, f))] # Remove directories that do not represent script languages. for removal_dir in [".svn", "interface", "__pycache__", "sphinx"]: if removal_dir in child_dirs: child_dirs.remove(removal_dir) logging.info("found script directories: %s", child_dirs) # Iterate script directory find any script language directories for script_lang in child_dirs: logging.info("executing language script for: '%s'", script_lang) prepare_binding_for_language(scripts_dir, script_lang, options) def process_args(args): """Returns options processed from the provided command line. @param args the command line to process. """ # Setup the parser arguments that are accepted. parser = argparse.ArgumentParser( description="Prepare language bindings for LLDB build.") # Arguments to control logging verbosity. parser.add_argument( "--debug", "-d", action="store_true", help="Set program logging level to DEBUG.") parser.add_argument( "--verbose", "-v", action="count", default=0, help=( "Increase logging verbosity level. Default: only error and " "higher are displayed. 
Each -v increases level of verbosity.")) # Arguments to control whether we're building an OS X-style # framework. This is the opposite of the older "-m" (makefile) # option. parser.add_argument( "--config-build-dir", "--cfgBldDir", help=( "Configuration build dir, will use python module path " "if unspecified.")) parser.add_argument( "--find-swig", action="store_true", help=( "Indicates the swig executable should be searched for " "if not eplicitly provided. Either this or the explicit " "swig executable option must be provided.")) parser.add_argument( "--framework", action="store_true", help="Prepare as OS X-style framework.") parser.add_argument( "--generate-dependency-file", "-M", action="store_true", help="Make the dependency (.d) file for the wrappers.") parser.add_argument( "--prefix", help="Override path where the LLDB module is placed.") parser.add_argument( "--src-root", "--srcRoot", "-s", # Default to the parent directory of this script's directory. default=os.path.abspath( os.path.join( os.path.dirname(os.path.realpath(__file__)), os.path.pardir)), help="Specifies the LLDB source root directory.") parser.add_argument( "--swig-executable", "--swigExecutable", help="Path to the swig executable.") parser.add_argument( "--target-dir", "--targetDir", required=True, help=( "Specifies the build dir where the language binding " "should be placed")) # Process args. options = parser.parse_args(args) # Set logging level based on verbosity count. if options.debug: log_level = logging.DEBUG else: # See logging documentation for error levels. We'll default # to showing ERROR or higher error messages. For each -v # specified, we'll shift to the next lower-priority log level. log_level = logging.ERROR - 10 * options.verbose if log_level < logging.NOTSET: # Displays all logged messages. log_level = logging.NOTSET logging.basicConfig(level=log_level) logging.info("logging is using level: %d", log_level) return options def main(args): """Drives the main script preparation steps. @param args list of command line arguments. """ # Process command line arguments. options = process_args(args) logging.debug("Processed args: options=%s", options) # Ensure we have a swig executable. if not options.swig_executable or len(options.swig_executable) == 0: if options.find_swig: try: options.swig_executable = fs.find_executable("swig") except Exception as e: logging.error("Unable to find swig executable: %s" % e.message) sys.exit(-6) else: logging.error( "The --find-swig option must be specified " "when the swig executable location is not " "explicitly provided.") sys.exit(-12) # Check if the swig file exists. swig_path = os.path.normcase( os.path.join(options.src_root, "scripts", "lldb.swig")) if not os.path.isfile(swig_path): logging.error("swig file not found at '%s'", swig_path) sys.exit(-3) # Prepare bindings for each supported language binding. # This will error out if it doesn't succeed. prepare_all_bindings(options) sys.exit(0) if __name__ == "__main__": # Run the main driver loop. main(sys.argv[1:])
Python
0.000004
@@ -2791,16 +2791,32 @@ %22sphinx%22 +, %22swig_bot_lib%22 %5D:%0A
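The one-line fix adds swig_bot_lib to the directories that must not be treated as script languages. The same exclusion reads more directly as a set difference; this variant is a restructuring of mine, not the script's actual code:

child_dirs = ["python", "swig_bot_lib", "__pycache__", "sphinx"]
ignored = {".svn", "interface", "__pycache__", "sphinx", "swig_bot_lib"}
script_langs = [d for d in child_dirs if d not in ignored]
print(script_langs)  # ['python']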
98edde06a481cc18727a8c030bc670f7ab43b73f
increase timeout
scripts/pyinstaller/sign.py
scripts/pyinstaller/sign.py
import argparse import os import pathlib import sys from subprocess import STDOUT, check_call if sys.platform != "darwin": raise NotImplementedError parser = argparse.ArgumentParser() parser.add_argument( "--application-id", required=True, help="Certificate ID (should be added to the keychain).", ) args = parser.parse_args() path = pathlib.Path(__file__).parent.absolute() dvc = path / "dist" / "dvc" for root, _, fnames in os.walk(dvc): for fname in fnames: fpath = os.path.join(root, fname) print(f"signing {fpath}") check_call( [ "codesign", "--force", "--verbose", "-s", args.application_id, "-o", "runtime", "--entitlements", "entitlements.plist", fpath, ], stderr=STDOUT, timeout=5, )
Python
0.999426
@@ -940,9 +940,10 @@ out= -5 +10 ,%0A
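Only the codesign timeout changes, from 5 to 10 seconds. When that limit is hit, check_call kills the child and raises TimeoutExpired rather than returning; a tiny demonstration (assumes a POSIX sleep binary):

from subprocess import check_call, TimeoutExpired

try:
    check_call(["sleep", "5"], timeout=1)
except TimeoutExpired as err:
    print(f"killed after {err.timeout}s")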
713f9188ad5b25c604ac8736609b1ce5fefa224d
Include Cache-Control headers to avoid caching private files
private_storage/servers.py
private_storage/servers.py
""" Sending files efficiently for different kind of webservers. """ import os import sys import time from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.http import FileResponse, HttpResponse, HttpResponseNotModified from django.utils.http import http_date from django.utils.lru_cache import lru_cache from django.utils.module_loading import import_string from django.views.static import serve, was_modified_since @lru_cache() def get_server_class(path): if '.' in path: return import_string(path) elif path == 'streaming': return DjangoStreamingServer elif path == 'django': return DjangoServer elif path == 'apache': return ApacheXSendfileServer elif path == 'nginx': return NginxXAccelRedirectServer else: raise ImproperlyConfigured( "PRIVATE_STORAGE_SERVER setting should be 'nginx', 'apache', 'django' or a python class path." ) class DjangoStreamingServer(object): """ Serve static files through ``wsgi.file_wrapper`` or streaming chunks. This method also works for content that doesn't exist at the local filesystem, such as files on S3. """ @staticmethod def serve(private_file): # Support If-Last-Modified if sys.version_info >= (3,): mtime = private_file.modified_time.timestamp() else: mtime = time.mktime(private_file.modified_time.timetuple()) size = private_file.size if not was_modified_since(private_file.request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size): return HttpResponseNotModified() # As of Django 1.8, FileResponse triggers 'wsgi.file_wrapper' in Django's WSGIHandler. # This uses efficient file streaming, such as sendfile() in uWSGI. # When the WSGI container doesn't provide 'wsgi.file_wrapper', it submits the file in 4KB chunks. response = FileResponse(private_file.open()) response['Content-Type'] = private_file.content_type response['Content-Length'] = size response["Last-Modified"] = http_date(mtime) return response class DjangoServer(DjangoStreamingServer): """ Serve static files from the local filesystem through Django or ``wsgi_file_wrapper``. Django 1.8 and up support ``wsgi.file_wrapper``, which helps to send a file in the most efficient way. When the WSGI server provides this feature, the file is send using an efficient method such as ``sendfile()`` on UNIX. Without ``file.file_wrapper``, the file will be streamed in 4K chunks, causing the file data to be read and copied multiple times in kernel memory as the file is read by Django, written to WSGI, read by webserver, and written to the socket. In some situations, such as Gunicorn behind Nginx/Apache, it's recommended to use the :class:`ApacheXSendfileServer` or :class:`NginxXAccelRedirectServer` servers instead. """ @staticmethod def serve(private_file): # This supports If-Modified-Since and sends the file in 4KB chunks try: full_path = private_file.full_path except NotImplementedError: # S3 files, fall back to streaming server return DjangoStreamingServer.serve(private_file) else: # Using Django's serve gives If-Modified-Since support out of the box. return serve(private_file.request, full_path, document_root='/', show_indexes=False) class ApacheXSendfileServer(object): """ Serve files for Apache with ``X-Sendfile``. """ @staticmethod def serve(private_file): response = HttpResponse() response['X-Sendfile'] = private_file.full_path response['Content-Type'] = private_file.content_type return response class NginxXAccelRedirectServer(object): """ Serve the files for Nginx with ``X-Accel-Redirect``. 
Add the following configuration:: location /private-x-accel-redirect/ ( internal; alias /home/user/my/path/to/private/media/; ) Or update the ``PRIVATE_STORAGE_INTERNAL_URL`` setting to use a different URL prefix. """ @staticmethod def serve(private_file): internal_url = os.path.join(settings.PRIVATE_STORAGE_INTERNAL_URL, private_file.relative_name) response = HttpResponse() response['X-Accel-Redirect'] = internal_url response['Content-Type'] = private_file.content_type return response
Python
0
@@ -93,16 +93,44 @@ ort time +%0Afrom functools import wraps %0A%0Afrom d @@ -997,24 +997,569 @@ )%0A%0A%0A +def add_no_cache_headers(func):%0A %22%22%22%0A Makes sure the retrieved file is not cached on disk, or cached by proxy servers in between.%0A This would circumvent any checking whether the user may even access the file.%0A %22%22%22%0A%0A @wraps(func)%0A def _dec(*args, **kwargs):%0A response = func(*args, **kwargs)%0A response%5B'Expires'%5D = 'Thu, 01 Jan 1970 00:00:00 GMT' # HTTP 1.0 proxies%0A response%5B'Cache-Control'%5D = 'max-age=0, no-cache, must-revalidate, proxy-revalidate' # HTTP 1.1%0A return response%0A return _dec%0A%0A%0A class Django @@ -1789,32 +1789,58 @@ @staticmethod%0A + @add_no_cache_headers%0A def serve(pr @@ -3598,32 +3598,58 @@ @staticmethod%0A + @add_no_cache_headers%0A def serve(pr @@ -4255,32 +4255,58 @@ @staticmethod%0A + @add_no_cache_headers%0A def serve(pr @@ -4866,32 +4866,32 @@ refix.%0A %22%22%22%0A%0A - @staticmetho @@ -4888,24 +4888,50 @@ taticmethod%0A + @add_no_cache_headers%0A def serv
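The diff introduces add_no_cache_headers and wraps every serve() with it, so responses for private files carry Expires and Cache-Control headers forbidding caching by disks and intermediate proxies. The decorator itself, exercised here against a plain dict since Django's HttpResponse also supports item assignment:

from functools import wraps

def add_no_cache_headers(func):
    @wraps(func)
    def _dec(*args, **kwargs):
        response = func(*args, **kwargs)
        response["Expires"] = "Thu, 01 Jan 1970 00:00:00 GMT"  # HTTP 1.0 proxies
        response["Cache-Control"] = "max-age=0, no-cache, must-revalidate, proxy-revalidate"  # HTTP 1.1
        return response
    return _dec

@add_no_cache_headers
def serve():
    return {}  # stand-in for an HttpResponse

print(serve())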
8b8ce9c4d3eb34894dbddb66cc06faa364963cd9
Determine highest semantic version
scripts/update_changelog.py
scripts/update_changelog.py
import os import pprint from copy import copy from datetime import datetime, timezone from pathlib import Path import git from github import Github # The owner and repository name. For example, octocat/Hello-World. GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY') GITHUB_TOKEN = os.getenv('GITHUB_TOKEN') PRODUCTION = os.getenv('PRODUCTION', False) CURRENT_FILE = Path(__file__) ROOT = CURRENT_FILE.parents[1] CHANGELOG_PATH = ROOT / 'CHANGELOG.md' # TODO: Adjust number of recent pull requests to include likely number of # pull requests since the last release. RECENT_PULL_REQUEST_LIMIT = 10 def main(): # Find most recent tag and timestamp. # git for-each-ref --format="%(refname:short) | %(creatordate)" "refs/tags/*" local_repo = git.Repo(ROOT) # Fetch tags since `git fetch' is run with --no-tags during actions/checkout. # git fetch --tags for remote in local_repo.remotes: remote.fetch('--tags') # Sort the tags by version. # git tag --list | sort --reverse --version-sort tags = sorted( local_repo.tags, key=lambda tag: list(map(int, tag.name.split('.'))), reverse=True) most_recent_tag = tags[0] print('most_recent_tag: {}'.format(most_recent_tag)) most_recent_tag_datetime = most_recent_tag.commit.committed_datetime print('most_recent_tag_datetime: {}'.format(most_recent_tag_datetime)) # Find merged pull requests since the most recent tag. github_repo = Github(login_or_token=GITHUB_TOKEN).get_repo(GITHUB_REPOSITORY) recent_pulls = github_repo.get_pulls( state='closed', sort='updated', direction='desc', )[:RECENT_PULL_REQUEST_LIMIT] pull_request_changes = [] # Group pull requests by semantic version change type. pull_request_by_type = { 'major': [], 'minor': [], 'patch': [], 'unspecified': [], } # Track if any pull request is missing a semantic version change type. pulls_missing_semver_label = [] for pull in recent_pulls: # print('-' * 10) if not pull.merged: # print('skipping since not merged: {}'.format(pull.title)) # print(pull.html_url) continue # Make merged_at timestamp offset-aware. Without this, the following # error will appear: # TypeError: can't compare offset-naive and offset-aware datetimes pull_merged_at = copy(pull.merged_at).replace(tzinfo=timezone.utc) if pull_merged_at < most_recent_tag_datetime: # print('skipping since merged prior to last release: {}'.format(pull.title)) # print(pull.html_url) continue pull_labels = { label.name for label in pull.labels } if 'major-incompatible-changes' in pull_labels: group_name = 'major' elif 'minor-backwards-compatible-added-functionality' in pull_labels: group_name = 'minor' elif 'patch-backwards-compatible-bug-fixes' in pull_labels: group_name = 'patch' else: group_name = 'unspecified' pulls_missing_semver_label.append(pull) pull_request_by_type[group_name].append(pull) # pprint.pprint(dir(pull)) pprint.pprint(pull.title) pprint.pprint('most recent: {}'.format(most_recent_tag_datetime)) pprint.pprint('merged at: {}'.format(pull.merged_at)) print(pull.html_url) pull_request_changes.append( '- {} ([#{}]({}))'.format(pull.title, pull.number, pull.html_url) ) print('-' * 10) pprint.pprint(pull_request_changes) # TODO: Fetch next actual semantic version. 
release_version = most_recent_tag release_date = datetime.today().strftime('%Y-%m-%d') release_title = '{} - {}'.format(release_version, release_date) print('release_title: {}'.format(release_title)) release_content = ( '## {}\n' '\n' '{}' ).format(release_title, '\n'.join(pull_request_changes)) old_content = CHANGELOG_PATH.read_text() new_content = old_content.replace( '<!-- CHANGELOG_PLACEHOLDER -->', '<!-- CHANGELOG_PLACEHOLDER -->\n\n{}'.format(release_content), ) print(new_content[:800]) # CHANGELOG_PATH.write_text(new_content) # Raise error if any pull request is missing a semantic version change type. if pulls_missing_semver_label: error_message = ( 'Merged pull request(s) found without semantic version label:\n' '{}'.format('\n'.join( ' {}'.format(pull.html_url) for pull in pulls_missing_semver_label))) raise Exception(error_message) if __name__ == '__main__': main()
Python
0.999999
@@ -3238,41 +3238,8 @@ # - pprint.pprint(dir(pull))%0A ppr @@ -3260,32 +3260,34 @@ l.title)%0A + # pprint.pprint(' @@ -3344,16 +3344,18 @@ %0A + # pprint. @@ -3406,24 +3406,26 @@ at))%0A + # print(pull. @@ -3568,16 +3568,18 @@ %0A + # print(' @@ -3584,24 +3584,435 @@ ('-' * 10)%0A%0A + pprint.pprint(pull_request_by_type)%0A highest_semantic_version = None%0A if pull_request_by_type.get('major'):%0A highest_semantic_version = 'major'%0A elif pull_request_by_type.get('minor'):%0A highest_semantic_version = 'minor'%0A elif pull_request_by_type.get('patch'):%0A highest_semantic_version = 'patch'%0A print('highest_semantic_version: %7B%7D'.format(highest_semantic_version))%0A%0A pprint.p
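The new block picks the highest-priority bump with an if/elif chain over the 'major', 'minor' and 'patch' groups. The same precedence rule collapses to a loop; this form is a rewrite of mine, not the script's code:

def highest_semantic_version(pulls_by_type):
    for level in ("major", "minor", "patch"):
        if pulls_by_type.get(level):
            return level
    return None

print(highest_semantic_version({"major": [], "minor": ["#12"], "patch": ["#13"]}))  # minor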
abc31fd4e6b61cc45dbb606a98e450773951e52d
Use lmots.txt
anav.py
anav.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import string import random from itertools import chain import networkx as nx import pickle import Levenshtein letters = list(string.ascii_lowercase) def out(G): for n in sorted(G) : print(n, G.nodes[n]) def leven(word1,word2): """Calcul de la distance de Levenshtein entre 2 mots""" #word1 = tri(word1) #word2 = tri(word2) return Levenshtein.distance(word1, word2) def tri(s) : """ Renvoit le mot trié pour repérage anagramme """ return "".join(sorted(list(s))) def cree_dico(): """constitue dictionnaire des anagrammes depuis dico Le dictionnaire renvoyé est de la forme : * clé : "mot" constitué des lettres triés * valeur : liste des mots anagrammes Exemple : 'aimnos': ['aimons', 'amnios', 'maison'] """ print('Début lecture') with open("gut.txt") as f: anag = {} for l in f: l = l.strip() tll = tri(l) if tll not in anag: anag[tll] = [l] else: anag[tll].append(l) print('Fin lecture') with open("gut.pickle", "wb") as f: pickle.dump(anag, f) return anag def lis_anag(infile): print('Début lecture') with open(infile, 'rb') as f: anag = pickle.load(f) print('Fin lecture') return anag def mots_from(mot): """renvoie la liste des mots relié à mot""" ch = tri(mot) s = set() for le in letters : s.add(tri(ch + le)) # ajoute for i, l in enumerate(ch) : base = ch[:i] + ch[i+1:] # enlève lettre s.add(base) # enlève lettre for le in letters : s.add(tri(base + le)) # substitue return chain(*(anag.get(ch, []) for ch in s)) def expand(G, curr, cible, atteint=True, explore=False): """Etend le graphe depuis curr Le node curr passe à l'état explore à True On récupère ses voisins et on les ajoute au graphe à l'état explore à False """ dist_curr = leven(cible, curr) G.add_node(curr, explore=True, dist=dist_curr, atteint=atteint) for u in mots_from(curr): if u not in G: dist = leven(cible, u) G.add_node(u, explore=explore, dist=dist, atteint=atteint) else : G.nodes[u]['atteint'] = atteint G.add_edge(curr, u) def analyse(G, fin, opti): """ Analyse du graphe Teste si on a une solution Définit les noeuds à explorer Lance leur exploration """ # Limite recherches # On cherche le min des dist + opti min_dist = None if opti >= 0 : min_dist = min([G.nodes[n]['dist'] for n in G if G.nodes[n]['atteint']]) # les nodes trop lointains sont considérés comme déjà explorés for n in G: if G.nodes[n]['dist'] > min_dist + opti : G.nodes[n]['explore'] = True # constition de la liste des nodes non explorés, donc à explorer nodes = [n for n in G if not G.nodes[n]['explore']] print('Analyse : ', len(nodes), 'nouveaux nodes à explorer - Distance mini :', min_dist) for node in nodes : expand(G, node, fin) def cherche(G, debut, fin, max_loop=20, opti=-1): """ Boucle principale * explore le premier node (debut) * puis lance l'analyse des différents niveaux (maximum max_loop) * et vérifie si on a trouvé une solution """ # on génère un morceau de graphe par la fin expand(G, fin, fin, atteint=False, explore=True) nodes = list(G.nodes()) for n in nodes: expand(G, n, fin, atteint=False, explore=True) # on génère le début du graphe expand(G, debut, fin) flag = False # puis on élargit progressivement l'analyse et la constitution du graphe for level in range(max_loop): analyse(G, fin, opti) # s'il y a un chemin, on sort if nx.has_path(G, debut, fin): flag = True break # indique les différents chemins if flag : print('100 solutions au hasard :') sol = list(nx.all_shortest_paths(G,source=debut,target=fin)) random.shuffle(sol) for i, p in 
enumerate(sol): if i < 100: print(p) print('Nombre total de solutions :', i+1) else: print("Pas de chemin trouvé") if __name__ == '__main__': # cree_dico() anag = lis_anag("gut.pickle") ###cherche(G, 'toiture', 'abricot', opti=2) #anag = cree_anag("lmots.txt", "lmots.pickle") #cherche(G, 'boite', 'maison', opti=2) G = nx.Graph() # cherche(G, 'stylo', 'zoulou', opti=2) # cherche(G, 'ire', 'hydrotherapique', max_loop=30, opti=4) cherche(G, 'vent', 'moulin', opti=2)
Python
0.000001
@@ -619,21 +619,36 @@ ef cree_ -dico( +anag(infile, outfile ):%0A %22 @@ -954,17 +954,14 @@ pen( -%22gut.txt%22 +infile ) as @@ -1211,28 +1211,23 @@ th open( -%22gut.pick +outfi le -%22 , %22wb%22) @@ -4415,19 +4415,52 @@ # - cree_dico( +anag = cree_anag(%22lmots.txt%22, %22lmots.pickle%22 )%0A @@ -4482,11 +4482,13 @@ ag(%22 -gut +lmots .pic @@ -4497,16 +4497,36 @@ e%22)%0A +G = nx.Graph()%0A # ###cherc @@ -4570,120 +4570,55 @@ # -anag = cree_anag(%22lmots.txt%22, %22lmots.pickle%22)%0A #cherche(G, 'boite', 'maison', opti=2)%0A G = nx.Graph()%0A +# cherche(G, 'pipo', 'squelette', opti=2)%0A # # ch
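The refactor generalises cree_dico() into cree_anag(infile, outfile), but the indexing trick is unchanged: key every word by its sorted letters so all anagrams collide on the same key. The core of it, using the docstring's own example words:

def tri(s):
    # Sorted-letter key: every anagram of a word shares it.
    return "".join(sorted(s))

anag = {}
for word in ["aimons", "amnios", "maison"]:
    anag.setdefault(tri(word), []).append(word)
print(anag)  # {'aimnos': ['aimons', 'amnios', 'maison']}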
ab704f6f3cb77fc89faab938a6474c0cbe831815
Fix verdana check
seaborn/tests/test_rcmod.py
seaborn/tests/test_rcmod.py
import numpy as np import matplotlib as mpl from distutils.version import LooseVersion import nose import matplotlib.pyplot as plt import nose.tools as nt import numpy.testing as npt from .. import rcmod class RCParamTester(object): def flatten_list(self, orig_list): iter_list = map(np.atleast_1d, orig_list) flat_list = [item for sublist in iter_list for item in sublist] return flat_list def assert_rc_params(self, params): for k, v in params.items(): if k == "svg.embed_char_paths": # This param causes test issues and is deprecated anyway continue elif isinstance(v, np.ndarray): npt.assert_array_equal(mpl.rcParams[k], v) else: nt.assert_equal((k, mpl.rcParams[k]), (k, v)) class TestAxesStyle(RCParamTester): styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"] def test_default_return(self): current = rcmod.axes_style() self.assert_rc_params(current) def test_key_usage(self): _style_keys = set(rcmod._style_keys) for style in self.styles: nt.assert_true(not set(rcmod.axes_style(style)) ^ _style_keys) def test_bad_style(self): with nt.assert_raises(ValueError): rcmod.axes_style("i_am_not_a_style") def test_rc_override(self): rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"} out = rcmod.axes_style("darkgrid", rc) nt.assert_equal(out["axes.facecolor"], "blue") nt.assert_not_in("foo.notaparam", out) def test_set_style(self): for style in self.styles: style_dict = rcmod.axes_style(style) rcmod.set_style(style) self.assert_rc_params(style_dict) def test_style_context_manager(self): rcmod.set_style("darkgrid") orig_params = rcmod.axes_style() with rcmod.axes_style("whitegrid"): context_params = rcmod.axes_style("whitegrid") self.assert_rc_params(context_params) self.assert_rc_params(orig_params) def test_style_context_independence(self): nt.assert_true(set(rcmod._style_keys) ^ set(rcmod._context_keys)) def test_set_rc(self): rcmod.set(rc={"lines.linewidth": 4}) nt.assert_equal(mpl.rcParams["lines.linewidth"], 4) rcmod.set() def test_reset_defaults(self): # Changes to the rc parameters make this test hard to manage # on older versions of matplotlib, so we'll skip it if LooseVersion(mpl.__version__) < LooseVersion("1.3"): raise nose.SkipTest rcmod.reset_defaults() self.assert_rc_params(mpl.rcParamsDefault) rcmod.set() def test_reset_orig(self): # Changes to the rc parameters make this test hard to manage # on older versions of matplotlib, so we'll skip it if LooseVersion(mpl.__version__) < LooseVersion("1.3"): raise nose.SkipTest rcmod.reset_orig() self.assert_rc_params(mpl.rcParamsOrig) rcmod.set() class TestPlottingContext(RCParamTester): contexts = ["paper", "notebook", "talk", "poster"] def test_default_return(self): current = rcmod.plotting_context() self.assert_rc_params(current) def test_key_usage(self): _context_keys = set(rcmod._context_keys) for context in self.contexts: missing = set(rcmod.plotting_context(context)) ^ _context_keys nt.assert_true(not missing) def test_bad_context(self): with nt.assert_raises(ValueError): rcmod.plotting_context("i_am_not_a_context") def test_font_scale(self): notebook_ref = rcmod.plotting_context("notebook") notebook_big = rcmod.plotting_context("notebook", 2) font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize", "xtick.labelsize", "ytick.labelsize"] for k in font_keys: nt.assert_equal(notebook_ref[k] * 2, notebook_big[k]) def test_rc_override(self): key, val = "grid.linewidth", 5 rc = {key: val, "foo": "bar"} out = rcmod.plotting_context("talk", rc=rc) nt.assert_equal(out[key], val) nt.assert_not_in("foo", out) def 
test_set_context(self): for context in self.contexts: context_dict = rcmod.plotting_context(context) rcmod.set_context(context) self.assert_rc_params(context_dict) def test_context_context_manager(self): rcmod.set_context("notebook") orig_params = rcmod.plotting_context() with rcmod.plotting_context("paper"): context_params = rcmod.plotting_context("paper") self.assert_rc_params(context_params) self.assert_rc_params(orig_params) class TestFonts(object): def test_set_font(self): rcmod.set(font="Verdana") _, ax = plt.subplots() ax.set_xlabel("foo") try: nt.assert_equal(ax.xaxis.label.get_fontname(), "Verdana") except AssertionError: if has_verdana: raise else: raise nose.SkipTest("Verdana font is not present") finally: rcmod.set() plt.close("all") def test_set_serif_font(self): rcmod.set(font="serif") _, ax = plt.subplots() ax.set_xlabel("foo") nt.assert_in(ax.xaxis.label.get_fontname(), mpl.rcParams["font.serif"]) rcmod.set() plt.close("all") def test_different_sans_serif(self): if LooseVersion(mpl.__version__) < LooseVersion("1.4"): raise nose.SkipTest rcmod.set() rcmod.set_style(rc={"font.sans-serif": ["Verdana"]}) _, ax = plt.subplots() ax.set_xlabel("foo") try: nt.assert_equal(ax.xaxis.label.get_fontname(), "Verdana") except AssertionError: if has_verdana: raise else: raise nose.SkipTest("Verdana font is not present") finally: rcmod.set() plt.close("all") def has_verdana(): """Helper to verify if Verdana font is present""" # This import is relatively lengthy, so to prevent its import for # testing other tests in this module not requiring this knowledge, # import font_manager here import matplotlib.font_manager as mplfm try: verdana_font = mplfm.findfont('Verdana', fallback_to_default=False) except: # if https://github.com/matplotlib/matplotlib/pull/3435 # gets accepted return False # otherwise check if not matching the logic for a 'default' one try: unlikely_font = mplfm.findfont("very_unlikely_to_exist1234", fallback_to_default=False) except: # if matched verdana but not unlikely, Verdana must exist return True # otherwise -- if they match, must be the same default return verdana_font != unlikely_font
Python
0
@@ -5177,32 +5177,34 @@ if has_verdana +() :%0A @@ -6123,16 +6123,18 @@ _verdana +() :%0A
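The bug: has_verdana is a function, so the bare name in `if has_verdana:` is always truthy and the tests re-raised instead of skipping whenever Verdana was missing; adding call parentheses fixes that. The probe being called compares findfont's answer against a sentinel font, roughly like this (fallback_to_default is real matplotlib API, though whether a miss raises or silently falls back differs across versions):

import matplotlib.font_manager as mplfm

def has_font(name):
    try:
        found = mplfm.findfont(name, fallback_to_default=False)
    except Exception:
        return False              # a raise means the font is definitely absent
    try:
        sentinel = mplfm.findfont("very_unlikely_to_exist1234", fallback_to_default=False)
    except Exception:
        return True               # `name` resolved but the sentinel did not
    return found != sentinel      # both "resolved": real only if they differ

print(has_font("Verdana"))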
4020c3dea19ac7624ec15b076224845aa79a2c2c
fix last commit (missing import)
protocols/ircs2s_common.py
protocols/ircs2s_common.py
""" ircs2s_common.py: Common base protocol class with functions shared by TS6 and P10-based protocols. """ from pylinkirc.classes import Protocol from pylinkirc.log import log class IRCS2SProtocol(Protocol): def checkCollision(self, nick): """ Nick collision checker. """ uid = self.irc.nickToUid(nick) # If there is a nick collision, we simply alert plugins. Relay will purposely try to # lose fights and tag nicks instead, while other plugins can choose how to handle this. if uid: log.info('(%s) Nick collision on %s/%s, forwarding this to plugins', self.irc.name, uid, nick) self.irc.callHooks([self.irc.sid, 'SAVE', {'target': uid}]) def handle_kill(self, source, command, args): """Handles incoming KILLs.""" killed = args[0] # Depending on whether the IRCd sends explicit QUIT messages for # killed clients, the user may or may not have automatically been # removed from our user list. # If not, we have to assume that KILL = QUIT and remove them # ourselves. data = self.irc.users.get(killed) if data: self.removeClient(killed) # TS6-style kills look something like this: # <- :GL KILL 38QAAAAAA :hidden-1C620195!GL (test) # What we actually want is to format a pretty kill message, in the form # "Killed (killername (reason))". try: # Get the nick or server name of the caller. killer = self.irc.getFriendlyName(source) except KeyError: # Killer was... neither? We must have aliens or something. Fallback # to the given "UID". killer = source # Get the reason, which is enclosed in brackets. reason = ' '.join(args[1].split(" ")[1:]) killmsg = "Killed (%s %s)" % (killer, reason) return {'target': killed, 'text': killmsg, 'userdata': data} def handle_squit(self, numeric, command, args): """Handles incoming SQUITs.""" return self._squit(numeric, command, args) def handle_away(self, numeric, command, args): """Handles incoming AWAY messages.""" # TS6: # <- :6ELAAAAAB AWAY :Auto-away # P10: # <- ABAAA A :blah # <- ABAAA A try: self.irc.users[numeric].away = text = args[0] except IndexError: # User is unsetting away status self.irc.users[numeric].away = text = '' return {'text': text} def handle_version(self, numeric, command, args): """Handles requests for the PyLink server version.""" return {} # See coremods/handlers.py for how this hook is used def handle_whois(self, numeric, command, args): """Handles incoming WHOIS commands..""" # TS6: # <- :42XAAAAAB WHOIS 5PYAAAAAA :pylink-devel # P10: # <- ABAAA W Ay :PyLink-devel # First argument is the server that should reply to the WHOIS request # or the server hosting the UID given. We can safely assume that any # WHOIS commands received are for us, since we don't host any real servers # to route it to. return {'target': self._getUid(args[-1])} def handle_quit(self, numeric, command, args): """Handles incoming QUIT commands.""" # TS6: # <- :1SRAAGB4T QUIT :Quit: quit message goes here # P10: # <- ABAAB Q :Killed (GL_ (bangbang)) self.removeClient(numeric) return {'text': args[0]} def handle_time(self, numeric, command, args): """Handles incoming /TIME requests.""" return {'target': args[0]} def handle_pong(self, source, command, args): """Handles incoming PONG commands.""" if source == self.irc.uplink: self.irc.lastping = time.time()
Python
0
@@ -101,16 +101,29 @@ s.%0A%22%22%22%0A%0A +import time%0A%0A from pyl
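The missing `import time` only bites when handle_pong actually runs, because Python resolves the global name at call time rather than at definition time. A small demonstration of that failure mode and the fix:

def handle_pong():
    return time.time()       # global lookup happens only when called

try:
    handle_pong()
except NameError as err:
    print(err)               # name 'time' is not defined

import time                  # the one-line fix from the diff

print(handle_pong() > 0)     # True once the module-level import exists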
8284279a5e92679976637bc9f966f4b776636e82
Use copies of net to avoid cross-talk between tests.
Testing/test_InitialConditions.py
Testing/test_InitialConditions.py
import copy import unittest import scipy from TestNetwork import net net = copy.deepcopy(net) net.compile() net.setInitialVariableValue('A', 1.0) net.setInitialVariableValue('B', 2.0) class test_ics(unittest.TestCase): def test_default_initial_conditions(self): """Test that default ICs are handled correctly""" traj = net.integrate(scipy.linspace(0, 5, 5)) ICx = traj.getVariableTrajectory('x')[0] ICy = traj.getVariableTrajectory('y')[0] self.assertAlmostEqual(ICx, 1.0, 6, 'Failed on default IC') self.assertAlmostEqual(ICy, 2.0, 6, 'Failed on default IC') def test_resetting_initial_conditions(self): """Test resetting of ICs""" net.set_initial_var_value('x', 0.5) traj = net.integrate(scipy.linspace(0, 5, 5)) ICx = traj.getVariableTrajectory('x')[0] self.assertAlmostEqual(ICx, 0.5, 6, 'Failed on resetting IC') def test_parameter_ics(self): """Test parameters as ICs""" net.set_initial_var_value('x', 'A') traj = net.integrate(scipy.linspace(0, 5, 5)) ICx = traj.getVariableTrajectory('x')[0] self.assertAlmostEqual(ICx, 1.0, 6, 'Failed on parameter IC') def test_resetting_parameter(self): """Test changing parameters as ICs""" net.set_initial_var_value('x', 'A') net.set_initial_var_value('A', 0.9) traj = net.integrate(scipy.linspace(0, 5, 5)) ICx = traj.getVariableTrajectory('x')[0] self.assertAlmostEqual(ICx, 0.9, 6, 'Failed on changing parameter IC') def test_expression_ICs(self): """Test math expression as IC""" net.set_initial_var_value('x', 'A + 1.5*B') traj = net.integrate(scipy.linspace(0, 5, 5)) ICx = traj.getVariableTrajectory('x')[0] self.assertAlmostEqual(ICx, 4.0, 6, 'Failed on changing parameter IC') suite = unittest.makeSuite(test_ics) if __name__ == '__main__': unittest.main()
Python
0
@@ -316,24 +316,54 @@ orrectly%22%22%22%0A + test_net = net.copy()%0A traj @@ -357,32 +357,37 @@ %0A traj = +test_ net.integrate(sc @@ -729,32 +729,67 @@ ICs%22%22%22%0A +test_net = net.copy()%0A test_ net.set_initial_ @@ -815,32 +815,37 @@ %0A traj = +test_ net.integrate(sc @@ -1058,32 +1058,67 @@ ICs%22%22%22%0A +test_net = net.copy()%0A test_ net.set_initial_ @@ -1144,32 +1144,37 @@ %0A traj = +test_ net.integrate(sc @@ -1402,32 +1402,67 @@ ICs%22%22%22%0A +test_net = net.copy()%0A test_ net.set_initial_ @@ -1481,32 +1481,37 @@ ', 'A')%0A +test_ net.set_initial_ @@ -1537,32 +1537,37 @@ %0A traj = +test_ net.integrate(sc @@ -1802,16 +1802,51 @@ +test_net = net.copy()%0A test_ net.set_ @@ -1896,16 +1896,21 @@ traj = +test_ net.inte
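The fix hands each test its own deep copy of the shared network so mutated initial conditions cannot leak between tests. The isolation pattern boiled down to a runnable toy, where Net stands in for the real TestNetwork object:

import copy
import unittest

class Net:
    def __init__(self):
        self.ics = {"x": 1.0, "y": 2.0}

    def copy(self):
        return copy.deepcopy(self)   # the method the fixed tests rely on

net = Net()

class TestICs(unittest.TestCase):
    def test_reset_ic(self):
        test_net = net.copy()                 # per-test copy: mutations stay local
        test_net.ics["x"] = 0.5
        self.assertEqual(net.ics["x"], 1.0)   # shared fixture untouched

if __name__ == "__main__":
    unittest.main()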
b989bc03278cf1049656f7a91bd95806b6a63580
Use lockfile to ensure just one process
bdbc.py
bdbc.py
""" Listen to windows focus changes, and close Dropbox Preference window when opened. """ def only_one(): from tendo import singleton me = singleton.SingleInstance() # will sys.exit(-1) if other instance is running def kill_window(): from pywinauto import Application, findwindows try: for handle in findwindows.find_windows(title_re="(Dropbox Preferences|Preferencias de Dropbox)"): app = Application.connect(Application(), handle=handle) for window in app.windows_(title_re="(Dropbox Preferences|Preferencias de Dropbox)"): window.Close() except findwindows.WindowNotFoundError: pass def foreground_window_hook(): import sys import time import ctypes import ctypes.wintypes EVENT_SYSTEM_FOREGROUND = 0x0003 WINEVENT_OUTOFCONTEXT = 0x0000 user32 = ctypes.windll.user32 ole32 = ctypes.windll.ole32 ole32.CoInitialize(0) WinEventProcType = ctypes.WINFUNCTYPE( None, ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.HWND, ctypes.wintypes.LONG, ctypes.wintypes.LONG, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD ) def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread, dwmsEventTime): length = user32.GetWindowTextLengthA(hwnd) buff = ctypes.create_string_buffer(length + 1) user32.GetWindowTextA(hwnd, buff, length + 1) if b"Dropbox" in buff.value: kill_window() WinEventProc = WinEventProcType(callback) user32.SetWinEventHook.restype = ctypes.wintypes.HANDLE hook = user32.SetWinEventHook( EVENT_SYSTEM_FOREGROUND, EVENT_SYSTEM_FOREGROUND, 0, WinEventProc, 0, 0, WINEVENT_OUTOFCONTEXT ) if hook == 0: print('SetWinEventHook failed') sys.exit(1) msg = ctypes.wintypes.MSG() while user32.GetMessageW(ctypes.byref(msg), 0, 0, 0) != 0: user32.TranslateMessageW(msg) user32.DispatchMessageW(msg) user32.UnhookWinEvent(hook) ole32.CoUninitialize() only_one() foreground_window_hook()
Python
0
@@ -87,212 +87,414 @@ %22%22%22%0A -%0Adef only_one():%0A from tendo import singleton%0A me = singleton.SingleInstance() # will sys.exit(-1) if other instance is running%0A%0Adef kill_window():%0A from pywinauto import Application, find +import logging%0Aimport ctypes%0Afrom sys import exit%0Afrom pywinauto import Application, findwindows%0Afrom fasteners import InterProcessLock%0Afrom tempfile import gettempdir%0A%0Alogger = logging.getLogger()%0Ach = logging.FileHandler(gettempdir() + '/bdbc.log')%0Aformatter = logging.Formatter('%25(asctime)s - %25(name)s - %25(levelname)s - %25(message)s')%0Ach.setFormatter(formatter)%0Alogger.addHandler(ch)%0A%0A%0Adef kill_ window -s +(): %0A @@ -791,21 +791,171 @@ -window.Close( +logger.warning('Configuration window for Dropbox detected')%0A window.Close()%0A logger.warning('Configuration window for Dropbox closed' )%0A @@ -1043,84 +1043,8 @@ k(): -%0A import sys%0A import time%0A import ctypes%0A import ctypes.wintypes %0A%0A @@ -1778,16 +1778,67 @@ .value:%0A + logger.info('Dropbox window detected')%0A @@ -2185,52 +2185,8 @@ -print('SetWinEventHook failed')%0A sys. exit @@ -2425,40 +2425,237 @@ ()%0A%0A -only_one()%0Aforeground_window_hook( +logger.info('Acquiring lock')%0Alock = InterProcessLock(gettempdir() + '/bdbc_lock_file')%0Agotten = lock.acquire(timeout=10)%0A%0Aif gotten:%0A logger.info('Lock acquired')%0A foreground_window_hook()%0Aelse:%0A logger.info('Lock failed' )%0A
50ac5ff1d350360d8c9227f6a05e718ca056dd98
Allow printing non-string messages (#3)
blaz.py
blaz.py
from distutils.spawn import find_executable from os import environ, chdir, getenv from os.path import abspath, basename, dirname, join as join_dir from subprocess import check_call, CalledProcessError from sys import argv from colors import bold from hashlib import md5 from version import __version__ import semantic_version import sys try: from subprocess import DEVNULL # py3k except ImportError: import os DEVNULL = open(os.devnull, 'wb') class Blaz(object): def __init__(self, **kwargs): self.__dict__ = kwargs self.file = abspath(argv[0]) self.script = basename(self.file) self.argv = ' '.join(argv[1:]) self.__dict__.update({ 'dir': dirname(self.file), 'image': getenv('BLAZ_IMAGE', 'amiorin/alpine-blaz'), 'docker_exe': self._find_docker_exe(), 'docker_sock': getenv('DOCKER_SOCK', '/var/run/docker.sock'), 'docker_options': getenv('DOCKER_OPTIONS', '--rm --privileged --net=host'), 'version': __version__ }) chdir(self.dir) if 'BLAZ_CHDIR_REL' in environ: self.mount_dir = abspath(join_dir(self.dir, environ['BLAZ_CHDIR_REL'])) else: self.mount_dir = self.dir self._create_lock() def _find_latest_docker_image(self): image = self.image while True: prev = image image = image.format(self) if prev == image: break self.image = self._do_find_latest_docker_image(image) def _do_find_latest_docker_image(self, image): next_image = self._next_docker_image_version(image) try: check_call(['docker', 'pull', next_image], stdout=DEVNULL, stderr=DEVNULL) except CalledProcessError: return image else: return self._do_find_latest_docker_image(next_image) def _next_docker_image_version(self, image): xs = image.split(':') assert len(xs) == 2, "Your docker image name ({}) doesn't contain the tag".format(image) xs[-1] = str(semantic_version.Version(xs[-1]).next_patch()) return ':'.join(xs) def _find_docker_exe(self): if 'DOCKER_EXE' not in environ: return find_executable('docker') else: return environ['DOCKER_EXE'] def _create_lock(self): m = md5() m.update(bytearray('{0.dir}/{0.script} {0.argv}'.format(self), 'utf-8')) self.lock = m.hexdigest() def before(self): return not self._fresh() def after(self): return self._fresh() def _fresh(self): if 'BLAZ_LOCK' in environ: return environ['BLAZ_LOCK'] == self.lock else: return False def invoke(self, main): if self._fresh() or 'BLAZ_SKIP' in environ: if 'BLAZ_SKIP' in environ: del environ['BLAZ_SKIP'] if 'DOCKER_IMMUTABLE' in environ: del environ['DOCKER_IMMUTABLE'] main(self) else: if 'DOCKER_IMMUTABLE' not in environ: if 'BLAZ_DONT_PULL' not in environ: check_call(['docker', 'pull', "{0.image}".format(self)], stdout=DEVNULL, stderr=DEVNULL) else: self._find_latest_docker_image() self._docker_run() def cd(self, subdir="."): chdir(join_dir(self.mount_dir, subdir)) def log(self, msg='', fg='yellow'): sys.stdout.flush() sys.stderr.write(bold(msg + '\n', fg=fg)) sys.stderr.flush() def run(self, cmd, fg='green'): while True: prev = cmd cmd = cmd.format(self) if prev == cmd: break self.log(cmd, fg=fg) check_call(cmd, shell=True) sys.stdout.flush() sys.stderr.flush() def _forward_blaz_env_vars(self): result = [] for k in environ.keys(): if k.find('BLAZ_') == 0 and k != 'BLAZ_LOCK' and k != 'BLAZ_VERSION' and k != 'BLAZ_CHDIR_REL' and k != 'BLAZ_SKIP': result.append(''' --env={}={} '''.format(k, environ[k])) elif k.find('_BLAZ_') == 0: result.append(''' --env={0}=${0} '''.format(k)) return ''.join(result) def _docker_run(self): cmd = ''' {0.docker_exe} run {0.docker_options} ''' cmd = cmd + self._forward_blaz_env_vars() cmd = cmd + ''' --env=DOCKER_EXE={0.docker_exe} 
--env=DOCKER_SOCK={0.docker_sock} --env=BLAZ_LOCK={0.lock} --env=BLAZ_VERSION={0.version} --volume={0.mount_dir}:{0.mount_dir} --volume={0.docker_exe}:{0.docker_exe} --volume={0.docker_sock}:{0.docker_sock} {0.image} {0.dir}/{0.script} {0.argv} ''' cmd = '\n '.join([x.strip() + ' \\' for x in cmd.split('\n') if x.strip() is not ''])[:-2] self.run(cmd, fg='blue')
Python
0.000029
@@ -3539,19 +3539,24 @@ te(bold( +str( msg +) + '%5Cn',
906059dc50e3c850da9da61e3eedb7bc73b777b4
remove pager when no blog posts are listed.
blog.py
blog.py
''' Created on 2013-12-12 @author: zhangzhi ''' from google.appengine.api import users, memcache from google.appengine.ext import ndb import webapp2 import jinja2 import gettext import os, logging from model import * from base import BaseRequestHandler #START: BlogsListPage class BlogsList(BaseRequestHandler): def get(self): #6 post per page as default page=self.request.get('page') page=(int(page) if page else 1)#to int 1~& size=Article.query(Article.draft==False).count()#0~& max=(size/6)+(0 if size%6==0 else 1)#1~& articles = Article.query(Article.draft==False).order(-Article.date).fetch(6, offset=int(page-1)*6) older = (None if page==max else page+1) newer = (None if page==1 else page-1) template_values = { 'page_title': 'Blog', 'blog_active': 'active', 'articles': articles, 'older':older, 'newer':newer, } template_values.update(BaseRequestHandler.base_values) template = self.get_env.get_template('bloglist.html') self.response.write(template.render(template_values)) #END: BlogsListPage #START: SingleBlogPage class SingleBlog(BaseRequestHandler): def get(self, archive=None, postid=None): #logging.info(os.environ['PATH_INFO'])#request path article = Article.query(Article.archive==archive, Article.postid==postid).fetch() if not article: return self.error(404) article[0].read += 1 article[0].put() comments = Comment.query(Comment.entry==article[0].key).order(-Comment.date) logging.info(comments) template_values = { 'page_title': 'Blog', 'blog_active': 'active', 'article': article, 'comments': comments, } template_values.update(BaseRequestHandler.base_values) template = self.get_env.get_template('singleblog.html') self.response.write(template.render(template_values)) #END: SingleBlogPage #START: BlogsListPage class BlogsTags(BaseRequestHandler): def get(self, link=None): #6 post per page as default page=self.request.get('page') page=(int(page) if page else 1)#to int 1~& size=Article.query(Article.draft==False, Article.tags.IN([link])).count()#0~& max=(size/6)+(0 if size%6==0 else 1)#1~& articles = Article.query(Article.draft==False, Article.tags.IN([link])).order(-Article.date).fetch(6, offset=int(page-1)*6) older = (None if page==max else page+1) newer = (None if page==1 else page-1) template_values = { 'page_title': 'Blog', 'blog_active': 'active', 'articles': articles, 'older':older, 'newer':newer, } template_values.update(BaseRequestHandler.base_values) template = self.get_env.get_template('bloglist.html') self.response.write(template.render(template_values)) #END: BlogsListPage #START: BlogCommentPage class BlogComment(BaseRequestHandler): def get(self): logging.info('blog comment get arrived') def post(self): logging.info('blog comment post arrived') cmtext=self.request.get('cmtext') keyid=self.request.get('keyid') ipaddr=self.request.remote_addr author=self.request.get('name') email=self.request.get('email') #if author and email not None, cookie it. comment=Comment(entry=ndb.Key('Article',int(keyid)),content=cmtext,ipaddr=ipaddr,author=author,email=email) comment.put() self.response.write('<h3>%s<small>%s</small></h3><p>%s</p>'%(comment.author,comment.date,cmtext)) #END: BlogCommentPage # START: Frame app = webapp2.WSGIApplication([('/blog', BlogsList), ('/blog/(?P<archive>\d{6})/(?P<postid>\d{6})', SingleBlog), ('/blog/tag/(?P<link>\w+)', BlogsTags), ('/blog/comment', BlogComment) ], debug=True) # END: Frame
Python
0
@@ -665,33 +665,57 @@ =int(page-1)*6)%0A -%0A + if articles:%0A older = @@ -738,32 +738,36 @@ ax else page+1)%0A + newer =
c8ba22ce19656c48c7e034adff884d499af14c44
remove debug print
monolithe/generators/sdkdoc/lib/sdkdocwriter.py
monolithe/generators/sdkdoc/lib/sdkdocwriter.py
# -*- coding: utf-8 -*- import os import shutil import importlib import inspect import json from monolithe.lib import Printer, SDKUtils, TaskManager from monolithe.generators.lib import TemplateFileWriter class SDKDocWriter(object): """ Writer of the Python SDK Documentation """ def __init__(self, monolithe_config): """ """ self.writer = None self.monolithe_config = monolithe_config self._sdk_name = self.monolithe_config.get_option("sdk_name", "sdk") self._sdk_output = self.monolithe_config.get_option("sdk_output", "sdk") def _parse_module(self, module): """ """ classes = [] for module_info in inspect.getmembers(module): if not inspect.isclass(module_info[1]): continue inspected_class = module_info[1] inspected_class_name = module_info[0] if inspected_class_name in ("NullHandler"): continue info = {"class_name": inspected_class_name, "constant_names": [], "property_names": [], "inherited_property_names": [], "method_names": [], "inherited_method_names": [], "class_method_names": []} for class_info in inspect.getmembers(inspected_class): inspected_object = class_info[1] inspected_object_name = class_info[0] if inspected_object_name.startswith("_"): continue if inspect.isbuiltin(inspected_object): continue if inspect.ismethod(inspected_object): if inspected_object_name in inspected_class.__dict__: info["method_names"].append(inspected_object_name) else: info["inherited_method_names"].append(inspected_object_name) elif inspect.isdatadescriptor(inspected_object): if inspected_object in inspected_class.__dict__.values(): info["property_names"].append(inspected_object_name) else: info["inherited_property_names"].append(inspected_object_name) elif inspected_object_name.startswith("CONST_"): info["constant_names"].append(inspected_object_name) from pprint import pprint pprint(info) classes.append(info) return classes def write(self): """ """ task_manager = TaskManager() self.writer = SDKDocFileWriter(self.monolithe_config) self.writer.write_index() self.writer.write_conf() self.writer.write_general_concepts() # bambou bambou_module = importlib.import_module("bambou") bambou_classes = self._parse_module(bambou_module) self.writer.write_bambou_reference() for bambou_class in bambou_classes: task_manager.start_task(self._write_class_references, bambou_class, "bambou", "bambou", "bambou") # sdk generated_sdk_path = "%s/%s" % (self._sdk_output, self._sdk_name) for folder in os.listdir(generated_sdk_path): if not os.path.isdir("%s/%s" % (generated_sdk_path, folder)): continue version = SDKUtils.get_float_version(folder) self.writer.write_sdk_version_reference(version) # sdk model sdk_model_module_name = "%s.%s" % (self._sdk_name, folder) sdk_model_module = importlib.import_module(sdk_model_module_name) sdk_model_classes = self._parse_module(sdk_model_module) for sdk_model_class in sdk_model_classes: task_manager.start_task(self._write_class_references, sdk_model_class, sdk_model_module_name, "models", "%s/%s" % (self._sdk_name, version)) # sdk fetchers sdk_fetcher_module_name = "%s.%s.fetchers" % (self._sdk_name, folder) sdk_fetcher_module = importlib.import_module(sdk_fetcher_module_name) sdk_fetcher_classes = self._parse_module(sdk_fetcher_module) for sdk_fetcher_class in sdk_fetcher_classes: task_manager.start_task(self._write_class_references, sdk_fetcher_class, sdk_fetcher_module_name, "fetchers", "%s/%s" % (self._sdk_name, version)) task_manager.wait_until_exit() def _write_class_references(self, class_info, module_name, file_prefix, folder): """ """ self.writer.write_class_reference(class_info, 
module_name, file_prefix, folder) class SDKDocFileWriter(TemplateFileWriter): """ """ def __init__(self, monolithe_config): """ """ super(SDKDocFileWriter, self).__init__(package="monolithe.generators.sdkdoc") self.monolithe_config = monolithe_config self._sdk_name = self.monolithe_config.get_option("sdk_name", "sdk") self._product_name = self.monolithe_config.get_option("product_name") self._copyright = self.monolithe_config.get_option("copyright") self.output_directory = self.monolithe_config.get_option("sdkdoc_tmp_path", "sdkdoc") def write_conf(self): """ """ self.write( destination=self.output_directory, filename="conf.py", template_name="conf.py.tpl", sdk_name=self._sdk_name, copyright=self._copyright) def write_index(self): """ """ with open("%s/pages.json" % self.output_directory) as f: pages_info = json.loads(f.read()) self.write( destination=self.output_directory, filename="index.rst", template_name="index.rst.tpl", sdk_name=self._sdk_name, product_name=self._product_name, pages_info=pages_info) def write_general_concepts(self): """ """ self.write( destination=self.output_directory, filename="general_concepts.rst", template_name="general_concepts.rst.tpl", sdk_name=self._sdk_name, product_name=self._product_name) def write_bambou_reference(self): """ """ self.write( destination=self.output_directory, filename="bambou_reference.rst", template_name="bambou_reference.rst.tpl", sdk_name=self._sdk_name) def write_sdk_version_reference(self, version): """ """ filename = "%s_%s_reference.rst" % (self._sdk_name, version) self.write( destination=self.output_directory, filename=filename, template_name="sdk_reference.rst.tpl", sdk_name=self._sdk_name, version=version) def write_class_reference(self, class_info, module_name, file_prefix, folder): """ """ destination = "%s/%s" % (self.output_directory, folder) if not os.path.exists(destination): try: os.makedirs(destination) except: pass filename = "%s.%s.rst" % (file_prefix, class_info["class_name"].lower()) self.write( destination=destination, filename=filename, template_name="class_reference.rst.tpl", module_name=module_name, class_name=class_info["class_name"], property_names=class_info["property_names"], inherited_property_names=class_info["inherited_property_names"], constant_names=class_info["constant_names"], class_method_names=class_info["class_method_names"], method_names=class_info["method_names"], inherited_method_names=class_info["inherited_method_names"])
Python
0.000008
@@ -2332,71 +2332,8 @@ e)%0A%0A - from pprint import pprint%0A pprint(info)%0A
c5f9f63a1657fbae97777cbf241df0ef9adbc1d4
Fix info messages
motor_interface/scripts/motor_interface_node.py
motor_interface/scripts/motor_interface_node.py
#!/usr/bin/env python import math import Adafruit_PCA9685 import numpy import rospy from vortex_msgs.msg import Float64ArrayStamped from motor_interface.srv import ThrustersEnable, ThrustersEnableResponse class MotorInterface(object): def __init__(self): rospy.init_node('motor_interface', anonymous=False) self.pub = rospy.Publisher('debug/thruster_pwm', Float64ArrayStamped, queue_size=10) self.sub = rospy.Subscriber('thruster_forces', Float64ArrayStamped, self.callback) self.srv = rospy.Service('/motor_interface/thrusters_enable', ThrustersEnable, self.handle_thrusters_enable) self.PWM_BITS_PER_PERIOD = 4096.0 # 12 bit PWM self.FREQUENCY = 249 # Max 400 Hz self.FREQUENCY_MEASURED = 251.2 # Use this for better precision self.PERIOD_LENGTH_IN_MICROSECONDS = 1000000.0/self.FREQUENCY_MEASURED self.THRUST_RANGE_LIMIT = 100 self.ENABLE_RATE_LIMITER = False self.lookup_thrust = rospy.get_param('/thrusters/characteristics/thrust') self.lookup_pulse_width = rospy.get_param('/thrusters/characteristics/pulse_width') self.num_thrusters = rospy.get_param('/propulsion/thrusters/num') self.max_rate = rospy.get_param('/thrusters/rate_of_change/max') self.motors_connected = rospy.get_param('/motor_interface/motors_connected') self.rate_limiting_enabled = rospy.get_param('/motor_interface/rate_limiting_enabled') self.prev_time = rospy.get_rostime() self.is_initialized = False # The setpoint is the desired value (input) self.thrust_setpoint = numpy.zeros(self.num_thrusters) # The reference is the output value (rate limited) self.thrust_reference = numpy.zeros(self.num_thrusters) self.motors_enabled = True # Initialize the PCA9685 using the default address (0x40) if self.motors_connected: self.pca9685 = Adafruit_PCA9685.PCA9685() self.pca9685.set_pwm_freq(self.FREQUENCY) self.output_to_zero() rospy.loginfo("Launching node %s at %d Hz", rospy.get_name(), self.FREQUENCY) def output_to_zero(self): neutral_pulse_width = self.microsecs_to_bits(self.thrust_to_microsecs(0)) if self.motors_connected and self.motors_enabled: for i in range(self.num_thrusters): self.pca9685.set_pwm(i, 0, neutral_pulse_width) def callback(self, msg): if not self.healthy_message(msg): return if not self.is_initialized: self.prev_time = msg.header.stamp self.is_initialized = True rospy.loginfo('Initialized %s', rospy.get_name()) return curr_time = msg.header.stamp dt = (curr_time - self.prev_time).to_sec() if (dt <= 0) and self.rate_limiting_enabled: rospy.logwarn_throttle(1, '%s: Zero time difference between messages, ignoring...' 
% rospy.get_name()) return self.prev_time = curr_time thrust_setpoint_list = msg.data self.thrust_setpoint = thrust_setpoint_list self.update_reference(dt) self.set_pwm() def handle_thrusters_enable (self, req): if req.thrusters_enable: rospy.loginfo('%s: Enabling thrusters', rospy.get_name()) self.motors_enabled = True else: rospy.loginfo('%s: Disabling thrusters', rospy.get_name()) self.output_to_zero() self.motors_enabled = False return ThrustersEnableResponse() def thrust_to_microsecs(self, thrust): return numpy.interp(thrust, self.lookup_thrust, self.lookup_pulse_width) def microsecs_to_bits(self, microsecs): duty_cycle_normalized = microsecs / self.PERIOD_LENGTH_IN_MICROSECONDS return int(round(self.PWM_BITS_PER_PERIOD * duty_cycle_normalized)) def update_reference(self, dt): if self.rate_limiting_enabled: rate_of_change = (self.thrust_setpoint - self.thrust_reference)/dt for i in range(self.num_thrusters): if rate_of_change[i] > self.max_rate: self.thrust_reference[i] += dt * self.max_rate elif rate_of_change[i] < -self.max_rate: self.thrust_reference[i] -= dt * self.max_rate else: self.thrust_reference[i] = self.thrust_setpoint[i] else: self.thrust_reference = self.thrust_setpoint def set_pwm(self): microsecs = [None]*self.num_thrusters for i in range(self.num_thrusters): microsecs[i] = self.thrust_to_microsecs(self.thrust_reference[i]) pwm_bits = self.microsecs_to_bits(microsecs[i]) if self.motors_connected and self.motors_enabled: self.pca9685.set_pwm(i, 0, pwm_bits) # Publish outputs for debug debug_msg = Float64ArrayStamped() debug_msg.header.stamp = rospy.get_rostime() debug_msg.data = microsecs self.pub.publish(debug_msg) def healthy_message(self, msg): if (len(msg.data) != self.num_thrusters): rospy.logwarn_throttle(1, '%s: Wrong number of thrusters, ignoring...' % rospy.get_name()) return False for t in msg.data: if math.isnan(t) or math.isinf(t) or (abs(t) > self.THRUST_RANGE_LIMIT): rospy.logwarn_throttle(1, '%s: Message out of range, ignoring...' % rospy.get_name()) return False return True if __name__ == '__main__': try: motor_interface = MotorInterface() rospy.spin() except rospy.ROSInterruptException: pass
Python
0.000001
@@ -2144,16 +2144,20 @@ oginfo(%22 +%25s: Launchin @@ -2162,16 +2162,8 @@ ing -node %25s at %25 @@ -2726,17 +2726,34 @@ oginfo(' -I +%25s: Successfully i nitializ @@ -2758,11 +2758,8 @@ ized - %25s ', r
d246e36c824931613229a6467e21d65d392a2fa3
Revert commit "[FIX] sale_margin: purchase price calculated using the currency of the price type" Reasons: - the currency conversion is done assuming that the cost price currency is the company currency - we support only one price type per field. Defining several price types on the same field is not supported.
addons/sale_margin/sale_margin.py
addons/sale_margin/sale_margin.py
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class sale_order_line(osv.osv): _inherit = "sale.order.line" def product_id_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False, qty_uos=0, uos=False, name='', partner_id=False, lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None): res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id, lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context) if not pricelist: return res if context is None: context = {} frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id if product: product = self.pool['product.product'].browse(cr, uid, product, context=context) purchase_price = product.price_get(ptype='standard_price', context=dict(context, currency_id=to_cur))[product.id] to_uom = res.get('product_uom', uom) if to_uom != product.uom_id.id: purchase_price = self.pool['product.uom']._compute_price(cr, uid, product.uom_id.id, purchase_price, to_uom) ctx = context.copy() ctx['date'] = date_order price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False, context=ctx) res['value'].update({'purchase_price': price}) return res def _product_margin(self, cr, uid, ids, field_name, arg, context=None): res = {} for line in self.browse(cr, uid, ids, context=context): res[line.id] = 0 if line.product_id: res[line.id] = round(line.price_subtotal - ((line.purchase_price or line.product_id.standard_price) * line.product_uos_qty), 2) return res _columns = { 'margin': fields.function(_product_margin, string='Margin', store = True), 'purchase_price': fields.float('Cost Price', digits=(16,2)) } sale_order_line() class sale_order(osv.osv): _inherit = "sale.order" def _product_margin(self, cr, uid, ids, field_name, arg, context=None): result = {} for sale in self.browse(cr, uid, ids, context=context): result[sale.id] = 0.0 for line in sale.order_line: result[sale.id] += line.margin or 0.0 return result def _get_order(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context): result[line.order_id.id] = True return result.keys() _columns = { 'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", 
store={ 'sale.order.line': (_get_order, ['margin'], 20), 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20), }), } sale_order() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -2105,96 +2105,22 @@ uct. -price_get(ptype='standard_price', context=dict(context, currency_id=to_cur))%5Bproduct.id%5D +standard_price %0A
a40f87d700d37590a44d67b3044bd06cea52b495
Delete snapshots once the gif is rendered
gifbox/core/management/commands/render_gifs.py
gifbox/core/management/commands/render_gifs.py
import os import subprocess import requests import shutil import tempfile from datetime import timedelta from django.utils import timezone from django.core.management.base import BaseCommand, CommandError from core.models import Image, AnimatedGif class Command(BaseCommand): help = """ Generates animated gifs If it has been 3 hours since the last 3 hourly gif was made it generates a new one If it has been a day since the last daily gif it will generate another one """ GIF_PROFILES = ( { 'period': AnimatedGif.PERIOD_3_HOURLY, 'created__gte': timezone.now() - timedelta(hours=3), 'output_file_path': '/tmp/{}.gif'.format(AnimatedGif.PERIOD_3_HOURLY), }, # { # 'period': AnimatedGif.PERIOD_DAILY, # 'created__gte': timezone.now() - timedelta(days=1), # 'output_file_path': '/tmp/{}.gif'.format(AnimatedGif.PERIOD_DAILY), # }, ) GIF_VERSIONS = ( { 'name': 'full size', 'thumb_size': None, }, { 'name': 'preview', 'thumb_size': '200x200', }, ) def make_gif(self, snapshot_image_urls, output_file_path, delay_ms='30', resize_px='800'): scratch_dir = tempfile.mkdtemp() self.stdout.write('Using scratch dir {}'.format(scratch_dir)) snapshot_file_paths = [] for url in snapshot_image_urls: # Download and save all the images to the scratch_dir so that convert can process # them self.stdout.write('Downloading {}'.format(url)) response = requests.get(url, stream=True) if response.ok: file_path = os.path.join(scratch_dir, os.path.basename(url)) with open(file_path, 'wb') as out_file: self.stdout.write('Saving to {}'.format(file_path)) shutil.copyfileobj(response.raw, out_file) snapshot_file_paths.append(file_path) else: self.stderr.write('Error {} when getting {}'.format(response.status_code, url)) # Construct the convert command command = ['convert'] if delay_ms is not None: command += ['-delay', delay_ms] if resize_px is not None: command += ['-resize', resize_px] command += ['-loop', '0'] + snapshot_file_paths + [output_file_path] # Generate the gif result = subprocess.run(command) # Delete the scratch dir shutil.rmtree(scratch_dir) if result.returncode: raise CommandError( 'Received error code {} when running {}'.format(result.returncode, result.args)) def handle(self, *args, **options): for gif_profile in self.GIF_PROFILES: output_file_path = gif_profile.pop('output_file_path') if not AnimatedGif.objects.filter(**gif_profile).exists(): # No gif for the defired time period. 
Generate a new one based on any new snapshots # uploaded in that time snapshot_image_fields = [ snapshot.image for snapshot in ( Image.objects .filter(created__gte=gif_profile['created__gte']) .order_by('created') ) ] # Create the new AnimatedGif instance but don't save it yet gif_instance = AnimatedGif( title=os.path.basename(output_file_path), period=gif_profile['period']) # Render the gif versions (ie full size and thumbnail) for version in self.GIF_VERSIONS: if version['thumb_size'] is None: snapshot_urls = [field.url for field in snapshot_image_fields] gif_path = output_file_path self.make_gif(snapshot_urls, gif_path) image_field = gif_instance.image else: snapshot_urls = [ field.thumbnail[version['thumb_size']].url for field in snapshot_image_fields] path_bits = output_file_path.rsplit('.', maxsplit=1) path_bits[0] = '{}_{}'.format(path_bits[0], version['thumb_size']) gif_path = '.'.join(path_bits) self.make_gif(snapshot_urls, gif_path, resize_px=None) image_field = getattr(gif_instance, 'image_{}'.format(version['name'])) # Read the gif data into a buffer and save it on the appropriate image field with open(gif_path, 'rb') as image_data: file_name = os.path.basename(gif_path) image_field.save(file_name, image_data) gif_instance.save() self.stdout.write(self.style.SUCCESS('Process complete'))
Python
0
@@ -3152,103 +3152,24 @@ shot -_image_fields = %5B%0A snapshot.image%0A for snapshot in (%0A +s_to_render = (%0A @@ -3198,20 +3198,16 @@ objects%0A - @@ -3296,20 +3296,16 @@ - - .order_b @@ -3316,16 +3316,18 @@ reated') +)%0A %0A @@ -3339,38 +3339,83 @@ - )%0A +snapshot_image_fields = %5Bsnapshot.image for snapshot in snapshots_to_render %5D%0A%0A @@ -4985,16 +4985,136 @@ save()%0A%0A + # Now go ahead and delete the snapshots to conserve space%0A snapshots_to_render.delete()%0A%0A
aac855cb0339f87e1046811b9097858201cfe841
isolate helper functions
grove/alpha/jordan_gradient/gradient_helper.py
grove/alpha/jordan_gradient/gradient_helper.py
import numpy as np from jordan_gradient import gradient_estimator def real_to_binary(number, precision=16): """ Convert real decimal to precision-bit binary fraction :param float number: Real decimal over [0, 1). :param int precision: Number of bits of binary precision. :return float bf: Binary fraction representation of real decimal. """ n_sign = np.sign(number) number = abs(number) bf = '' for val in range(precision): number = 2 * (number % 1) bf += str(int(number)) bf = n_sign * float('.' + bf) return bf def binary_to_real(number): """ Convert binary fraction to real decimal :param float number: Floating point representation of binary fraction. :return float deci: Real decimal representation of binary fraction. """ if isinstance(number, str): if number[0] == '-': n_sign = -1 else: n_sign = 1 elif isinstance(number, float): n_sign = np.sign(number) deci = 0 for ndx, val in enumerate(str(number).split('.')[-1]): deci += float(val) / 2**(ndx+1) deci *= n_sign return deci def stats_to_bf(stats): """ Convert measurement into gradient binary fraction :param np.array stats: Output measurement statistics of gradient program. :return float bf: Binary fraction representation of gradient estimate. """ stats_str = [str(int(i)) for i in np.ceil(stats[::-1][1:])] bf_str = '0.' + ''.join(stats_str) bf = float(bf_str) return bf def gradient_error(f_h, precision=5, n_measurements=100): """ Computes error of gradient estimates for an input perturbation value :param np.array/float f_h: Value of f at perturbation h. :param int n_measurements: Number of times to run the gradient program. :return float error: Error of gradient estimate. """ if isinstance(f_h, float): d = 1 # f_h = np.array(f_h) else: d = f_h.ndim # enumerate qubit register N_qi = d * precision input_qubits = list(range(N_qi)) ancilla_qubits = [N_qi] # build program and run n_measurements times p_g = gradient_estimator(f_h, input_qubits, ancilla_qubits, precision) from pyquil.api import SyncConnection qvm = SyncConnection() measurements = np.array(qvm.run(p_g, input_qubits, n_measurements)) # summarize measurements and compute error stats = measurements.sum(axis=0) / len(measurements) bf_estimate = stats_to_bf(stats) deci_estimate = binary_to_real(bf_estimate) error = f_h - deci_estimate return error
Python
0.000004
@@ -15,55 +15,8 @@ s np -%0Afrom jordan_gradient import gradient_estimator %0A%0Ade @@ -1528,1074 +1528,4 @@ bf%0A%0A -def gradient_error(f_h, precision=5, n_measurements=100):%0A %22%22%22 Computes error of gradient estimates for an input perturbation value%0A %0A :param np.array/float f_h: Value of f at perturbation h.%0A :param int n_measurements: Number of times to run the gradient program.%0A :return float error: Error of gradient estimate.%0A %22%22%22%0A%0A if isinstance(f_h, float):%0A d = 1 # f_h = np.array(f_h)%0A else:%0A d = f_h.ndim%0A%0A # enumerate qubit register%0A N_qi = d * precision%0A input_qubits = list(range(N_qi))%0A ancilla_qubits = %5BN_qi%5D%0A%0A # build program and run n_measurements times%0A p_g = gradient_estimator(f_h, input_qubits, ancilla_qubits, precision)%0A%0A from pyquil.api import SyncConnection%0A qvm = SyncConnection()%0A measurements = np.array(qvm.run(p_g, input_qubits, n_measurements))%0A%0A # summarize measurements and compute error%0A stats = measurements.sum(axis=0) / len(measurements)%0A bf_estimate = stats_to_bf(stats)%0A deci_estimate = binary_to_real(bf_estimate)%0A error = f_h - deci_estimate%0A %0A return error%0A
935a9dda8ffcbef049f62f7df6778be5c502dcfa
Update quick_install.py
bin/quick_install.py
bin/quick_install.py
#!/usr/bin/env python """Quickly install Mapbender3 starter using Git. You can just call it or give the name of a branch (defaults to 'develop' as well as a the directory where to install/update (defaults to mapbender3_BRANCH). The default admin account (root <root@example.com> / root) can also be given using command line arguments. Check the help with -h. Examples ======== http://bit.ly/1tQvo5i is the shortened URL for https://raw.githubusercontent.com/mapbender/mapbender-starter/develop/bin/quick_install.py - Install develop branch into mapbender3_develop curl -sSL http://bit.ly/1tQvo5i | python - Install foo branch into /tmp/bar curl -sSL http://bit.ly/1tQvo5i | python - --dir=/tmp/bar foo - Install develop branch, but use admin <admin@example.com> with password admin curl -sSL http://bit.ly/1tQvo5i | python - --username=admin --useremail=admin@example.com --userpassword=admin """ import argparse import os.path import shutil import subprocess import sys GIT_URL = 'https://github.com/mapbender/mapbender-starter.git' # Command tuples (Command, Working dir) INSTALL_CMDS = ( ('git clone %s -b {branch} {dir}' % GIT_URL, None), ('git submodule update --init --recursive', '{dir}'), ('phing deps', '{dir}'), ('app/console doctrine:database:create', '{dir}/application'), ('app/console doctrine:schema:create', '{dir}/application'), ('app/console fom:user:reset --username={username} --email={email} ' '--password={password} --silent', '{dir}/application'), ('app/console doctrine:fixtures:load ' '--fixtures=./mapbender/src/Mapbender/CoreBundle/DataFixtures/ORM/Epsg/' ' --append', '{dir}/application'), ) def main(**kwargs): if os.path.exists(kwargs.get('dir')) and not kwargs.get('force_install'): update(**kwargs) else: install(**kwargs) def install(**kwargs): if kwargs.get('force_install') and os.path.exists(kwargs.get('dir')): shutil.rmtree(kwargs.get('dir')) execute = True for cmd, cwd in INSTALL_CMDS: try: cmd = cmd.format(**kwargs) cwd = cwd.format(**kwargs) if cwd else None print("%s \"%s\" in \"%s\"" % ( 'Executing' if execute else 'Would execute', cmd, cwd if cwd else '.')) if execute: print('') subprocess.check_call( cmd.split(' '), cwd=cwd) print('') except subprocess.CalledProcessError: execute = False if not execute: sys.exit(1) def update(**kwargs): pass if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( 'branch', metavar='BRANCH', type=str, default='develop', nargs='?', help='Branch to checkout') parser.add_argument( '-d', '--dir', metavar='DIRECTORY', type=str, help='Working directory, defaults to mapbender3_BRANCH') parser.add_argument( '--username', type=str, default='root', help='Username for admin account (root)') parser.add_argument( '--email', type=str, default='root@example.com', help='Email for admin account (root@example.com)') parser.add_argument( '--password', type=str, default='root', help='Password for admin account (root)') parser.add_argument( '--force-install', action='store_true', default=False, help='Delete and install again instead of update') args = parser.parse_args() args.dir = args.dir if args.dir else 'mapbender3_%s' % args.branch main(**vars(args))
Python
0.000001
@@ -859,20 +859,16 @@ admin -- -user email=ad @@ -885,20 +885,16 @@ e.com -- -user password
29d209feb2a52c8cffb3f4116d43b2b01053df4b
Create latest and branch folders
deploy_gh_pages.py
deploy_gh_pages.py
import json import os import shutil import tempfile def copytree(src, dst, symlinks=False, ignore=None): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) def call(command, ignore_error=False): ret = os.system(command) if ret != 0 and not ignore_error: raise Exception("Command failed: %s" % command) excluded_files = (".git", "CNAME", "index.html") def config_git(): call('git config --global user.email "lasote@gmail.com"') call('git config --global user.name "Luis Martinez de Bartolome"') def clean_gh_pages(): call('git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" 1>/dev/null') call("git fetch origin -q") call("git checkout gh-pages") if os.path.exists("en"): shutil.rmtree("en") def build_and_copy(branch, folder_name, versions_available, validate_links=False): call("git checkout %s" % branch) call("git pull origin %s" % branch) with open('versions.json', 'w') as f: f.write(json.dumps(versions_available)) call("make html") if validate_links: call("make linkcheck") tmp_dir = tempfile.mkdtemp() copytree("_build/html/", tmp_dir) shutil.rmtree("_build") # Go to deploy branch, copy new files and commit call("git checkout gh-pages") if not os.path.exists("en"): os.mkdir("en") version_folder = "en/%s" % (folder_name if branch != "master" else "latest") if os.path.exists(version_folder): shutil.rmtree(version_folder) os.mkdir(version_folder) copytree(tmp_dir, version_folder) call("git add -A .") call("git commit --message 'committed version %s'" % folder_name, ignore_error=True) def deploy(): if not os.getenv("TRAVIS_BRANCH", None) == "master": print("Skipping deploy for not master branch") return if os.getenv("TRAVIS_PULL_REQUEST", "") != "false": print("Deploy skipped, This is a PR in the main repository") return if not os.getenv("GITHUB_API_KEY"): print("Deploy skipped, missing GITHUB_API_KEY. Is this a PR?") return call('git remote add origin-pages ' 'https://%s@github.com/conan-io/docs.git > /dev/null 2>&1' % os.getenv("GITHUB_API_KEY")) call('git push origin-pages gh-pages') if __name__ == "__main__": config_git() clean_gh_pages() versions_dict = {"master": "1.4", "release/1.3.3": "1.3"} for branch, folder_name in versions_dict.items(): build_and_copy(branch, folder_name, versions_dict, validate_links=branch == "master") deploy()
Python
0
@@ -1532,19 +1532,21 @@ n_folder +s = +%5B %22en/%25s%22 @@ -1547,17 +1547,16 @@ n/%25s%22 %25 -( folder_n @@ -1558,16 +1558,21 @@ der_name +%5D%0A if bran @@ -1574,17 +1574,17 @@ branch -! += = %22maste @@ -1589,24 +1589,99 @@ ter%22 - else %22latest%22)%0A +:%0A version_folders.append(%22latest%22)%0A%0A for version_folder in version_folders:%0A @@ -1715,32 +1715,36 @@ older):%0A + + shutil.rmtree(ve @@ -1758,24 +1758,28 @@ older)%0A%0A + + os.mkdir(ver @@ -1787,24 +1787,28 @@ ion_folder)%0A + copytree @@ -1833,24 +1833,28 @@ folder)%0A + + call(%22git ad @@ -1862,16 +1862,20 @@ -A .%22)%0A + call
48e7bc7f8adf13dd4ac89277e086ba007e555810
Return unicode string if element on preprint provider form doesn't exist.
admin/preprint_providers/forms.py
admin/preprint_providers/forms.py
import bleach from django import forms from osf.models import PreprintProvider, Subject from admin.base.utils import get_subject_rules, get_toplevel_subjects, get_nodelicense_choices class PreprintProviderForm(forms.ModelForm): toplevel_subjects = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), required=False) subjects_chosen = forms.CharField(widget=forms.HiddenInput(), required=False) class Meta: model = PreprintProvider exclude = ['primary_identifier_name'] widgets = { 'licenses_acceptable': forms.CheckboxSelectMultiple(), 'subjects_acceptable': forms.HiddenInput(), } def __init__(self, *args, **kwargs): toplevel_choices = get_toplevel_subjects() nodelicense_choices = get_nodelicense_choices() super(PreprintProviderForm, self).__init__(*args, **kwargs) self.fields['toplevel_subjects'].choices = toplevel_choices self.fields['licenses_acceptable'].choices = nodelicense_choices def clean_subjects_acceptable(self, *args, **kwargs): subject_ids = filter(None, self.data['subjects_chosen'].split(', ')) subjects_selected = Subject.objects.filter(id__in=subject_ids) rules = get_subject_rules(subjects_selected) return rules def clean_advisory_board(self, *args, **kwargs): return bleach.clean( self.data.get('advisory_board'), tags=['a', 'b', 'br', 'div', 'em', 'h2', 'h3', 'li', 'p', 'strong', 'ul'], attributes=['class', 'style', 'href', 'title', 'target'], styles=['text-align', 'vertical-align'], strip=True ) def clean_description(self, *args, **kwargs): return bleach.clean( self.data.get('description'), tags=['a', 'br', 'em', 'p', 'span', 'strong'], attributes=['class', 'style', 'href', 'title', 'target'], styles=['text-align', 'vertical-align'], strip=True ) def clean_footer_links(self, *args, **kwargs): return bleach.clean( self.data.get('footer_links'), tags=['a', 'br', 'div', 'em', 'p', 'span', 'strong'], attributes=['class', 'style', 'href', 'title', 'target'], styles=['text-align', 'vertical-align'], strip=True ) class PreprintProviderCustomTaxonomyForm(forms.Form): custom_taxonomy_json = forms.CharField(widget=forms.Textarea, initial='{"include": [], "exclude": [], "custom": {}}', required=False) provider_id = forms.IntegerField(widget=forms.HiddenInput()) include = forms.ChoiceField(choices=[], required=False) exclude = forms.ChoiceField(choices=[], required=False) custom_name = forms.CharField(required=False) custom_parent = forms.CharField(required=False) bepress = forms.ChoiceField(choices=[], required=False) def __init__(self, *args, **kwargs): super(PreprintProviderCustomTaxonomyForm, self).__init__(*args, **kwargs) subject_choices = [(x, x) for x in Subject.objects.all().values_list('text', flat=True)] for name, field in self.fields.iteritems(): if hasattr(field, 'choices'): if field.choices == []: field.choices = subject_choices
Python
0.000011
@@ -1348,32 +1348,111 @@ rgs, **kwargs):%0A + if not self.data.get('advisory_board'):%0A return unicode('')%0A return b @@ -1456,32 +1456,32 @@ n bleach.clean(%0A - self @@ -1795,32 +1795,108 @@ rgs, **kwargs):%0A + if not self.data.get('description'):%0A return unicode('')%0A return b @@ -2158,32 +2158,32 @@ True%0A )%0A%0A - def clean_fo @@ -2209,32 +2209,109 @@ rgs, **kwargs):%0A + if not self.data.get('footer_links'):%0A return unicode('')%0A return b
4aa1b4d79ef7777f060a05c70ead7a27dc6ed6e9
Add plot_coef
fft2d/plot.py
fft2d/plot.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import fft2d import numpy as np import matplotlib.pyplot as plt import argparse def main(args): arr, prop = fft2d.load(args.filename) Nx, Ny = arr.shape Lx = prop.Lx if prop.Lx > 0 else 1.0 Ly = prop.Ly if prop.Ly > 0 else 1.0 X, Y = np.meshgrid(np.linspace(0, Ly, Ny), np.linspace(0, Lx, Nx)) plt.pcolormesh(X, Y, arr) plt.colorbar() plt.axis("image") ext = args.extenion if args.extenion[0] == "." else "." + args.extenion plt.savefig(args.filename + ext) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("filename") parser.add_argument("-e", "--extenion", default=".png") args = parser.parse_args() main(args)
Python
0.000013
@@ -131,84 +131,25 @@ def -main(args):%0A arr, prop = fft2d.load(args.filename)%0A Nx, Ny = arr.shape +_load_size(prop): %0A @@ -235,221 +235,757 @@ -X, Y = np.meshgrid(np.linspace(0, Ly, Ny), np.linspace(0, Lx, Nx))%0A plt.pcolormesh(X, Y, arr)%0A plt.colorbar()%0A plt.axis(%22image%22)%0A ext = args.extenion if args.extenion%5B0%5D == %22.%22 else %22.%22 + args.extenion +return Lx, Ly%0A%0A%0Adef plot_field(filename, figure_ext):%0A arr, prop = fft2d.load(filename)%0A Nx, Ny = arr.shape%0A Lx, Ly = _load_size(prop)%0A X, Y = np.meshgrid(np.linspace(0, Ly, Ny), np.linspace(0, Lx, Nx))%0A plt.pcolormesh(X, Y, arr)%0A plt.colorbar()%0A plt.axis(%22image%22)%0A plt.savefig(filename + figure_ext)%0A%0A%0Adef plot_coef(filename, figure_ext):%0A arr, prop = fft2d.load(filename)%0A Nx, Ny = arr.shape%0A Lx, Ly = _load_size(prop)%0A X, Y = np.meshgrid(range(Ny), range(Nx))%0A plt.subplot(211)%0A plt.pcolormesh(X, Y, np.real(arr))%0A plt.axis(%22tight%22)%0A plt.colorbar()%0A plt.title(%22Real%22)%0A plt.subplot(212)%0A plt.pcolormesh(X, Y, np.imag(arr))%0A plt.axis(%22tight%22)%0A plt.colorbar()%0A plt.title(%22Imaginary%22) %0A @@ -1001,13 +1001,8 @@ fig( -args. file @@ -1008,16 +1008,23 @@ ename + +figure_ ext)%0A%0A%0Ai @@ -1223,15 +1223,204 @@ -main(args +ext = args.filename%5B-1%5D%0A figure_ext = args.extenion if args.extenion%5B0%5D == %22.%22 else %22.%22 + args.extenion%0A %7B%0A %22f%22: plot_field,%0A %22c%22: plot_coef,%0A %7D%5Bext%5D(args.filename, figure_ext )%0A
7ee29cfee740d6096fca8379253073077890a54c
Add more info to the redis example.
examples/util/wordcount_redis.py
examples/util/wordcount_redis.py
from disco.schemes.scheme_redis import redis_output_stream from disco.worker.task_io import task_output_stream from disco.core import Job, result_iterator class WordCount(Job): reduce_output_stream = (task_output_stream, redis_output_stream) @staticmethod def map(line, params): k, v = line yield v, 1 @staticmethod def reduce(iter, params): from disco.util import kvgroup for word, counts in kvgroup(sorted(iter)): yield word, sum(counts) if __name__ == '__main__': import sys if len(sys.argv) != 3: print "Usage: python wordcount_redis.py <input redis> <output redis>" sys.exit(1) from wordcount_redis import WordCount job = WordCount() job.params = {} job.params['url'] = sys.argv[2] job.run(input=[sys.argv[1]]) job.wait(show=True)
Python
0
@@ -1,16 +1,555 @@ +%22%22%22%0AUsage:%0Apython wordcount_redis.py redis://redis_server:6379:0 redis://redis_server:6379:1%0A%0AThe input is read from db 0 and the output is written to db 1. The inputs%0Ashould be of the form (key, list_of_values) (they are read from the server with the%0Alrange command. See the redis documentation for more info).%0AThe output will also be of the form (key, list_of_values). The reason we use%0Athis approach is to unify the mechanism for the intermediate input-outputs%0A(which must be (key, list_of_values) with the inputs and outputs).%0A%22%22%22%0A%0A from disco.schem
8f37b3716dbda37c4a092f606692b7bfae0afd75
add ability to delete a socket
syncano_cli/custom_sockets/command.py
syncano_cli/custom_sockets/command.py
# -*- coding: utf-8 -*- import json import os import subprocess import yaml import click from syncano.models import CustomSocket, SocketEndpoint from syncano_cli.custom_sockets.formatters import SocketFormatter class SocketCommand(object): list_line_template = '{socket_name:^29}|{socket_status:^19}|{status_info:^29}' socket_line_template = '{endpoint_name:^39}|{calls:^40}' TEMPLATE_DIR = 'custom_sockets/template/' SOCKET_FILE_NAME = 'socket.yml' def __init__(self, instance): self.instance = instance def list(self): # TODO: move the presentation logic to formatter; click.echo(self.list_line_template.format( socket_name='socket name', socket_status='status', status_info='status info', )) click.echo(80 * '-') for cs in CustomSocket.please.all(instance_name=self.instance.name): click.echo(self.list_line_template.format( socket_name=cs.name, socket_status=cs.status, status_info=cs.status_info )) def details(self, socket_name): cs = CustomSocket.please.get(name=socket_name, instance_name=self.instance.name) click.echo(SocketFormatter.format_socket_details(cs)) def list_endpoints(self): # TODO: move the presentation logic to formatter; click.echo(self.socket_line_template.format(endpoint_name='Name', calls='Calls')) click.echo(80 * '-') endpoints = SocketEndpoint.get_all_endpoints(instance_name=self.instance.name) for endpoint in endpoints: click.echo(self.socket_line_template.format(endpoint_name=endpoint.name, calls=endpoint.calls)) def delete(self, socket_name): print('delete {}'.format(socket_name)) def publish_from_dir(self, dir_path): with open(os.path.join(dir_path, self.SOCKET_FILE_NAME)) as socket_file: yml_file = yaml.safe_load(socket_file) api_data = SocketFormatter.to_json(socket_yml=yml_file, directory=dir_path) api_data.update({'instance_name': self.instance.name}) custom_socket = CustomSocket.please.create(**api_data) click.echo('INFO: socket {} created.'.format(custom_socket.name)) def publish_from_url(self, url_path): # TODO: add integration with new url endpoint integration pass def create_template(self, socket_name, destination): if not os.path.isdir(destination): os.makedirs(destination) if not os.path.isdir(os.path.join(destination, 'scripts')): os.makedirs(os.path.join(destination, 'scripts')) socket = CustomSocket.please.get(name=socket_name, instance_name=self.instance.name) yml_file, files = SocketFormatter.to_yml(socket_object=socket) with open(os.path.join(destination, 'socket.yml'), 'w+') as socket_yml: socket_yml.write(yml_file) for file_meta in files: with open(os.path.join(destination, 'scripts/{}'.format(file_meta['file_name'])), 'w+') as script_file: script_file.write(file_meta['source']) click.echo('INFO: template created in {}.'.format(destination)) def create_template_from_local_template(self, destination): if not os.path.isdir(destination): os.makedirs(destination) for roots, dirs, files in os.walk(self.TEMPLATE_DIR): for dir_name in dirs: if not os.path.isdir(os.path.join(destination, dir_name)): os.makedirs(os.path.join(destination, dir_name)) for file_name in files: try: directory = roots.split(self.TEMPLATE_DIR)[1] except IndexError: directory = '' with open(os.path.join(roots, file_name), 'r+') as file_to_read: with open(os.path.join("{}/{}".format(destination, directory) if directory else destination, file_name), 'w+') as file_to_write: file_to_write.write(file_to_read.read())
Python
0
@@ -1757,25 +1757,182 @@ -print('delete %7B%7D' +custom_socket = CustomSocket.please.get(name=socket_name, instance_name=self.instance.name)%0A custom_socket.delete()%0A click.echo(%22INFO: Custom Socket %7B%7D delted.%22 .for
a52bba727dd83acc129494e764c64714ff2d4e52
fix typo
sympy/utilities/pytest.py
sympy/utilities/pytest.py
"""py.test hacks to support XFAIL/XPASS""" # XXX this should be integrated into py.tes # XXX but we can't force everyone to install py-lib trunk # tested with py-lib 0.9.0 from py.__.test.outcome import Outcome, Passed, Failed, Skipped from py.__.test.terminal.terminal import TerminalSession from time import time as now __all__ = ['XFAIL'] class XFail(Outcome): pass class XPass(Outcome): pass TerminalSession.typemap[XFail] = 'f' TerminalSession.typemap[XPass] = 'X' TerminalSession.namemap[XFail] = 'XFAIL' TerminalSession.namemap[XPass] = '*** XPASS ***' def footer(self, colitems): super(TerminalSession, self).footer(colitems) self.endtime = now() self.out.line() self.skippedreasons() self.failures() self.xpasses() self.summaryline() def xpasses(self): texts = {} for colitem, outcome in self.getitemoutcomepairs(XPass): raisingtb = self.getlastvisible(outcome.excinfo.traceback) fn = raisingtb.frame.code.path lineno = raisingtb.lineno #d = texts.setdefault(outcome.excinfo.exconly(), {}) d = texts.setdefault(outcome.msg, {}) d[(fn,lineno)] = outcome if texts: self.out.line() self.out.sep('_', '*** XPASS ***') for text, dict in texts.items(): #for (fn, lineno), outcome in dict.items(): # self.out.line('Skipped in %s:%d' %(fn, lineno+1)) #self.out.line("reason: %s" % text) self.out.line("%s" % text) self.out.line() def summaryline(self): outlist = [] sum = 0 for typ in Passed, XPass, XFail, Failed, Skipped: l = self.getitemoutcomepairs(typ) if l: outlist.append('%d %s' % (len(l), typ.__name__.lower())) sum += len(l) elapsed = self.endtime-self.starttime status = "%s" % ", ".join(outlist) self.out.sep('=', 'tests finished: %s in %4.2f seconds' % (status, elapsed)) TerminalSession.footer = footer TerminalSession.xpasses = xpasses TerminalSession.summaryline = summaryline def XFAIL(func): """XFAIL decorator""" def func_wrapper(): try: func() except: raise XFail('XFAIL: %s' % func.func_name) else: raise XPass('XPASS: %s' % func.func_name) return func_wrapper
Python
0.999991
@@ -80,16 +80,17 @@ o py.tes +t %0A# XXX b
a08c04151e88eb95ae05484adb8b3f64ef91bd87
Update example
examples/tools/print_mo_and_dm.py
examples/tools/print_mo_and_dm.py
#!/usr/bin/env python # # Author: Qiming Sun <osirpt.sun@gmail.com> # import sys import numpy from pyscf import gto, tools ''' Formatted output for 2D array ''' mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz') nf = mol.nao_nr() orb = numpy.random.random((nf,4)) # # Print orbital coefficients # tools.dump_mat.dump_mo(mol, orb) # # Print lower triangular part of an array # dm = numpy.eye(3) tools.dump_mat.dump_tri(sys.stdout, dm) # # Print rectangular matrix # mol = gto.M(atom='C 0 0 0',basis='6-31g') dm = numpy.eye(mol.nao_nr()) tools.dump_mat.dump_rec(sys.stdout, dm, label=mol.spheric_labels(True), ncol=9, digits=2)
Python
0.000001
@@ -110,16 +110,21 @@ ort gto, + scf, tools%0A%0A @@ -656,8 +656,189 @@ gits=2)%0A +%0A%0A#%0A# Change the default output format of .analyze function.%0A#%0Amol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz')%0Amf = scf.RHF(mol).run()%0Amf.analyze(verbose=5, ncol=10, digits=9)%0A
c8b3fc11e04ea0a77348be5542d08937818193e3
Fix TensorBoard tag. Change: 145832229
tensorflow/tensorboard/tensorboard.py
tensorflow/tensorboard/tensorboard.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Serve TensorFlow summary data to a web frontend. This is a simple web server to proxy data from the event_loader to the web, and serve static web files. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import socket from werkzeug import serving from tensorflow.python.platform import app from tensorflow.python.platform import flags from tensorflow.python.platform import resource_loader from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import event_file_inspector as efi from tensorflow.tensorboard.backend import application from tensorflow.tensorboard.plugins.projector import plugin as projector_plugin flags.DEFINE_string('logdir', '', """logdir specifies the directory where TensorBoard will look to find TensorFlow event files that it can display. TensorBoard will recursively walk the directory structure rooted at logdir, looking for .*tfevents.* files. You may also pass a comma separated list of log directories, and TensorBoard will watch each directory. You can also assign names to individual log directories by putting a colon between the name and the path, as in tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2 """) flags.DEFINE_boolean( 'insecure_debug_mode', False, 'Whether to run the app in debug mode. ' 'This increases log verbosity, and enables debugging on server exceptions.') flags.DEFINE_string('host', '0.0.0.0', 'What host to listen to. Defaults to ' 'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to' 'disable remote access (also quiets security warnings).') flags.DEFINE_boolean('inspect', False, """Use this flag to print out a digest of your event files to the command line, when no data is shown on TensorBoard or the data shown looks weird. Example usages: tensorboard --inspect --event_file=myevents.out tensorboard --inspect --event_file=myevents.out --tag=loss tensorboard --inspect --logdir=mylogdir tensorboard --inspect --logdir=mylogdir --tag=loss See tensorflow/python/summary/event_file_inspector.py for more info and detailed usage. """) flags.DEFINE_string( 'tag', '', 'The particular tag to query for. Only used if --inspect is present') flags.DEFINE_string( 'event_file', '', 'The particular event file to query for. Only used if --inspect is present ' 'and --logdir is not specified.') flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.') flags.DEFINE_boolean('purge_orphaned_data', True, 'Whether to purge data that ' 'may have been orphaned due to TensorBoard restarts. 
' 'Disabling purge_orphaned_data can be used to debug data ' 'disappearance.') flags.DEFINE_integer('reload_interval', 60, 'How often the backend should load ' 'more data.') FLAGS = flags.FLAGS class Server(object): """A simple WSGI-compliant http server that can serve TensorBoard.""" def get_tag(self): """Read the TensorBoard TAG number, and return it or an empty string.""" try: tag = resource_loader.load_resource('tensorboard/TAG').strip() logging.info('TensorBoard is tag: %s', tag) except IOError: logging.info('Unable to read TensorBoard tag') tag = '' def create_app(self): """Creates a WSGI-compliant app than can handle TensorBoard requests. Returns: (function) A complete WSGI application that handles TensorBoard requests. """ logdir = os.path.expanduser(FLAGS.logdir) if not logdir: msg = ('A logdir must be specified. Run `tensorboard --help` for ' 'details and examples.') logging.error(msg) print(msg) return -1 plugins = {'projector': projector_plugin.ProjectorPlugin()} return application.TensorBoardWSGIApp( logdir, plugins, purge_orphaned_data=FLAGS.purge_orphaned_data, reload_interval=FLAGS.reload_interval) def serve(self): """Starts a WSGI server that serves the TensorBoard app.""" tb_app = self.create_app() logging.info('Starting TensorBoard in directory %s', os.getcwd()) debug = FLAGS.insecure_debug_mode if debug: logging.set_verbosity(logging.DEBUG) logging.warning('TensorBoard is in debug mode. This is NOT SECURE.') print('Starting TensorBoard %s on port %d' % (self.get_tag(), FLAGS.port)) if FLAGS.host == '0.0.0.0': try: host = socket.gethostbyname(socket.gethostname()) print('(You can navigate to http://%s:%d)' % (host, FLAGS.port)) except socket.gaierror: pass else: print('(You can navigate to http://%s:%d)' % (FLAGS.host, FLAGS.port)) try: serving.run_simple( FLAGS.host, FLAGS.port, tb_app, threaded=True, use_reloader=debug, use_evalex=debug, use_debugger=debug) except socket.error: if FLAGS.port == 0: msg = 'Unable to find any open ports.' logging.error(msg) print(msg) return -2 else: msg = 'Tried to connect to port %d, but address is in use.' % FLAGS.port logging.error(msg) print(msg) return -3 def main(unused_argv=None): if FLAGS.inspect: logging.info('Not bringing up TensorBoard, but inspecting event files.') event_file = os.path.expanduser(FLAGS.event_file) efi.inspect(FLAGS.logdir, event_file, FLAGS.tag) return 0 Server().serve() if __name__ == '__main__': app.run()
Python
0.000009
@@ -3943,24 +3943,41 @@ : %25s', tag)%0A + return tag%0A except I @@ -4043,21 +4043,22 @@ )%0A -tag = +return ''%0A%0A d
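Decoded from the URL-escaped patch above (indentation reconstructed), the fix simply adds the return statements that get_tag was missing, so callers no longer receive None:

def get_tag(self):
  """Read the TensorBoard TAG number, and return it or an empty string."""
  try:
    tag = resource_loader.load_resource('tensorboard/TAG').strip()
    logging.info('TensorBoard is tag: %s', tag)
    return tag  # previously fell through and implicitly returned None
  except IOError:
    logging.info('Unable to read TensorBoard tag')
    return ''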
ee2be0278636c363a0091516c47d8fc7795ad99a
Add comment
emission/net/ext_service/geocoder/nominatim.py
emission/net/ext_service/geocoder/nominatim.py
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() import os from builtins import * from builtins import object import urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse import logging import json from emission.core.wrapper.trip_old import Coordinate from pygeocoder import Geocoder as pyGeo ## We fall back on this if we have to try: script_dir = os.path.dirname(__file__) rel_path = "../gmaps/googlemaps.json" abs_file_path = os.path.join(script_dir, rel_path) googlemaps_key_file = open(abs_file_path, 'r') GOOGLE_MAPS_KEY = json.load(googlemaps_key_file)["api_key"] except Exception as e: print("google maps key not configured, falling back to nominatim") try: script_dir = os.path.dirname(__file__) rel_path = "nominatim.json" abs_file_path = os.path.join(script_dir, rel_path) nominatim_file = open(abs_file_path, 'r') nominatim_config_object = json.load(nominatim_file) NOMINATIM_QUERY_URL = nominatim_config_object["query_url"] except: print("nominatim not configured either, place decoding must happen on the client") class Geocoder(object): def __init__(self): pass @classmethod def make_url_geo(cls, address): params = { "q" : address, "format" : "json" } query_url = NOMINATIM_QUERY_URL + "/search.php?" encoded_params = urllib.parse.urlencode(params) url = query_url + encoded_params return url @classmethod def get_json_geo(cls, address): request = urllib.request.Request(cls.make_url_geo(address)) response = urllib.request.urlopen(request) jsn = json.loads(response.read()) return jsn @classmethod def geocode(cls, address): try: jsn = cls.get_json_geo(address) lat = float(jsn[0]["lat"]) lon = float(jsn[0]["lon"]) return Coordinate(lat, lon) except Exception as e: print(e) print("defaulting") #TODO: Right now there is no default gecoder. Discuss if we should create a google account for this. return _do_google_geo(address) # If we fail ask the gods @classmethod def make_url_reverse(cls, lat, lon): params = { "lat" : lat, "lon" : lon, "format" : "json" } query_url = NOMINATIM_QUERY_URL + "/reverse?" encoded_params = urllib.parse.urlencode(params) url = query_url + encoded_params return url @classmethod def get_json_reverse(cls, lat, lng): request = urllib.request.Request(cls.make_url_reverse(lat, lng)) response = urllib.request.urlopen(request) parsed_response = json.loads(response.read()) logging.debug("parsed_response = %s" % parsed_response) return parsed_response @classmethod def reverse_geocode(cls, lat, lng): # try: # jsn = cls.get_json_reverse(lat, lng) # address = jsn["display_name"] # return address # except: # print "defaulting" return _do_google_reverse(lat, lng) # Just in case ## Failsafe section def _do_google_geo(address): geo = pyGeo(GOOGLE_MAPS_KEY) results = geo.geocode(address) return Coordinate(results[0].coordinates[0], results[0].coordinates[1]) def _do_google_reverse(lat, lng): geo = pyGeo(GOOGLE_MAPS_KEY) address = geo.reverse_geocode(lat, lng) return address[0]
Python
0
@@ -1186,16 +1186,44 @@ y_url%22%5D%0A + #nominatim_file.close()%0A except:%0A
75a233b472c03bba9a6ca8b4d97cd41b8d73b1ce
check if the item group exists or not
erpnext/setup/doctype/item_group/item_group.py
erpnext/setup/doctype/item_group/item_group.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import urllib from frappe.utils import nowdate, cint, cstr from frappe.utils.nestedset import NestedSet from frappe.website.website_generator import WebsiteGenerator from frappe.website.render import clear_cache from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow class ItemGroup(NestedSet, WebsiteGenerator): nsm_parent_field = 'parent_item_group' website = frappe._dict( condition_field = "show_in_website", template = "templates/generators/item_group.html", no_cache = 1 ) def autoname(self): self.name = self.item_group_name def validate(self): super(ItemGroup, self).validate() self.make_route() def on_update(self): NestedSet.on_update(self) invalidate_cache_for(self) self.validate_name_with_item() self.validate_one_root() def make_route(self): '''Make website route''' if not self.route: self.route = '' if self.parent_item_group: parent_item_group = frappe.get_doc('Item Group', self.parent_item_group) # make parent route only if not root if parent_item_group.parent_item_group and parent_item_group.route: self.route = parent_item_group.route + '/' self.route += self.scrub(self.item_group_name) return self.route def after_rename(self, olddn, newdn, merge=False): NestedSet.after_rename(self, olddn, newdn, merge) def on_trash(self): NestedSet.on_trash(self) WebsiteGenerator.on_trash(self) def validate_name_with_item(self): if frappe.db.exists("Item", self.name): frappe.throw(frappe._("An item exists with same name ({0}), please change the item group name or rename the item").format(self.name), frappe.NameError) def get_context(self, context): context.show_search=True context.page_length = 6 context.search_link = '/product_search' start = int(frappe.form_dict.start or 0) if start < 0: start = 0 context.update({ "items": get_product_list_for_group(product_group = self.name, start=start, limit=context.page_length + 1, search=frappe.form_dict.get("search")), "parents": get_parent_item_groups(self.parent_item_group), "title": self.name, "products_as_list": cint(frappe.db.get_single_value('Website Settings', 'products_as_list')) }) if self.slideshow: context.update(get_slideshow(self)) return context @frappe.whitelist(allow_guest=True) def get_product_list_for_group(product_group=None, start=0, limit=10, search=None): child_groups = ", ".join(['"' + i[0] + '"' for i in get_child_groups(product_group)]) # base query query = """select name, item_name, item_code, route, image, website_image, thumbnail, item_group, description, web_long_description as website_description from `tabItem` where show_in_website = 1 and disabled=0 and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %(today)s) and (variant_of = '' or variant_of is null) and (item_group in ({child_groups}) or name in (select parent from `tabWebsite Item Group` where item_group in ({child_groups}))) """.format(child_groups=child_groups) # search term condition if search: query += """ and (web_long_description like %(search)s or item_name like %(search)s or name like %(search)s)""" search = "%" + cstr(search) + "%" query += """order by weightage desc, item_name, modified desc limit %s, %s""" % (start, limit) data = frappe.db.sql(query, {"product_group": product_group,"search": search, "today": nowdate()}, as_dict=1) return [get_item_for_list_in_html(r) for r in data] def get_child_groups(item_group_name): item_group = 
frappe.get_doc("Item Group", item_group_name) return frappe.db.sql("""select name from `tabItem Group` where lft>=%(lft)s and rgt<=%(rgt)s and show_in_website = 1""", {"lft": item_group.lft, "rgt": item_group.rgt}) def get_item_for_list_in_html(context): # add missing absolute link in files # user may forget it during upload if (context.get("website_image") or "").startswith("files/"): context["website_image"] = "/" + urllib.quote(context["website_image"]) products_template = 'templates/includes/products_as_grid.html' if cint(frappe.db.get_single_value('Products Settings', 'products_as_list')): products_template = 'templates/includes/products_as_list.html' return frappe.get_template(products_template).render(context) def get_group_item_count(item_group): child_groups = ", ".join(['"' + i[0] + '"' for i in get_child_groups(item_group)]) return frappe.db.sql("""select count(*) from `tabItem` where docstatus = 0 and show_in_website = 1 and (item_group in (%s) or name in (select parent from `tabWebsite Item Group` where item_group in (%s))) """ % (child_groups, child_groups))[0][0] def get_parent_item_groups(item_group_name): item_group = frappe.get_doc("Item Group", item_group_name) return [{"name": frappe._("Home"),"route":"/"}]+\ frappe.db.sql("""select name, route from `tabItem Group` where lft <= %s and rgt >= %s and show_in_website=1 order by lft asc""", (item_group.lft, item_group.rgt), as_dict=True) def invalidate_cache_for(doc, item_group=None): if not item_group: item_group = doc.name for d in get_parent_item_groups(item_group): d = frappe.get_doc("Item Group", d.name) if d.route: clear_cache(d.route)
Python
0
@@ -5343,16 +5343,69 @@ group):%0A +%09%09if frappe.db.exists(%22Item Group%22, d.get(%22name%22)):%0A%09 %09%09d = fr @@ -5433,22 +5433,30 @@ oup%22, d. +get(%22 name +%22) )%0A +%09 %09%09if d.r @@ -5461,16 +5461,17 @@ .route:%0A +%09 %09%09%09clear
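Decoded from the patch above (tabs rendered as four spaces), invalidate_cache_for now guards each parent lookup with frappe.db.exists, so a stale row in the group tree no longer makes get_doc raise:

def invalidate_cache_for(doc, item_group=None):
    if not item_group:
        item_group = doc.name

    for d in get_parent_item_groups(item_group):
        # Only touch groups that still exist; d is a query row, hence d.get("name").
        if frappe.db.exists("Item Group", d.get("name")):
            d = frappe.get_doc("Item Group", d.get("name"))
            if d.route:
                clear_cache(d.route)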
a3fea6080df132c9fcd719cfa0fade5f167a515e
Add optional polling for completion to test_run_gzip.py
compress/test/cloud/test_run_gzip.py
compress/test/cloud/test_run_gzip.py
#!/usr/bin/python # # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Python sample demonstrating use of the Google Genomics Pipelines API. This sample demonstrates running a pipeline to compress a file that is in Google Cloud Storage. This sample demonstrates running the pipeline in an "ephemeral" manner; no call to pipelines.create() is neccessary. No pipeline is persisted in the pipelines list. Usage: * python test_run_gzip.py \ --project <project-id> \ --disk-size <size-in-gb> \ --input <gcs-input-path> \ --output <gcs-output-path> \ --logging <gcs-logging-path> """ import argparse import pprint import sys from oauth2client.client import GoogleCredentials from apiclient.discovery import build # Parse input args parser = argparse.ArgumentParser() parser.add_argument("--project", required=True, help="Cloud project id to run the pipeline in") parser.add_argument("--disk-size", required=True, type=int, help="Size (in GB) of disk for both input and output") parser.add_argument("--input", required=True, help="Cloud Storage path to input file") parser.add_argument("--output", required=True, help="Cloud Storage path to output file (with the .gz extension)") parser.add_argument("--logging", required=True, help="Cloud Storage path to send logging output") args = parser.parse_args() # Create the genomics service credentials = GoogleCredentials.get_application_default() service = build('genomics', 'v1alpha2', credentials=credentials) # Run the pipeline pipeline = service.pipelines().run(body={ 'ephemeralPipeline' : { 'projectId': args.project, 'name': 'compress', 'description': 'Run "gzip" on a file', 'docker' : { 'cmd': 'gzip /mnt/data/my_file', 'imageName': 'ubuntu' }, 'inputParameters' : [ { 'name': 'inputFile', 'description': 'Cloud Storage path to an uncompressed file ', 'localCopy': { 'path': 'my_file', 'disk': 'data' } } ], 'outputParameters' : [ { 'name': 'outputFile', 'description': 'Cloud Storage path for where to write the compressed result', 'localCopy': { 'path': 'my_file.gz', 'disk': 'data' } } ], 'resources' : { 'disks': [ { 'name': 'data', 'autoDelete': True, 'mountPoint': '/mnt/data', 'sizeGb': args.disk_size, 'type': 'PERSISTENT_HDD', } ], 'minimumCpuCores': 1, 'minimumRamGb': 1, } }, 'pipelineArgs' : { 'inputs': { 'inputFile': args.input }, 'outputs': { 'outputFile': args.output }, 'logging': { 'gcsPath': args.logging }, 'projectId': args.project, 'serviceAccount': { 'email': 'default', 'scopes': [ 'https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/devstorage.full_control', 'https://www.googleapis.com/auth/genomics' ] } } }).execute() # Emit the result of the pipeline run submission pp = pprint.PrettyPrinter(indent=2) pp.pprint(pipeline)
Python
0
@@ -1150,16 +1150,124 @@ ng-path%3E + %5C%0A --poll-interval %3Cinterval-in-seconds%3E%0A%0AWhere the poll-interval is optional (default is no polling). %0A%22%22%22%0A%0Aim @@ -1305,11 +1305,12 @@ ort -sys +time %0A%0Afr @@ -2060,16 +2060,172 @@ utput%22)%0A +parser.add_argument(%22--poll-interval%22, default=0, type=int,%0A help=%22Frequency (in seconds) to poll for completion (default: no polling)%22)%0A args = p @@ -2417,24 +2417,25 @@ ipeline%0A -pipeline +operation = servi @@ -3984,18 +3984,427 @@ .pprint( -pipeline +operation)%0A%0Aif args.poll_interval %3E 0:%0A operation_name = operation%5B'name'%5D%0A print%0A print %22Polling for completion of operation%22%0A%0A while not operation%5B'done'%5D:%0A print %22Operation not complete. Sleeping %25d seconds%22 %25 (args.poll_interval)%0A%0A time.sleep(args.poll_interval)%0A%0A operation = service.operations().get(name=operation_name).execute()%0A%0A print%0A print %22Operation complete%22%0A print%0A pp.pprint(operation )%0A
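Decoded from the patch above (Python 2 print statements, matching the rest of the script), the new optional loop re-fetches the long-running operation until it reports done, driven by the added --poll-interval flag (default 0, meaning no polling):

if args.poll_interval > 0:
  operation_name = operation['name']
  print
  print "Polling for completion of operation"

  while not operation['done']:
    print "Operation not complete. Sleeping %d seconds" % (args.poll_interval)
    time.sleep(args.poll_interval)
    # Re-read the operation by name until its 'done' flag flips to true.
    operation = service.operations().get(name=operation_name).execute()

  print
  print "Operation complete"
  print
  pp.pprint(operation)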
ad595aae697abbf7c7de79a4a2448f0c47133934
Make viewer runnable from outer dirs.
execution_trace/viewer/viewer.py
execution_trace/viewer/viewer.py
# Run with `python viewer.py PATH_TO_RECORD_JSON. import json import sys from flask import Flask, jsonify from flask.helpers import send_from_directory app = Flask(__name__) # `main` inits these. # File containing `record` output. record_path = None # 0 is source, 1:N is state record_data = [] @app.route("/") def hello(): return send_from_directory('.', 'index.html') @app.route("/source.json") def source(): return jsonify(record_data[0]) @app.route("/state.json") def state(): return jsonify({'data': record_data[1:]}) def main(): record_path = sys.argv[1] with open(record_path) as f: record_data.append(json.loads(f.readline())) for line in f: record_data.append(json.loads(line)) app.run() if __name__ == "__main__": main()
Python
0
@@ -56,16 +56,26 @@ rt json%0A +import os%0A import s @@ -77,16 +77,16 @@ ort sys%0A - %0Afrom fl @@ -181,16 +181,74 @@ name__)%0A +viewer_root = os.path.abspath(os.path.dirname(__file__))%0A%0A %0A# %60main @@ -427,11 +427,19 @@ ory( -'.' +viewer_root , 'i
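Decoded from the patch above, the viewer gains an import os and resolves index.html relative to the module file instead of the process working directory, which is what makes it launchable from outer dirs:

import os

viewer_root = os.path.abspath(os.path.dirname(__file__))

@app.route("/")
def hello():
    # Serve index.html from the package directory, not os.getcwd().
    return send_from_directory(viewer_root, 'index.html')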
385fbdc0401c979a71b0ff326852292bbb6a6ceb
Print coverages and deal with missing files
exp/influence2/ProcessResults.py
exp/influence2/ProcessResults.py
import numpy import matplotlib matplotlib.use("GTK3Agg") import matplotlib.pyplot as plt from exp.influence2.ArnetMinerDataset import ArnetMinerDataset from apgl.util.Latex import Latex from apgl.util.Util import Util from apgl.util.Evaluator import Evaluator numpy.set_printoptions(suppress=True, precision=3, linewidth=100) dataset = ArnetMinerDataset() ns = numpy.arange(5, 55, 5) averagePrecisionN = 20 bestPrecisions = numpy.zeros((len(ns), len(dataset.fields))) bestAveragePrecisions = numpy.zeros(len(dataset.fields)) for i, field in enumerate(dataset.fields): outputFilename = dataset.getResultsDir(field) + "outputLists.npz" outputLists, expertMatchesInds = Util.loadPickle(outputFilename) numMethods = len(outputLists) precisions = numpy.zeros((len(ns), numMethods)) averagePrecisions = numpy.zeros(numMethods) for i, n in enumerate(ns): for j in range(len(outputLists)): precisions[i, j] = Evaluator.precisionFromIndLists(expertMatchesInds, outputLists[j][0:n]) for j in range(len(outputLists)): averagePrecisions[j] = Evaluator.averagePrecisionFromLists(expertMatchesInds, outputLists[j][0:averagePrecisionN], averagePrecisionN) print(field) print(precisions) print(averagePrecisions) bestInd = numpy.argmax(averagePrecisions) plt.plot(ns, precisions[:, bestInd], label=field) bestPrecisions[:, i] = precisions[:, bestInd] bestAveragePrecisions[i] = averagePrecisions[bestInd] bestPrecisions2 = numpy.c_[numpy.array(ns), bestPrecisions] print(Latex.array2DToRows(bestPrecisions2)) print(Latex.array1DToRow(bestAveragePrecisions)) print(dataset.fields) plt.legend() plt.show()
Python
0
@@ -261,16 +261,31 @@ uator %0A%0A +ranLSI = False%0A numpy.se @@ -370,16 +370,29 @@ Dataset( +runLSI=ranLSI )%0A%0Ans = @@ -434,17 +434,17 @@ sionN = -2 +3 0 %0AbestP @@ -557,21 +557,117 @@ elds))%0A%0A +coverages = numpy.load(dataset.coverageFilename)%0Aprint(%22==== Coverages ====%22)%0Aprint(coverages)%0A%0A for -i +s , field @@ -693,24 +693,44 @@ t.fields): %0A + if ranLSI: %0A outputFi @@ -753,15 +753,111 @@ .get -Results +OutputFieldDir(field) + %22outputListsLSI.npz%22%0A else: %0A outputFilename = dataset.getOutputField Dir( @@ -877,22 +877,48 @@ putLists +LDA .npz%22%0A + %0A try: %0A outp @@ -978,25 +978,42 @@ lename)%0A -%0A + %0A %0A numMetho @@ -1034,24 +1034,28 @@ tLists)%0A + + precisions = @@ -1090,16 +1090,20 @@ thods))%0A + aver @@ -1142,29 +1142,37 @@ ethods)%0A -%0A + %0A + for i, n in @@ -1187,24 +1187,28 @@ e(ns): %0A + for @@ -1234,24 +1234,28 @@ utLists)): %0A + @@ -1350,25 +1350,33 @@ %5D) %0A + -%0A + %0A for j in @@ -1410,32 +1410,36 @@ %0A + averageP @@ -1569,21 +1569,29 @@ N) %0A -%0A + %0A + print(fi @@ -1591,24 +1591,28 @@ rint(field)%0A + print(pr @@ -1621,24 +1621,28 @@ isions)%0A + + print(averag @@ -1654,25 +1654,33 @@ isions)%0A -%0A + %0A bestInd @@ -1717,16 +1717,20 @@ ns)%0A + + plt.plot @@ -1771,24 +1771,28 @@ =field)%0A + + bestPrecisio @@ -1797,17 +1797,17 @@ ions%5B:, -i +s %5D = prec @@ -1825,24 +1825,28 @@ estInd%5D%0A + + bestAverageP @@ -1855,17 +1855,17 @@ cisions%5B -i +s %5D = aver @@ -1886,16 +1886,59 @@ bestInd%5D +%0A except IOError as e: %0A print(e) %0A%0AbestPr
1c939a99e377ff1dfe037c47dd99f635d3cb0a1f
Remove Cotswold election id (update expected)
polling_stations/apps/data_collection/management/commands/import_cotswold.py
polling_stations/apps/data_collection/management/commands/import_cotswold.py
from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000079' addresses_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV' stations_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV' elections = [ 'local.gloucestershire.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t'
Python
0
@@ -390,16 +390,17 @@ +# 'parl.20
55fd2ec6454df9c62e40c5115be94a00bf944bc4
Allow list_classes.py to be loaded without side-effects
examples/list_classes.py
examples/list_classes.py
#!/usr/bin/python """This script lists classes and optionally attributes from UML model created with Gaphor. """ import optparse import sys from gaphor import UML from gaphor.application import Session # Setup command line options. usage = "usage: %prog [options] file.gaphor" parser = optparse.OptionParser(usage=usage) parser.add_option( "-a", "--attributes", dest="attrs", action="store_true", help="Print class attributes", ) (options, args) = parser.parse_args() if len(args) != 1: parser.print_help() sys.exit(1) # The model file to load. model = args[0] # Create the Gaphor application object. session = Session() # Get services we need. element_factory = session.get_service("element_factory") file_manager = session.get_service("file_manager") # Load model from file. file_manager.load(model) # Find all classes using factory select. for cls in element_factory.select(UML.Class): print(f"Found class {cls.name}") if options.attrs: for attr in cls.ownedAttribute: print(f" Attribute: {attr.name}")
Python
0.000001
@@ -275,16 +275,33 @@ aphor%22%0A%0A +%0Adef main():%0A parser = @@ -337,16 +337,20 @@ usage)%0A%0A + parser.a @@ -360,16 +360,20 @@ option(%0A + %22-a%22 @@ -374,16 +374,20 @@ %22-a%22,%0A + %22--a @@ -402,16 +402,20 @@ s%22,%0A + + dest=%22at @@ -416,24 +416,28 @@ st=%22attrs%22,%0A + action=%22 @@ -449,16 +449,20 @@ _true%22,%0A + help @@ -488,19 +488,27 @@ butes%22,%0A -)%0A%0A + )%0A%0A (options @@ -538,16 +538,20 @@ args()%0A%0A + if len(a @@ -561,24 +561,28 @@ ) != 1:%0A + + parser.print @@ -589,16 +589,20 @@ _help()%0A + sys. @@ -610,16 +610,20 @@ xit(1)%0A%0A + # The mo @@ -640,16 +640,20 @@ o load.%0A + model = @@ -661,16 +661,20 @@ rgs%5B0%5D%0A%0A + # Create @@ -705,16 +705,20 @@ object.%0A + session @@ -730,16 +730,20 @@ sion()%0A%0A + # Get se @@ -758,16 +758,20 @@ e need.%0A + element_ @@ -819,16 +819,20 @@ ctory%22)%0A + file_man @@ -875,16 +875,20 @@ ager%22)%0A%0A + # Load m @@ -903,16 +903,20 @@ m file.%0A + file_man @@ -933,16 +933,20 @@ model)%0A%0A + # Find a @@ -978,16 +978,20 @@ select.%0A + for cls @@ -1029,16 +1029,20 @@ lass):%0A%0A + prin @@ -1075,16 +1075,20 @@ %22)%0A%0A + + if optio @@ -1098,16 +1098,20 @@ attrs:%0A%0A + @@ -1143,16 +1143,20 @@ ibute:%0A%0A + @@ -1185,16 +1185,56 @@ : %7Battr.name%7D%22)%0A +%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
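The diff itself just indents the existing script body into a main() function behind an import guard; as a minimal standalone sketch of that pattern (the body here is hypothetical, for illustration only), it amounts to:

import sys

def main():
    # All former module-level work happens here, so merely importing
    # the module runs nothing.
    print("listing classes in", sys.argv[1:])

if __name__ == "__main__":
    main()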
5ced1112e75a076b253072e7386af0cac994c937
Address flake8 errors with long strings in actions/lib/get_scan_executions.py, via cleanup
packs/alertlogic/actions/lib/get_scan_executions.py
packs/alertlogic/actions/lib/get_scan_executions.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import requests import datetime def GetScanExecutions(config, scan_id): """ The template class for Returns: An blank Dict. Raises: ValueError: On lack of key in config. """ results = {} url = "https://{}/api/scan/v1/scans/{}".format(config['api_host'], scan_id) headers = {"Accept": "application/json"} try: r = requests.get(url, headers=headers, auth=(config['api_key'], '')) r.raise_for_status() except: raise ValueError("HTTP error: %s on %s" % (r.status_code, r.url)) try: data = r.json() except: raise ValueError("Invalid JSON") else: results = {'latest_complete': None, 'scans': []} for item in data: create_date = datetime.datetime.fromtimestamp(item['create_date']).strftime('%Y-%m-%d %H:%M:%S') if item['finish_date'] is not None: finish_date = datetime.datetime.fromtimestamp(item['finish_date']).strftime('%Y-%m-%d %H:%M:%S') duration = str(datetime.datetime.fromtimestamp(item['finish_date']) - datetime.datetime.fromtimestamp(item['create_date'])) else: finish_date = "-" duration = str(datetime.datetime.now().replace(microsecond=0) - datetime.datetime.fromtimestamp(item['create_date'])) results['scans'].append({"id": item['id'], "active": item['active'], "create_date": create_date, "finish_date": finish_date, "duration": duration}) # This list can be very large, limit to the last 10. results['scans'].sort(reverse=True) results['scans'] = results['scans'][0:10] # Find the latest ccmpleted scan.. for item in results['scans']: if item['active'] is False: results['latest_complete'] = item['id'] break return results
Python
0
@@ -789,16 +789,30 @@ equests%0A +from datetime import d @@ -1580,30 +1580,12 @@ c -reate_date = datetime. +d = date @@ -1615,32 +1615,61 @@ %5B'create_date'%5D) +%0A create_date = cd .strftime('%25Y-%25m @@ -1753,30 +1753,12 @@ f -inish_date = datetime. +d = date @@ -1796,16 +1796,49 @@ _date'%5D) +%0A finish_date = fd .strftim @@ -1895,146 +1895,15 @@ str( -datetime.datetime.fromtimestamp(item%5B'finish_date'%5D) -%0A datetime.datetime.fromtimestamp(item%5B'create_date'%5D) +fd - cd )%0A @@ -1992,25 +1992,16 @@ atetime. -datetime. now().re @@ -2026,92 +2026,11 @@ 0) - -%0A datetime.datetime.fromtimestamp(item%5B'create_date'%5D) + cd )%0A%0A
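Decoded from the patch above, the flake8 cleanup switches to from datetime import datetime and binds short intermediates (cd, fd) so each line fits the length limit; inside the loop over scan items it becomes:

cd = datetime.fromtimestamp(item['create_date'])
create_date = cd.strftime('%Y-%m-%d %H:%M:%S')
if item['finish_date'] is not None:
    fd = datetime.fromtimestamp(item['finish_date'])
    finish_date = fd.strftime('%Y-%m-%d %H:%M:%S')
    duration = str(fd - cd)
else:
    finish_date = "-"
    # Still running: measure elapsed time against "now".
    duration = str(datetime.now().replace(microsecond=0) - cd)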
5018d20024651e632f30ab72918041d2ebf8ebcb
Test renaming a multiindex with duplicates (GH 38015) (#43187)
pandas/tests/indexing/multiindex/test_multiindex.py
pandas/tests/indexing/multiindex/test_multiindex.py
import numpy as np import pandas._libs.index as _index from pandas.errors import PerformanceWarning import pandas as pd from pandas import ( DataFrame, Index, MultiIndex, Series, ) import pandas._testing as tm class TestMultiIndexBasic: def test_multiindex_perf_warn(self): df = DataFrame( { "jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4), } ).set_index(["jim", "joe"]) with tm.assert_produces_warning(PerformanceWarning): df.loc[(1, "z")] df = df.iloc[[2, 1, 3, 0]] with tm.assert_produces_warning(PerformanceWarning): df.loc[(0,)] def test_indexing_over_hashtable_size_cutoff(self): n = 10000 old_cutoff = _index._SIZE_CUTOFF _index._SIZE_CUTOFF = 20000 s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n)))) # hai it works! assert s[("a", 5)] == 5 assert s[("a", 6)] == 6 assert s[("a", 7)] == 7 _index._SIZE_CUTOFF = old_cutoff def test_multi_nan_indexing(self): # GH 3588 df = DataFrame( { "a": ["R1", "R2", np.nan, "R4"], "b": ["C1", "C2", "C3", "C4"], "c": [10, 15, np.nan, 20], } ) result = df.set_index(["a", "b"], drop=False) expected = DataFrame( { "a": ["R1", "R2", np.nan, "R4"], "b": ["C1", "C2", "C3", "C4"], "c": [10, 15, np.nan, 20], }, index=[ Index(["R1", "R2", np.nan, "R4"], name="a"), Index(["C1", "C2", "C3", "C4"], name="b"), ], ) tm.assert_frame_equal(result, expected) def test_nested_tuples_duplicates(self): # GH#30892 dti = pd.to_datetime(["20190101", "20190101", "20190102"]) idx = Index(["a", "a", "c"]) mi = MultiIndex.from_arrays([dti, idx], names=["index1", "index2"]) df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi) expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi) df2 = df.copy(deep=True) df2.loc[(dti[0], "a"), "c2"] = 1.0 tm.assert_frame_equal(df2, expected) df3 = df.copy(deep=True) df3.loc[[(dti[0], "a")], "c2"] = 1.0 tm.assert_frame_equal(df3, expected) def test_multiindex_with_datatime_level_preserves_freq(self): # https://github.com/pandas-dev/pandas/issues/35563 idx = Index(range(2), name="A") dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B") mi = MultiIndex.from_product([idx, dti]) df = DataFrame(np.random.randn(14, 2), index=mi) result = df.loc[0].index tm.assert_index_equal(result, dti) assert result.freq == dti.freq def test_multiindex_complex(self): # GH#42145 complex_data = [1 + 2j, 4 - 3j, 10 - 1j] non_complex_data = [3, 4, 5] result = DataFrame( { "x": complex_data, "y": non_complex_data, "z": non_complex_data, } ) result.set_index(["x", "y"], inplace=True) expected = DataFrame( {"z": non_complex_data}, index=MultiIndex.from_arrays( [complex_data, non_complex_data], names=("x", "y"), ), ) tm.assert_frame_equal(result, expected)
Python
0
@@ -3588,28 +3588,439 @@ ame_equal(result, expected)%0A +%0A def test_rename_multiindex_with_duplicates(self):%0A # GH 38015%0A mi = MultiIndex.from_tuples(%5B(%22A%22, %22cat%22), (%22B%22, %22cat%22), (%22B%22, %22cat%22)%5D)%0A df = DataFrame(index=mi)%0A df = df.rename(index=%7B%22A%22: %22Apple%22%7D, level=0)%0A%0A mi2 = MultiIndex.from_tuples(%5B(%22Apple%22, %22cat%22), (%22B%22, %22cat%22), (%22B%22, %22cat%22)%5D)%0A expected = DataFrame(index=mi2)%0A tm.assert_frame_equal(df, expected)%0A
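Decoded from the patch above, the new regression test for GH 38015 checks that a level-0 rename works on a MultiIndex containing duplicate entries:

def test_rename_multiindex_with_duplicates(self):
    # GH 38015
    mi = MultiIndex.from_tuples([("A", "cat"), ("B", "cat"), ("B", "cat")])
    df = DataFrame(index=mi)
    df = df.rename(index={"A": "Apple"}, level=0)

    mi2 = MultiIndex.from_tuples([("Apple", "cat"), ("B", "cat"), ("B", "cat")])
    expected = DataFrame(index=mi2)
    tm.assert_frame_equal(df, expected)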
99927b39cdfa47d60d6e045898ad4e0745fb763a
Add the ability to get the queue depth by calling len(queue) where queue is an instance of Queue
rmqid/queue.py
rmqid/queue.py
""" Queue is a class that encompasses and returns the methods of the Specification.Queue class """ import contextlib import logging from pamqp import specification from rmqid import base LOGGER = logging.getLogger(__name__) class Queue(base.AMQPClass): """Queue class that with methods that return the specification class method frames. """ def __init__(self, channel, name, passive=False, durable=True, exclusive=False, auto_delete=False): """Create a new instance of the queue object. :param rmqid.channel.Channel: The channel object to work with :param str name: The name of the queue :param bool passive: Do not create exchange :param bool durable: Request a durable exchange :param bool auto_delete: Automatically delete when not in use """ super(Queue, self).__init__(channel, name) self.consumer_tag = 'rmqid.%i.%s' % (self.channel.id, id(self)) self.consuming = False self._passive = passive self._durable = durable self._exclusive = exclusive self._auto_delete = auto_delete def bind(self, exchange, routing_key=None): """Bind the queue to the specified exchange or routing key. If routing key is None, use the queue name. :param str | rmqid.base.AMQPClass exchange: The exchange to bind to :param str routing_key: The routing key to use """ if isinstance(exchange, base.AMQPClass): exchange = exchange.name self.rpc(specification.Queue.Bind(queue=self.name, exchange=exchange, routing_key=routing_key or self.name)) @contextlib.contextmanager def consume(self, no_ack=False, prefetch=None): """Generator """ self.consuming = True if prefetch: self.channel.prefetch(prefetch) self.rpc(specification.Basic.Consume(queue=self.name, consumer_tag=self.consumer_tag, no_ack=no_ack)) yield ConsumeGenerator(self) self.consuming = False self.rpc(specification.Basic.Cancel(consumer_tag=self.consumer_tag)) def declare(self): """Declare the queue""" self.rpc(specification.Queue.Declare(queue=self.name, durable=self._durable, passive=self._passive, exclusive=self._exclusive, auto_delete=self._auto_delete)) def delete(self, if_unused=False, if_empty=False): """Delete the queue :param bool if_unused: Delete only if unused :param bool if_empty: Delete only if empty """ self.rpc(specification.Queue.Delete(queue=self.name, if_unused=if_unused, if_empty=if_empty)) def get(self, no_ack=False): """Return the results of a Basic.Get :param bool no_ack: Broker should not expect a Basic.Ack, Basic.Reject or Basic.Nack :rtype: rmqid.message.Message """ return self.rpc(specification.Basic.Get(queue=self.name, no_ack=no_ack)) def unbind(self, exchange, routing_key=None): """Unbind queue from the specified exchange where it is bound the routing key. If routing key is None, use the queue name. :param str | rmqid.base.AMQPClass exchange: Exchange to unbind from :param str routing_key: The routing key that binds them """ if isinstance(exchange, base.AMQPClass): exchange = exchange.name self.rpc(specification.Queue.Bind(queue=self.name, exchange=exchange, routing_key=routing_key or self.name)) class ConsumeGenerator(object): def __init__(self, queue): self.queue = queue def next_message(self): while self.queue.consuming: value = self.queue.channel._get_message() if value: yield value
Python
0.000018
@@ -1131,16 +1131,265 @@ delete%0A%0A + def __len__(self):%0A %22%22%22Return the pending number of messages in the queue by doing a passive%0A Queue declare.%0A%0A :rtype: int%0A%0A %22%22%22%0A response = self.rpc(self._declare(True))%0A return response.message_count%0A%0A def @@ -1420,32 +1420,32 @@ ting_key=None):%0A - %22%22%22Bind @@ -2617,334 +2617,21 @@ pc(s -pecification.Queue.Declare(queue=self.name,%0A durable=self._durable,%0A passive=self._passive,%0A exclusive=self._exclusive,%0A auto_delete=self._auto_delete +elf._declare( ))%0A%0A @@ -3896,32 +3896,32 @@ hange=exchange,%0A - @@ -3986,16 +3986,601 @@ name))%0A%0A + def _declare(self, passive=None):%0A %22%22%22Return a specification.Queue.Declare class pre-composed for the rpc%0A method since this can be called multiple times.%0A%0A :rtype: pamqp.specification.Queue.Declare%0A%0A %22%22%22%0A return specification.Queue.Declare(queue=self.name,%0A durable=self._durable,%0A passive=passive or self._passive,%0A exclusive=self._exclusive,%0A auto_delete=self._auto_delete)%0A%0A %0Aclass C
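Decoded from the patch above, the queue depth comes from a passive Queue.Declare, whose reply frame carries message_count; the declare frame is factored into _declare so the same builder serves both declare() and __len__:

def __len__(self):
    """Return the pending number of messages in the queue by doing a passive
    Queue declare.

    :rtype: int

    """
    response = self.rpc(self._declare(True))
    return response.message_count

def _declare(self, passive=None):
    # passive=True asks the broker for queue stats without (re)creating it.
    return specification.Queue.Declare(queue=self.name,
                                       durable=self._durable,
                                       passive=passive or self._passive,
                                       exclusive=self._exclusive,
                                       auto_delete=self._auto_delete)

With that in place, len(queue) on a Queue instance returns the broker-reported pending message count.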
8a348c30e2381e76f09355f3cf23a9e87e060d80
Update without_end.py
Python/String_1/without_end.py
Python/String_1/without_end.py
# Given a string, return a version without the first and last char, so "Hello" # yields "ell". The string length will be at least 2. # without_end('Hello') -> 'ell' # without_end('java') -> 'av' # without_end('coding') -> 'odin' def without_end(str): return str[1:len(str) - 1] print(without_end('Hello')) print(without_end('java')) print(without_end('coding'))
Python
0.000002
@@ -150,16 +150,17 @@ Hello') +- -%3E 'ell' @@ -182,16 +182,17 @@ 'java') +- -%3E 'av'%0A @@ -215,16 +215,17 @@ oding') +- -%3E 'odin
9259bcd9d4dd745a5f34dff8dd4286eecf4a62f2
Update verifyAuthenticodePS.py
Python/verifyAuthenticodePS.py
Python/verifyAuthenticodePS.py
# NOTE: This is Windows Only - tested in Python2.7.1 # https://twitter.com/jgstew/status/1011657455275610112 # https://github.com/jgstew/tools/blob/master/CMD/PS_VerifyFileSig.bat # https://github.com/jgstew/tools/blob/master/Python/verifyAuthenticode.py # powershell -ExecutionPolicy Bypass -command "(Get-AuthenticodeSignature \"C:\Windows\explorer.exe\").Status -eq 'Valid'" import subprocess import sys sFileName = r"C:\Windows\explorer.exe" # TODO: use `-ExecutionPolicy Bypass` somehow # TODO: not sure if `Popen` is the best `subprocess` option. I'm just using the first thing that worked that I found: # https://stackoverflow.com/questions/21944895/running-powershell-script-within-python-script-how-to-make-python-print-the-pow psResult = subprocess.Popen( ["powershell", r'(Get-AuthenticodeSignature "' + sFileName + r'").Status -eq "Valid"'], stdout=sys.stdout ) psResult.communicate() # This will output `True` if the signature is valid.
Python
0
@@ -488,16 +488,147 @@ somehow%0A +# TODO: capture the output in python and evaluate it rather than just passing through the PowerShell result with stdout=sys.stdout%0A # TODO:
f1ef248f046c91683df8e6837249cc407a5f1cf2
Update ngrokwebhook.py
examples/ngrokwebhook.py
examples/ngrokwebhook.py
#sample script that reads ngrok info from localhost:4040 and create Cisco Spark Webhook #typicall ngrok is called "ngrok http 8080" to redirect localhost:8080 to Internet #accesible ngrok url # #To use script simply launch ngrok, then launch this script. After ngrok is killed, run this #script a second time to remove webhook from Cisco Spark import requests import json import re import sys import requests.packages.urllib3 requests.packages.urllib3.disable_warnings() from ciscosparkapi import CiscoSparkAPI, Webhook def findwebhookidbyname(api, webhookname): webhooks = api.webhooks.list() for wh in webhooks: if wh.name == webhookname: return wh.id else: return "not found" #Webhook attributes webhookname="testwebhook" resource="messages" event="created" url_suffix="/sparkwebhook" #grab the at from a local at.txt file instead of global variable fat=open ("at.txt","r+") at=fat.readline().rstrip() fat.close api = CiscoSparkAPI(at) #go to the localhost page for nogrok and grab the public url for http try: ngrokpage = requests.get("http://127.0.0.1:4040").text except: print ("no ngrok running - deleting webhook if it exists") whid=findwebhookidbyname(api, webhookname) if "not found" in whid: print ("no webhook found") sys.exit() else: print (whid) dict=api.webhooks.delete(whid) print (dict) print ("Webhook deleted") sys.exit() for line in ngrokpage.split("\n"): if "window.common = " in line: ngrokjson = re.search('JSON.parse\(\"(.+)\"\)\;',line).group(1) ngrokjson = (ngrokjson.replace('\\','')) print (ngrokjson) Url = (json.loads(ngrokjson)["Session"]["Tunnels"]["command_line (http)"]["URL"])+url_suffix print (Url) #check if the webhook exists by name and then create it if not whid=findwebhookidbyname(api, webhookname) if "not found" in whid: #create print ("not found") dict=api.webhooks.create(webhookname, targetUrl, resource, event) print (dict) else: #update print (whid) dict=api.webhooks.update(whid, name=webhookname, targetUrl=Url) print (dict)
Python
0.000001
@@ -2058,22 +2058,16 @@ okname, -target Url, res
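The small change matters: the create branch referenced targetUrl, a name that is never defined in this script and would raise NameError the first time the webhook is created; decoded, the fixed call passes the ngrok Url computed above:

dict = api.webhooks.create(webhookname, Url, resource, event)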
339459eefa9c2b3daab76f258a4bde576b374c9b
Update create_http_task.py (#2187)
tasks/create_http_task.py
tasks/create_http_task.py
# Copyright 2019 Google LLC All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import datetime def create_http_task(project, queue, location, url, payload=None, in_seconds=None): # [START cloud_tasks_create_http_task] """Create a task for a given queue with an arbitrary payload.""" from google.cloud import tasks_v2beta3 from google.protobuf import timestamp_pb2 # Create a client. client = tasks_v2beta3.CloudTasksClient() # TODO(developer): Uncomment these lines and replace with your values. # project = 'my-project-id' # queue = 'my-queue' # location = 'us-central1' # url = 'https://example.com/task_handler' # payload = 'hello' # Construct the fully qualified queue name. parent = client.queue_path(project, location, queue) # Construct the request body. task = { 'http_request': { # Specify the type of request. 'http_method': 'POST', 'url': url # The full url path that the task will be sent to. } } if payload is not None: # The API expects a payload of type bytes. converted_payload = payload.encode() # Add the payload to the request. task['http_request']['body'] = converted_payload if in_seconds is not None: # Convert "seconds from now" into an rfc3339 datetime string. d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds) # Create Timestamp protobuf. timestamp = timestamp_pb2.Timestamp() timestamp.FromDatetime(d) # Add the timestamp to the tasks. task['schedule_time'] = timestamp # Use the client to build and send the task. response = client.create_task(parent, task) print('Created task {}'.format(response.name)) return response # [END cloud_tasks_create_http_task] if __name__ == '__main__': parser = argparse.ArgumentParser( description=create_http_task.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument( '--project', help='Project of the queue to add the task to.', required=True, ) parser.add_argument( '--queue', help='ID (short name) of the queue to add the task to.', required=True, ) parser.add_argument( '--location', help='Location of the queue to add the task to.', required=True, ) parser.add_argument( '--url', help='The full url path that the request will be sent to.', required=True, ) parser.add_argument( '--payload', help='Optional payload to attach to the push queue.' ) parser.add_argument( '--in_seconds', type=int, help='The number of seconds from now to schedule task attempt.' ) args = parser.parse_args() create_http_task( args.project, args.queue, args.location, args.url, args.payload, args.in_seconds)
Python
0.000001
@@ -1357,16 +1357,36 @@ 'hello' +%0A in_seconds = 10 %0A%0A # @@ -1956,17 +1956,16 @@ payload%0A -%0A if i
fc8bfc1f2cda0844adbf1d831a1a5e1888f8949b
add regression test for behavior when fallbackfee is disabled
test/functional/wallet_fallbackfee.py
test/functional/wallet_fallbackfee.py
#!/usr/bin/env python3 # Copyright (c) 2017-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test wallet replace-by-fee capabilities in conjunction with the fallbackfee.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_raises_rpc_error class WalletRBFTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[0].generate(101) # sending a transaction without fee estimations must be possible by default on regtest self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) # test sending a tx with disabled fallback fee (must fail) self.restart_node(0, extra_args=["-fallbackfee=0"]) assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)) assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], [{self.nodes[0].getnewaddress(): 1}]))) assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1})) if __name__ == '__main__': WalletRBFTest().main()
Python
0
@@ -408,16 +408,69 @@ pc_error +%0Afrom test_framework.util import rpc_port ## ELEMENTS %0A%0Aclass @@ -562,17 +562,17 @@ nodes = -1 +2 %0A @@ -1347,16 +1347,16 @@ 1%7D%5D)))%0A - @@ -1481,16 +1481,1315 @@ : 1%7D))%0A%0A + ## ELEMENTS: test claimpegin with fallback fee set to zero%0A # getpeginaddress does not work with descriptor wallets yet%0A if not self.options.descriptors:%0A extra_args = %5B%0A '-fallbackfee=0',%0A '-mainchainrpchost=127.0.0.1',%0A '-mainchainrpcport=%25s' %25 rpc_port(0),%0A '-parentgenesisblockhash=%25s' %25 self.nodes%5B0%5D.getblockhash(0),%0A '-con_parent_chain_signblockscript=51',%0A '-parentscriptprefix=75',%0A %5D%0A self.restart_node(0)%0A self.restart_node(1, extra_args)%0A%0A addrs = self.nodes%5B1%5D.getpeginaddress()%0A txid = self.nodes%5B0%5D.sendtoaddress(addrs%5B%22mainchain_address%22%5D, 5)%0A raw = self.nodes%5B0%5D.getrawtransaction(txid)%0A self.nodes%5B0%5D.generate(12)%0A proof = self.nodes%5B0%5D.gettxoutproof(%5Btxid%5D)%0A assert_raises_rpc_error(-6, %22Fee estimation failed%22, lambda: self.nodes%5B1%5D.claimpegin(raw, proof))%0A%0A # Try again with fallbackfee below the min relay fee. It should just work%0A # (will let the relay fee override the fallbackfee)%0A extra_args%5B0%5D = '-fallbackfee=0.00000001'%0A self.restart_node(1, extra_args)%0A self.nodes%5B1%5D.claimpegin(raw, proof)%0A%0A if __nam
94c1b54ef1db5180c5f1d07e857bf91d6b6fbf25
fix for python2.7 on windows
examples/usage_stream.py
examples/usage_stream.py
# -*- coding: utf-8 -*- from janome.tokenizer import Tokenizer import sys from io import open PY3 = sys.version_info[0] == 3 print(u'Tokenize (stream mode)') t = Tokenizer(mmap=True) with open('text_lemon.txt', encoding='utf-8') as f: text = f.read() if not PY3: text = unicode(text, 'utf-8') for token in t.tokenize(text, stream=True): print(token)
Python
0.000001
@@ -297,17 +297,8 @@ text -, 'utf-8' )%0A
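The Python 2.7 failure likely comes from double decoding: io.open(..., encoding='utf-8') already yields a unicode object, and calling unicode(text, 'utf-8') on existing unicode raises TypeError; decoded, the fix drops the codec argument:

if not PY3:
    text = unicode(text)  # text is already decoded by io.open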
331cef286380f3369344dbc9a60e460619c17405
fix dnn
federatedml/nn/homo_nn/zoo/dnn.py
federatedml/nn/homo_nn/zoo/dnn.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import typing from federatedml.nn.homo_nn.backend.tf_keras.layers import has_builder, DENSE, DROPOUT from federatedml.nn.homo_nn.backend.tf_keras.nn_model import KerasNNModel, KerasSequenceDataConverter from federatedml.nn.homo_nn.zoo.nn import build_nn def is_supported_layer(layer): return has_builder(layer) and layer in {DENSE, DROPOUT} def build_dnn(nn_define, loss, optimizer, metrics) -> typing.Tuple[KerasNNModel, KerasSequenceDataConverter]: return build_nn(nn_define=nn_define, loss=loss, optimizer=optimizer, metrics=metrics, is_supported_layer=is_supported_layer, default_layer=DENSE)
Python
0.000032
@@ -614,23 +614,8 @@ %0A#%0A%0A -import typing%0A%0A from @@ -774,36 +774,8 @@ odel -, KerasSequenceDataConverter %0Afro @@ -806,19 +806,16 @@ .zoo -.nn import buil @@ -810,22 +810,16 @@ import -build_ nn%0A%0A%0Adef @@ -818,24 +818,28 @@ nn%0A%0A%0Adef is_ +dnn_ supported_la @@ -926,12 +926,30 @@ ild_ -d nn -( +_model(input_shape, nn_d @@ -983,67 +983,87 @@ rics -) -%3E typing.Tuple%5BKerasNNModel, KerasSequenceDataConverter%5D +,%0A is_supported_layer=is_dnn_supported_layer) -%3E KerasNNModel :%0A @@ -1071,24 +1071,27 @@ return +nn. build_nn (nn_defi @@ -1082,17 +1082,77 @@ build_nn -( +_model(input_shape=input_shape,%0A nn_defin @@ -1184,16 +1184,25 @@ + loss=los @@ -1224,16 +1224,25 @@ + + optimize @@ -1274,16 +1274,25 @@ + metrics= @@ -1312,32 +1312,41 @@ + + is_supported_lay @@ -1364,24 +1364,33 @@ rted_layer,%0A +
41bac18d88d51abf057b2d26886fe929e8c5a4ad
Fix for optional mag field
feed/feed.home.usgs.earthquake.py
feed/feed.home.usgs.earthquake.py
#!/usr/local/bin/python3 -u """ Author: Oliver Ratzesberger <https://github.com/fxstein> Copyright: Copyright (C) 2016 Oliver Ratzesberger License: Apache License, Version 2.0 """ # Make sure we have access to SentientHome commons import os import sys try: sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') except: exit(1) # Sentient Home Application from common.shapp import shApp from common.sheventhandler import shEventHandler from common.shutil import epoch2date import json # Default settings from cement.utils.misc import init_defaults defaults = init_defaults('usgs_quake', 'usgs_quake') defaults['usgs_quake']['poll_interval'] = 10.0 def mapMetadata(metadata): event = [{ 'measurement': 'usgs.earthquake.metadata', # Time Series Name 'tags': { 'title': metadata['title'], 'api': metadata['api'], 'status': metadata['status'], }, 'fields': { 'generated': epoch2date(metadata['generated']/1000), 'count': metadata['count'] } }] return event def mapFeature(feature): event = [{ 'measurement': 'usgs.earthquake.feature', # Time Series Name 'tags': { 'type': feature['properties']['type'], 'gtype': feature['geometry']['type'], 'types': feature['properties']['types'], 'magType': feature['properties']['magType'], 'tsunami': feature['properties']['tsunami'], 'code': feature['properties']['code'], 'net': feature['properties']['net'], 'nst': feature['properties']['nst'], 'sources': feature['properties']['sources'], 'alert': feature['properties']['alert'], }, 'fields': { 'long': float(feature['geometry']['coordinates'][0]), 'lat': float(feature['geometry']['coordinates'][1]), 'depth': float(feature['geometry']['coordinates'][2]), 'mag': float(feature['properties']['mag']), 'felt': feature['properties']['felt'], 'sig': feature['properties']['sig'], 'dmin': feature['properties']['dmin'], 'id': feature['id'], 'status': feature['properties']['status'], 'title': feature['properties']['title'], 'place': feature['properties']['place'], 'status': feature['properties']['status'], 'time': epoch2date(feature['properties']['time']/1000), 'updated': epoch2date(feature['properties']['updated']/1000), 'tz': feature['properties']['tz'], 'ids': feature['properties']['ids'], } }] fields = event[0]['fields'] # Optional fields if feature['properties']['felt'] is not None: fields['felt'] = float(feature['properties']['felt']) if feature['properties']['gap'] is not None: fields['gap'] = float(feature['properties']['gap']) if feature['properties']['rms'] is not None: fields['rms'] = float(feature['properties']['rms']) if feature['properties']['mag'] is not None: fields['mag'] = float(feature['properties']['mag']) if feature['properties']['cdi'] is not None: fields['cdi'] = float(feature['properties']['cdi']) return event with shApp('usgs_quake', config_defaults=defaults) as app: app.run() handler = shEventHandler(app, dedupe=True) path = app.config.get('usgs_quake', 'path') while True: # Get all earthquakes of the past hour r = handler.get(app.config.get('usgs_quake', 'addr') + path) data = json.loads(r.text) # app.log.debug('Raw data: %s' % r.text) event = mapMetadata(data['metadata']) app.log.debug('Event data: %s' % event) handler.postEvent(event, dedupe=True, batch=True) # Need to revser the order of incoming events as news are on top but we # need to process in the order they happened. 
features = data['features'][::-1] for feature in features: event = mapFeature(feature) app.log.debug('Event data: %s' % event) # dedupe automatically ignores events we have processed before handler.postEvent(event, dedupe=True, batch=True) # We reset the poll interval in case the configuration has changed handler.sleep()
Python
0
@@ -2006,64 +2006,8 @@ %5D),%0A - 'mag': float(feature%5B'properties'%5D%5B'mag'%5D),%0A @@ -3262,24 +3262,134 @@ '%5D%5B'cdi'%5D)%0A%0A + if feature%5B'properties'%5D%5B'mag'%5D is not None:%0A fields%5B'mag'%5D = float(feature%5B'properties'%5D%5B'mag'%5D)%0A%0A return e
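Decoded from the patch above, 'mag' moves out of the always-present fields, where float(None) would raise TypeError when a quake has no magnitude yet, and joins the other optional readings:

if feature['properties']['mag'] is not None:
    fields['mag'] = float(feature['properties']['mag'])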
c830da138e8efc86204f96eff96fd9c0d956da20
Add GDrive listfiles method
file_distributor/gdrive_upload.py
file_distributor/gdrive_upload.py
#!/usr/bin/env python3 import argparse import configparser import httplib2 import os import sys import time # Google Drive API from apiclient.discovery import build from apiclient.http import MediaFileUpload from oauth2client import client from oauth2client import tools from oauth2client.file import Storage config_filename = "file_distributor.cfg" class Logger: """Outputs formatted log messages.""" # Change this to True to enable output debug logging for this module. print_debug_logs = True @classmethod def __log(self, log_level, message): """Outputs a formatted log message if logging is activated. Parameters: log_level -- String. Severity of the log message. message -- String. Message to be logged. """ if self.print_debug_logs: print( "[ " + time.strftime("%Y-%m-%d %H:%M:%S") + " | " + log_level + " ] " + message ) @staticmethod def debug(message): """Outputs a debug level log message.""" Logger.__log("DEBUG ", message) @staticmethod def success(message): """Outputs a success level log message.""" Logger.__log("SUCCESS", message) @staticmethod def log_error(message): """Outputs a error level log message.""" Logger.__log("ERROR ", message) class GoogleDrive: """Wrapper for the Google Drive API.""" def __get_credentials(self): """Refreshes Google Drive credentials, authorizing if necessary. self.__application_name and self.__client_secret_file_path must have been set. The credentials will be saved to generated_credentials.json. Returns OAuth credentials. """ credential_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "generated_credentials.json" ) store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: scope = "https://www.googleapis.com/auth/drive" flow = client.flow_from_clientsecrets( self.__client_secret_file_path, scope ) flow.user_agent = self.__application_name flags = argparse \ .ArgumentParser(parents=[tools.argparser]) \ .parse_args() flags.noauth_local_webserver = True credentials = tools.run_flow(flow, store, flags) Logger.debug("Credentials saved to [" + credential_path + "]") return credentials def __init__(self, application_name, client_secret_file_path): """Initializes and pre-authenticates the Google Drive credentials.""" self.__application_name = application_name self.__client_secret_file_path = client_secret_file_path http_auth = self.__get_credentials().authorize(httplib2.Http()) self.__service = build("drive", "v3", http=http_auth) def upload_file(self, file_path_local, file_name_gdrive): """Uploads a file to a Google Drive account. Parameters: file_path_local -- String. Absolute path to the file to be uploaded. file_name_gdrive -- String. Filename for the uploaded file in Google Drive. """ self.__service.files().create( media_body=MediaFileUpload(file_path_local), body={"name":file_name_gdrive} ).execute() Logger.debug("File [" + file_path_local + "] uploaded.") if __name__ == "__main__": config = configparser.ConfigParser() config.read(config_filename) section_local = "Local" file_path_local = config[section_local]["file_path"] section_gdrive = "Google Drive" application_name = config[section_gdrive]["application_name"] file_name_gdrive = config[section_gdrive]["file_name"] client_secret_file_path = "client_secret.json" g = GoogleDrive(application_name, client_secret_file_path) g.upload_file(os.path.join( os.path.dirname(os.path.realpath(__file__)), file_path_local ), file_name_gdrive)
Python
0
@@ -69,16 +69,28 @@ ttplib2%0A +import json%0A import o @@ -3102,16 +3102,314 @@ _auth)%0A%0A + def list_files(self):%0A %22%22%22Retrieves the data of all files in the Google Drive account.%22%22%22%0A Logger.debug(%22File and directory details:%22)%0A%0A file_list = self.__service.files().list().execute()%5B%22files%22%5D%0A for file in file_list:%0A print(%22 %22 + json.dumps(file))%0A%0A def @@ -4417,24 +4417,105 @@ _file_path)%0A +%0A # g.list_files() # Can be used to find the FileId of a specific directory%0A%0A g.upload
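Decoded from the patch above, the new helper dumps one JSON line per file via the Drive v3 files().list() call (plus an import json at the top of the script); the commented call in __main__ is left in place as a way to discover the FileId of a specific directory:

def list_files(self):
    """Retrieves the data of all files in the Google Drive account."""
    Logger.debug("File and directory details:")

    file_list = self.__service.files().list().execute()["files"]
    for file in file_list:
        print("  " + json.dumps(file))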
428a2528a1164ffd1a84a3eee89936cacab49b8e
Fix test2_unicode test_view_name
test/python/topology/test2_unicode.py
test/python/topology/test2_unicode.py
# coding=utf-8 # Licensed Materials - Property of IBM # Copyright IBM Corp. 2017 import unittest from streamsx.topology.topology import * from streamsx.topology.tester import Tester from streamsx.topology import context from streamsx import rest import streamsx.ec as ec import test_vers class view_name_source(object): """A class which wraps a StreamsConnection object and returns the view name """ def __init__(self, sc): self.sc = sc def __call__(self): for instance in self.sc.get_instances(id=ec.instance_id()): for job in instance.get_jobs(id=ec.job_id()): for view in job.get_views(): yield view.name @unittest.skipIf(not test_vers.tester_supported() , "Tester not supported") class TestUnicode(unittest.TestCase): def setUp(self): Tester.setup_standalone(self) def test_strings(self): """ Test strings that are unicode. Includes a stream name to verify it does not cause an error, but under the covers the actual name will be a mangled version of it since SPL identifiers are only ASCII. """ topo = Topology() ud = [] ud.append('⡍⠔⠙⠖ ⡊ ⠙⠕⠝⠰⠞ ⠍⠑⠁⠝ ⠞⠕ ⠎⠁⠹ ⠹⠁⠞ ⡊ ⠅⠝⠪⠂ ⠕⠋ ⠍⠹') ud.append('2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm') ud.append('многоязычных') ud.append("Arsenal hammered 5-1 by Bayern again") s = topo.source(ud, name='façade') sas = s.as_string() sd = s.map(lambda s : {'val': s + "_test_it!"}) tester = Tester(topo) tester.contents(s, ud) tester.contents(sas, ud) dud = [] for v in ud: dud.append({'val': v + "_test_it!"}) tester.contents(sd, dud) tester.test(self.test_ctxtype, self.test_config) print(tester.result) def test_view_name(self): """ Test view names that are unicode. """ if self.test_ctxtype == context.ContextTypes.STANDALONE: return self.skipTest("Skipping unicode view tests for standalone.") view_names = ["®®®®", "™¬⊕⇔"] topo = Topology() view_name_stream = topo.source(view_name_source(self.sc)) view0 = topo.source(["hello"]).view(name=view_names[0]) view1 = topo.source(["hello"]).view(name=view_names[1]) tester = Tester(topo) tester.contents(view_name_stream, view_names, ordered=False) # For running Bluemix tests, the username & password need a default value of None username = getattr(self, "username", None) password = getattr(self, "password", None) tester.test(self.test_ctxtype, self.test_config, username=username, password=password) @unittest.skipIf(not test_vers.tester_supported() , "Tester not supported") class TestDistributedUnicode(TestUnicode): def setUp(self): Tester.setup_distributed(self) # Get username and password username = self.username password = self.password self.sc = rest.StreamsConnection(username=username, password=password) @unittest.skipIf(not test_vers.tester_supported() , "Tester not supported") class TestBluemixUnicode(TestUnicode): def setUp(self): Tester.setup_streaming_analytics(self, force_remote_build=True) vcap = self.test_config.get('topology.service.vcap') sn = self.test_config.get('topology.service.name') self.sc = rest.StreamingAnalyticsConnection(vcap, sn)
Python
0.000082
@@ -486,20 +486,16 @@ -for instance @@ -495,18 +495,17 @@ nstance -in += self.sc @@ -517,17 +517,16 @@ instance -s (id=ec.i @@ -538,27 +538,25 @@ ce_id()) -:%0A +%0A for j @@ -551,21 +551,13 @@ - for job -in += ins @@ -569,17 +569,16 @@ .get_job -s (id=ec.j @@ -585,33 +585,25 @@ ob_id()) -:%0A +%0A for vie @@ -590,25 +590,24 @@ ())%0A - for view in @@ -623,24 +623,16 @@ iews():%0A -
031bc75556d24f7f603cb9b33ecb47e0f06eb826
Add test for parsing slice_time_reference options
fmriprep/cli/tests/test_parser.py
fmriprep/cli/tests/test_parser.py
"""Test parser.""" from packaging.version import Version import pytest from ..parser import _build_parser from .. import version as _version from ... import config MIN_ARGS = ["data/", "out/", "participant"] @pytest.mark.parametrize( "args,code", [ ([], 2), (MIN_ARGS, 2), # bids_dir does not exist (MIN_ARGS + ["--fs-license-file"], 2), (MIN_ARGS + ["--fs-license-file", "fslicense.txt"], 2), ], ) def test_parser_errors(args, code): """Check behavior of the parser.""" with pytest.raises(SystemExit) as error: _build_parser().parse_args(args) assert error.value.code == code @pytest.mark.parametrize("args", [MIN_ARGS, MIN_ARGS + ["--fs-license-file"]]) def test_parser_valid(tmp_path, args): """Check valid arguments.""" datapath = tmp_path / "data" datapath.mkdir(exist_ok=True) args[0] = str(datapath) if "--fs-license-file" in args: _fs_file = tmp_path / "license.txt" _fs_file.write_text("") args.insert(args.index("--fs-license-file") + 1, str(_fs_file.absolute())) opts = _build_parser().parse_args(args) assert opts.bids_dir == datapath @pytest.mark.parametrize( "argval,gb", [ ("1G", 1), ("1GB", 1), ("1000", 1), # Default units are MB ("32000", 32), # Default units are MB ("4000", 4), # Default units are MB ("1000M", 1), ("1000MB", 1), ("1T", 1000), ("1TB", 1000), ("%dK" % 1e6, 1), ("%dKB" % 1e6, 1), ("%dB" % 1e9, 1), ], ) def test_memory_arg(tmp_path, argval, gb): """Check the correct parsing of the memory argument.""" datapath = tmp_path / "data" datapath.mkdir(exist_ok=True) _fs_file = tmp_path / "license.txt" _fs_file.write_text("") args = MIN_ARGS + ["--fs-license-file", str(_fs_file)] + ["--mem", argval] opts = _build_parser().parse_args(args) assert opts.memory_gb == gb @pytest.mark.parametrize("current,latest", [("1.0.0", "1.3.2"), ("1.3.2", "1.3.2")]) def test_get_parser_update(monkeypatch, capsys, current, latest): """Make sure the out-of-date banner is shown.""" expectation = Version(current) < Version(latest) def _mock_check_latest(*args, **kwargs): return Version(latest) monkeypatch.setattr(config.environment, "version", current) monkeypatch.setattr(_version, "check_latest", _mock_check_latest) _build_parser() captured = capsys.readouterr().err msg = """\ You are using fMRIPrep-%s, and a newer version of fMRIPrep is available: %s. Please check out our documentation about how and when to upgrade: https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading""" % ( current, latest, ) assert (msg in captured) is expectation @pytest.mark.parametrize( "flagged", [(True, None), (True, "random reason"), (False, None)] ) def test_get_parser_blacklist(monkeypatch, capsys, flagged): """Make sure the blacklisting banner is shown.""" def _mock_is_bl(*args, **kwargs): return flagged monkeypatch.setattr(_version, "is_flagged", _mock_is_bl) _build_parser() captured = capsys.readouterr().err assert ("FLAGGED" in captured) is flagged[0] if flagged[0]: assert (flagged[1] or "reason: unknown") in captured def test_bids_filter_file(tmp_path, capsys): bids_path = tmp_path / "data" out_path = tmp_path / "out" bff = tmp_path / "filter.json" args = [str(bids_path), str(out_path), "participant", "--bids-filter-file", str(bff)] bids_path.mkdir() parser = _build_parser() with pytest.raises(SystemExit): parser.parse_args(args) err = capsys.readouterr().err assert "Path does not exist:" in err bff.write_text('{"invalid json": }') with pytest.raises(SystemExit): parser.parse_args(args) err = capsys.readouterr().err assert "JSON syntax error in:" in err
Python
0.000001
@@ -156,16 +156,63 @@ t config +%0Afrom ...tests.test_config import _reset_config %0A%0AMIN_AR @@ -4011,16 +4011,450 @@ ror in:%22 in err%0A + _reset_config()%0A%0A%0A@pytest.mark.parametrize(%22st_ref%22, (None, %220%22, %221%22, %220.5%22, %22start%22, %22middle%22))%0Adef test_slice_time_ref(tmp_path, st_ref):%0A bids_path = tmp_path / %22data%22%0A out_path = tmp_path / %22out%22%0A args = %5Bstr(bids_path), str(out_path), %22participant%22%5D%0A if st_ref:%0A args.extend(%5B%22--slice-time-ref%22, st_ref%5D)%0A bids_path.mkdir()%0A%0A parser = _build_parser()%0A%0A parser.parse_args(args)%0A _reset_config()%0A
81cd7e3126db68872dbd57625cfa4368e0f98050
change name back from byte
flexget/plugins/output/pyload.py
flexget/plugins/output/pyload.py
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote

from logging import getLogger

from requests.exceptions import RequestException

from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.config_schema import one_or_more
from flexget.utils.template import RenderError

log = getLogger('pyload')


class PyloadApi(object):
    def __init__(self, requests, url):
        self.requests = requests
        self.url = url

    def get_session(self, config):
        # Login
        data = {'username': config['username'], 'password': config['password']}
        result = self.post('login', data=data)
        response = result.json()
        if not response:
            raise plugin.PluginError('Login failed', log)
        return response.replace('"', '')

    def get(self, method):
        try:
            return self.requests.get(self.url.rstrip("/") + "/" + method.strip("/"))
        except RequestException as e:
            if e.response and e.response.status_code == 500:
                raise plugin.PluginError('Internal API Error: <%s> <%s>' % (method, self.url), log)
            raise

    def post(self, method, data):
        try:
            return self.requests.post(self.url.rstrip("/") + "/" + method.strip("/"), data=data)
        except RequestException as e:
            if e.response and e.response.status_code == 500:
                raise plugin.PluginError('Internal API Error: <%s> <%s> <%s>' % (method, self.url, data), log)
            raise


class PluginPyLoad(object):
    """
    Parse task content or url for hoster links and adds them to pyLoad.

    Example::

      pyload:
        api: http://localhost:8000/api
        queue: yes
        username: my_username
        password: my_password
        folder: desired_folder
        package: desired_package_name (jinja2 supported)
        package_password: desired_package_password
        hoster:
          - YoutubeCom
        parse_url: no
        multiple_hoster: yes
        enabled: yes

    Default values for the config elements::

      pyload:
        api: http://localhost:8000/api
        queue: no
        hoster: ALL
        parse_url: no
        multiple_hoster: yes
        enabled: yes
    """

    __author__ = 'http://pyload.org'
    __version__ = '0.5'

    DEFAULT_API = 'http://localhost:8000/api'
    DEFAULT_QUEUE = False
    DEFAULT_FOLDER = ''
    DEFAULT_HOSTER = []
    DEFAULT_PARSE_URL = False
    DEFAULT_MULTIPLE_HOSTER = True
    DEFAULT_PREFERRED_HOSTER_ONLY = False
    DEFAULT_HANDLE_NO_URL_AS_FAILURE = False

    schema = {
        'type': 'object',
        'properties': {
            'api': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'folder': {'type': 'string'},
            'package': {'type': 'string'},
            'package_password': {'type': 'string'},
            'queue': {'type': 'boolean'},
            'parse_url': {'type': 'boolean'},
            'multiple_hoster': {'type': 'boolean'},
            'hoster': one_or_more({'type': 'string'}),
            'preferred_hoster_only': {'type': 'boolean'},
            'handle_no_url_as_failure': {'type': 'boolean'},
            'enabled': {'type': 'boolean'}
        },
        'required': ['username', 'password'],
        'additionalProperties': False
    }

    def on_task_output(self, task, config):
        if not config.get('enabled', True):
            return
        if not task.accepted:
            return

        self.add_entries(task, config)

    def add_entries(self, task, config):
        """Adds accepted entries"""

        apiurl = config.get('api', self.DEFAULT_API)
        api = PyloadApi(task.requests, apiurl)

        try:
            session = api.get_session(config)
        except IOError:
            raise plugin.PluginError('pyLoad not reachable', log)
        except plugin.PluginError:
            raise
        except Exception as e:
            raise plugin.PluginError('Unknown error: %s' % str(e), log)

        hoster = config.get('hoster', self.DEFAULT_HOSTER)

        for entry in task.accepted:
            # bunch of urls now going to check
            content = entry.get('description', '') + ' ' + quote(entry['url'])
            content = json.dumps(content)

            url = json.dumps(entry['url']) if config.get('parse_url', self.DEFAULT_PARSE_URL) else "''"

            log.debug('Parsing url %s', url)

            data = {'html': content, 'url': url, 'session': session}
            result = api.post('parseURLs', data=data)

            parsed = result.json()

            urls = []

            # check for preferred hoster
            for name in hoster:
                if name in parsed:
                    urls.extend(parsed[name])
                    if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):
                        break

            # no preferred hoster and not preferred hoster only - add all recognized plugins
            if not urls and not config.get('preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY):
                for name, purls in parsed.items():
                    if name != 'BasePlugin':
                        urls.extend(purls)

            if task.options.test:
                log.info('Would add `%s` to pyload', urls)
                continue

            # no urls found
            if not urls:
                if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):
                    entry.fail('No suited urls in entry %s' % entry['title'])
                else:
                    log.info('No suited urls in entry %s', entry['title'])
                continue

            log.debug('Add %d urls to pyLoad', len(urls))

            try:
                dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0  # Destination.Queue = 1

                # Use the title of the entry, if no naming schema for the package is defined.
                name = config.get('package', entry['title'])

                # If name has jinja template, render it
                try:
                    name = entry.render(name)
                except RenderError as e:
                    name = entry['title']
                    log.error('Error rendering jinja event: %s', e)

                data = {'name': json.dumps('"%s"' % name.encode('ascii', 'ignore')),
                        'links': json.dumps(urls),
                        'dest': json.dumps(dest),
                        'session': session}

                pid = api.post('addPackage', data=data).text
                log.debug('added package pid: %s', pid)

                # Set Folder
                folder = config.get('folder', self.DEFAULT_FOLDER)
                folder = entry.get('path', folder)
                if folder:
                    # If folder has jinja template, render it
                    try:
                        folder = entry.render(folder)
                    except RenderError as e:
                        folder = self.DEFAULT_FOLDER
                        log.error('Error rendering jinja event: %s', e)
                    # set folder with api
                    data = json.dumps({'folder': folder})
                    api.post("setPackageData", data={'pid': pid, 'data': data, 'session': session})

                # Set Package Password
                package_password = config.get('package_password')
                if package_password:
                    data = json.dumps({'password': package_password})
                    api.post('setPackageData', data={'pid': pid, 'data': data, 'session': session})

            except Exception as e:
                entry.fail(str(e))


@event('plugin.register')
def register_plugin():
    plugin.register(PluginPyLoad, 'pyload', api_ver=2)
Python
0.000007
@@ -6526,17 +6526,8 @@ mps( -'%22%25s%22' %25 name @@ -6552,16 +6552,25 @@ ignore') +.decode() ),%0A
cee0a7a3af8f17c69e4c9701a363f50904321bd1
fix daemonizer.kill
aquests/lib/daemonize.py
aquests/lib/daemonize.py
import os
import sys
import time
import signal
from . import killtree, processutil

class Daemonizer:
	def __init__(self, chdir="/", procname = None, umask=0o22):
		self.chdir = chdir
		self.procname = procname
		self.umask = umask
		self.pidfile = os.path.join (chdir, '.pid')

	def runAsDaemon(self):
		if status (self.chdir, self.procname):
			return 0
		self.fork_and_die()
		self.dettach_env ()
		self.fork_and_die()
		sys.stdout.flush()
		sys.stderr.flush()
		self.attach_stream('stdin', 'r')
		self.attach_stream('stdout', 'a+')
		self.attach_stream('stderr', 'a+')
		return 1

	def dettach_env (self):
		os.setsid()
		os.umask(self.umask)
		os.chdir(self.chdir)

	def attach_stream (self, name, mode, fd = '/dev/null'):
		stream = open(fd, mode)
		os.dup2(stream.fileno(), getattr(sys, name).fileno())

	def fork_and_die(self):
		r = os.fork()
		if r == -1:
			raise OSError("Couldn't fork().")
		elif r > 0: # I'm the parent
			if self.pidfile:
				open (self.pidfile, 'w').write (str(r))
			sys.exit(0)
		elif r < 0:
			raise OSError("Something bizarre happened while trying to fork().")
		# now only r = 0 (the child) survives.
		return r

def status (chdir, procname = None):
	pidfile = os.path.join (chdir, '.pid')
	if not os.path.isfile (pidfile):
		return 0
	with open (pidfile) as f:
		pid = int (f.read ())
	return processutil.is_running (pid, procname) and pid or 0
	
def kill (chdir, procname = None, include_children = True):
	import psutil
	for i in range (2):
		pid = status (chdir, procname)
		if not pid:
			break
		os.kill (pid, signal.SIGTERM)
		time.sleep (2)
		if include_children:
			try:
				killtree.kill (pid, True)
			except psutil.NoSuchProcess:
				pass
		while processutil.is_running (pid, procname):
			time.sleep (1)
	try:
		os.remove (os.path.join (chdir, ".pid"))
	except FileNotFoundError:
		pass

if __name__ == "__main__" :
	import time
	Daemonizer ().runAsDaemon ()
	f = open ('/home/ubuntu/out', 'w')
	while 1:
		time.sleep (1)
		f.write ('asdkljaldjalkdjalkdsa\n')
		f.flush()
	f.close ()
Python
0.000002
@@ -1401,17 +1401,16 @@ id or 0%0A -%09 %0Adef kil @@ -1454,16 +1454,34 @@ ildren = + True, signaling = True):%0A @@ -1550,16 +1550,18 @@ rocname) +%09%09 %0A%09%09if no @@ -1579,16 +1579,37 @@ break%0A%09%09 +%0A%09%09if signaling:%09%0A%09%09%09 os.kill @@ -1629,19 +1629,18 @@ SIGTERM) -%09%09 %0A +%09 %09%09time.s @@ -1648,16 +1648,20 @@ eep (2)%0A +%09%09%09%0A %09%09if inc
15283945f3103392d5d056a4ccf58281192ab8e4
Fix Pylint warnings
specdep.py
specdep.py
#!/usr/bin/python

# see http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch16s04.html

import sys
import os
import platform
import urlparse
import pkg
import getopt
from scripts.lib import mappkgname


def build_type():
    debian_like = ["ubuntu", "debian"]
    rhel_like = ["fedora", "redhat", "centos"]

    dist = platform.linux_distribution(full_distribution_name=False)[0].lower()
    assert dist in debian_like + rhel_like

    if dist in debian_like:
        return "deb"
    elif dist in rhel_like:
        return "rpm"


def map_package_name_deb(name):
    """Map RPM package name to equivalent Deb names"""
    return mappkgname.map_package(name)


# Rules to build SRPM from SPEC
def build_srpm_from_spec(spec):
    srpmpath = spec.source_package_path()
    print '%s: %s %s' % (srpmpath, spec.specpath(),
                         " ".join(spec.source_paths()))


# Rules to download sources
# Assumes each RPM only needs one download - we have some multi-source
# packages but in all cases the additional sources are patches provided
# in the Git repository
def download_rpm_sources(spec):
    for (url, path) in zip(spec.source_urls(), spec.source_paths()):
        source = urlparse.urlparse(url)

        # Source comes from a remote HTTP server
        if source.scheme in ["http", "https"]:
            print '%s: %s' % (path, spec.specpath())
            print '\t@echo [CURL] $@'
            print '\t@curl --silent --show-error -L -o $@ %s' % url

        # Source comes from a local file or directory
        if source.scheme == "file":
            print '%s: %s $(shell find %s)' % (
                path, spec.specpath(), source.path)

            # Assume that the directory name is already what's expected by the
            # spec file, and prefix it with the version number in the tarball
            print '\t@echo [GIT] $@'
            dirname = "%s-%s" % (os.path.basename(source.path), spec.version())
            print '\t@git --git-dir=%s/.git '\
                'archive --prefix %s/ -o $@ HEAD' % (source.path, dirname)


# Rules to build RPMS from SRPMS (uses information from the SPECs to
# get packages)
def build_rpm_from_srpm(spec):
    # This doesn't generate the right Makefile fragment for a multi-target
    # rule - we may end up building too often, or not rebuilding correctly
    # on a partial build
    rpm_paths = spec.binary_package_paths()
    srpm_path = spec.source_package_path()
    for rpm_path in rpm_paths:
        print '%s: %s' % (rpm_path, srpm_path)


def package_to_rpm_map(specs):
    provides_to_rpm = {}
    for spec in specs:
        for provided in spec.provides():
            for rpmpath in spec.binary_package_paths():
                provides_to_rpm[provided] = rpmpath
    return provides_to_rpm


def buildrequires_for_rpm(spec, provides_to_rpm):
    for rpmpath in spec.binary_package_paths():
        for buildreq in spec.buildrequires():
            # Some buildrequires come from the system repository
            if provides_to_rpm.has_key(buildreq):
                buildreqrpm = provides_to_rpm[buildreq]
                print "%s: %s" % (rpmpath, buildreqrpm)


def usage(name):
    """ Print usage information """
    print "usage: %s [-h] [-i PKG] SPEC [SPEC ...]" % name


def parse_cmdline():
    """ Parse command line options """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:d:",
                                   ["help", "ignore=", "dist="])
    except getopt.GetoptError as err:
        usage(sys.argv[0])
        print str(err)
        sys.exit(1)

    ignore = []
    dist = ""
    for opt, val in opts:
        if opt == "-i" or opt == "--ignore":
            ignore.append(val)
        elif opt == "-d" or opt == "--dist":
            dist = val
        else:
            usage(sys.argv[0])
            print "unknown option: %s" % opt
            sys.exit(1)

    if len(args) == 0:
        usage(sys.argv[0])
        print "%s: error: too few arguments" % sys.argv[0]
        sys.exit(1)

    return {"ignore": ignore, "specs": args, "dist": dist}


def main():
    params = parse_cmdline()

    specs = {}

    for spec_path in params['specs']:
        try:
            if build_type() == "deb":
                spec = pkg.Spec(spec_path, target="deb",
                                map_name=map_package_name_deb)
            else:
                spec = pkg.Spec(spec_path, target="rpm",
                                dist=params['dist'])
            pkg_name = spec.name()
            if pkg_name in params['ignore']:
                continue
            specs[os.path.basename(spec_path)] = spec

        except pkg.SpecNameMismatch as e:
            sys.stderr.write("error: %s\n" % e.message)
            sys.exit(1)

    provides_to_rpm = package_to_rpm_map(specs.values())

    for spec in specs.itervalues():
        build_srpm_from_spec(spec)
        download_rpm_sources(spec)
        build_rpm_from_srpm(spec)
        buildrequires_for_rpm(spec, provides_to_rpm)
        print ""

    # Generate targets to build all srpms and all rpms
    all_rpms = []
    all_srpms = []
    for spec in specs.itervalues():
        rpm_path = spec.binary_package_paths()[0]
        all_rpms.append(rpm_path)
        all_srpms.append(spec.source_package_path())
        print "%s: %s" % (spec.name(), rpm_path)
    print ""

    print "rpms: " + " \\\n\t".join(all_rpms)
    print ""
    print "srpms: " + " \\\n\t".join(all_srpms)
    print ""
    print "install: all"
    print "\t. scripts/%s/install.sh" % build_type()


if __name__ == "__main__":
    main()
Python
0
@@ -3433,17 +3433,16 @@ %22hi:d:%22, - %0A @@ -4677,16 +4677,18 @@ tch as e +xn :%0A @@ -4727,16 +4727,18 @@ s%5Cn%22 %25 e +xn .message
f5c5fef9cfdc94ad2d1b7d95f990e288251f57fe
Add search analyzer to search
froide/helper/search/__init__.py
froide/helper/search/__init__.py
import importlib

from django.conf import settings

from elasticsearch_dsl import analyzer, tokenizer
from django_elasticsearch_dsl import Index

from .signal_processor import CelerySignalProcessor
from .queryset import SearchQuerySetWrapper
from .registry import search_registry


__all__ = [
    'CelerySignalProcessor',
    'search_registry',
    'SearchQuerySetWrapper',
]


def get_index(name):
    index_name = '%s_%s' % (
        settings.ELASTICSEARCH_INDEX_PREFIX,
        name
    )

    # if settings.ELASTICSEARCH_INDEX_PREFIX == 'froide_test':
    #     index_name += '_%s' % threading.get_ident()

    index = Index(index_name)
    # See Elasticsearch Indices API reference for available settings
    index.settings(
        number_of_shards=1,
        number_of_replicas=0
    )
    return index


def get_default_text_analyzer():
    return analyzer(
        'froide_analyzer',
        tokenizer='standard',
        filter=[
            'standard',
            'lowercase',
            'asciifolding',
        ]
    )


def get_default_ngram_analyzer():
    return analyzer(
        'froide_ngram_analyzer',
        tokenizer=tokenizer(
            'froide_ngram_tokenzier',
            type='edge_ngram',
            min_gram=1,
            max_gram=15,
            token_chars=['letter', 'digit']
        ),
        filter=[
            'standard',
            'lowercase',
            'asciifolding',
        ]
    )


def get_func(config_name, default_func):
    def get_it():
        from django.conf import settings
        func_path = settings.FROIDE_CONFIG.get(config_name, None)
        if not func_path:
            return default_func()
        module, func = func_path.rsplit('.', 1)
        module = importlib.import_module(module)
        analyzer_func = getattr(module, func)
        return analyzer_func()
    return get_it


get_text_analyzer = get_func('search_text_analyzer', get_default_text_analyzer)
get_ngram_analyzer = get_func('search_ngram_analyzer', get_default_ngram_analyzer)
Python
0
@@ -1877,19 +1877,89 @@ nc(' +text_analyzer', get_default_text_analyzer)%0Aget_search_analyzer = get_func(' search -_text _ana @@ -2024,23 +2024,16 @@ t_func(' -search_ ngram_an
56b38e64aeea12269b36d11849e0952377510c16
Change method of listening to state changes
homeassistant/components/automation/template.py
homeassistant/components/automation/template.py
""" homeassistant.components.automation.template ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Offers template automation rules. For more details about this automation rule, please refer to the documentation at https://home-assistant.io/components/automation/#template-trigger """ import logging from homeassistant.const import CONF_VALUE_TEMPLATE from homeassistant.exceptions import TemplateError from homeassistant.helpers.event import track_state_change from homeassistant.util import template _LOGGER = logging.getLogger(__name__) def trigger(hass, config, action): """ Listen for state changes based on `config`. """ value_template = config.get(CONF_VALUE_TEMPLATE) if value_template is None: _LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE) return False # Get all entity ids all_entity_ids = hass.states.entity_ids() # Local variable to keep track of if the action has already been triggered already_triggered = False def state_automation_listener(entity, from_s, to_s): """ Listens for state changes and calls action. """ nonlocal already_triggered template_result = _check_template(hass, value_template) # Check to see if template returns true if template_result and not already_triggered: already_triggered = True action() elif not template_result: already_triggered = False track_state_change(hass, all_entity_ids, state_automation_listener) return True def if_action(hass, config): """ Wraps action method with state based condition. """ value_template = config.get(CONF_VALUE_TEMPLATE) if value_template is None: _LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE) return False return lambda: _check_template(hass, value_template) def _check_template(hass, value_template): """ Checks if result of template is true """ try: value = template.render(hass, value_template, {}) except TemplateError: _LOGGER.exception('Error parsing template') return False return value.lower() == 'true'
Python
0
@@ -346,17 +346,38 @@ TEMPLATE +, EVENT_STATE_CHANGED %0A - from hom @@ -423,67 +423,8 @@ ror%0A -from homeassistant.helpers.event import track_state_change%0A from @@ -777,80 +777,8 @@ se%0A%0A - # Get all entity ids%0A all_entity_ids = hass.states.entity_ids()%0A%0A @@ -895,54 +895,28 @@ def -state_automation_listener(entity, from_s, to_s +event_listener(event ):%0A @@ -1315,65 +1315,50 @@ -track_state_change(hass, all_entity_ids, state_automation +hass.bus.listen(EVENT_STATE_CHANGED, event _lis @@ -1364,17 +1364,16 @@ stener)%0A -%0A retu
1d68777981ef9189ba1b871211cee58598daf498
Add transmission sensor: number of active torrents (#9914)
homeassistant/components/sensor/transmission.py
homeassistant/components/sensor/transmission.py
""" Support for monitoring the Transmission BitTorrent client API. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.transmission/ """ import logging from datetime import timedelta import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_NAME, CONF_PORT, CONF_MONITORED_VARIABLES, STATE_UNKNOWN, STATE_IDLE) from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['transmissionrpc==0.11'] _LOGGER = logging.getLogger(__name__) _THROTTLED_REFRESH = None DEFAULT_NAME = 'Transmission' DEFAULT_PORT = 9091 SENSOR_TYPES = { 'current_status': ['Status', None], 'download_speed': ['Down Speed', 'MB/s'], 'upload_speed': ['Up Speed', 'MB/s'] } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_USERNAME): cv.string, }) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Transmission sensors.""" import transmissionrpc from transmissionrpc.error import TransmissionError name = config.get(CONF_NAME) host = config.get(CONF_HOST) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) port = config.get(CONF_PORT) transmission_api = transmissionrpc.Client( host, port=port, user=username, password=password) try: transmission_api.session_stats() except TransmissionError: _LOGGER.exception("Connection to Transmission API failed") return False # pylint: disable=global-statement global _THROTTLED_REFRESH _THROTTLED_REFRESH = Throttle(timedelta(seconds=1))( transmission_api.session_stats) dev = [] for variable in config[CONF_MONITORED_VARIABLES]: dev.append(TransmissionSensor(variable, transmission_api, name)) add_devices(dev) class TransmissionSensor(Entity): """Representation of a Transmission sensor.""" def __init__(self, sensor_type, transmission_client, client_name): """Initialize the sensor.""" self._name = SENSOR_TYPES[sensor_type][0] self.transmission_client = transmission_client self.type = sensor_type self.client_name = client_name self._state = None self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] @property def name(self): """Return the name of the sensor.""" return '{} {}'.format(self.client_name, self._name) @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement # pylint: disable=no-self-use def refresh_transmission_data(self): """Call the throttled Transmission refresh method.""" from transmissionrpc.error import TransmissionError if _THROTTLED_REFRESH is not None: try: _THROTTLED_REFRESH() except TransmissionError: _LOGGER.error("Connection to Transmission API failed") def update(self): """Get the latest data from Transmission and updates the state.""" self.refresh_transmission_data() if self.type == 'current_status': if self.transmission_client.session: upload = self.transmission_client.session.uploadSpeed download = self.transmission_client.session.downloadSpeed if upload > 0 and 
download > 0: self._state = 'Up/Down' elif upload > 0 and download == 0: self._state = 'Seeding' elif upload == 0 and download > 0: self._state = 'Downloading' else: self._state = STATE_IDLE else: self._state = STATE_UNKNOWN if self.transmission_client.session: if self.type == 'download_speed': mb_spd = float(self.transmission_client.session.downloadSpeed) mb_spd = mb_spd / 1024 / 1024 self._state = round(mb_spd, 2 if mb_spd < 0.1 else 1) elif self.type == 'upload_speed': mb_spd = float(self.transmission_client.session.uploadSpeed) mb_spd = mb_spd / 1024 / 1024 self._state = round(mb_spd, 2 if mb_spd < 0.1 else 1)
Python
0.00004
@@ -271,16 +271,69 @@ as vol%0A%0A +import homeassistant.helpers.config_validation as cv%0A from hom @@ -518,23 +518,8 @@ LES, - STATE_UNKNOWN, STA @@ -618,61 +618,8 @@ ttle -%0Aimport homeassistant.helpers.config_validation as cv %0A%0ARE @@ -791,16 +791,66 @@ PES = %7B%0A + 'active_torrents': %5B'Active Torrents', None%5D,%0A 'cur @@ -967,16 +967,17 @@ 'MB/s'%5D +, %0A%7D%0A%0APLAT @@ -2654,35 +2654,25 @@ self.t -ransmission +m _client = tr @@ -3865,35 +3865,25 @@ if self.t -ransmission +m _client.sess @@ -3914,35 +3914,25 @@ oad = self.t -ransmission +m _client.sess @@ -3976,35 +3976,25 @@ oad = self.t -ransmission +m _client.sess @@ -4416,21 +4416,12 @@ e = -STATE_UNKNOWN +None %0A%0A @@ -4431,35 +4431,25 @@ if self.t -ransmission +m _client.sess @@ -4532,35 +4532,25 @@ float(self.t -ransmission +m _client.sess @@ -4767,27 +4767,17 @@ t(self.t -ransmission +m _client. @@ -4893,28 +4893,149 @@ , 2 if mb_spd %3C 0.1 else 1)%0A + elif self.type == 'active_torrents':%0A self._state = self.tm_client.session.activeTorrentCount%0A
afffe668a1f5ec5f5073897ec712f34b77bb3613
Allow intermediate caching on API calls
telostats/stations/api.py
telostats/stations/api.py
import json
from datetime import datetime, timedelta

from dateutil.parser import parse as parse_date

from tastypie.cache import SimpleCache
from tastypie.resources import ModelResource, Resource, fields
from tastypie.serializers import Serializer

from .models import Station
from ..utils.tempodb import TempoDbClient


class StationResource(ModelResource):
    class Meta:
        queryset = Station.visible_objects.all()
        resource_name = 'station'
        serializer = Serializer(formats=['json'])
        limit = 0  # show all stations by default
        allowed_methods = ['get']
        filtering = {
            'id': ('exact', ),
        }
        excludes = ['visible']

    def dehydrate(self, bundle):
        bundle.data['polygon'] = json.loads(bundle.data['polygon'])
        return bundle


class StationSeries:
    def __init__(self, initial=None):
        self.__dict__['_data'] = {}
        if hasattr(initial, 'items'):
            self.__dict__['_data'] = initial

    def __getattr__(self, name):
        return self._data.get(name, None)

    def __setattr__(self, name, value):
        self.__dict__['_data'][name] = value

    def to_dict(self):
        return self._data


class RecentResource(Resource):
    id = fields.CharField(attribute='id')
    series = fields.ListField(attribute='series')

    class Meta:
        object_class = StationSeries
        resource_name = 'recent'
        # cache = SimpleCache(timeout=60 * 60)
        serializer = Serializer(formats=['json'])
        limit = 1
        list_allowed_methods = []
        detail_allowed_methods = ['get']
        filtering = {
            'id': ('exact', ),
        }

    def _client(self):
        return TempoDbClient()

    def _get_series(self, station_id=None, **kwargs):
        return self._client().get_series(station_id, **kwargs)

    def get_object_list(self, request):
        series_list = self._get_series().items()
        res = []
        for sta_id, series in series_list:
            obj = StationSeries(initial=series)
            obj.id = sta_id
            res.append(obj)
        return res

    def obj_get_list(self, request=None, **kwargs):
        return self.get_object_list(request)

    def obj_get(self, request=None, **kwargs):
        station_id = kwargs['pk']
        series = self._get_series(station_id=station_id)[station_id]

        # zip the two lists together on same timestamps
        timestamps = [x['t'] for x in series['available']]  # or poles, dm;st
        available = [x['v'] for x in series['available']]
        poles = [x['v'] for x in series['poles']]
        series = [{
            'timestamp': t,
            'poles': p,
            'available': a,
            'bikes': p - a
        } for t, p, a in zip(timestamps, poles, available)]

        initial_series = {'series': series}
        station_series = StationSeries(initial=initial_series)
        station_series.id = station_id
        return station_series


class AverageResource(Resource):
    id = fields.CharField(attribute='id')
    series = fields.ListField(attribute='series')

    class Meta:
        object_class = StationSeries
        resource_name = 'average'
        # cache = SimpleCache(timeout=60 * 60 * 24 * 7)
        serializer = Serializer(formats=['json'])
        limit = 1
        list_allowed_methods = []
        detail_allowed_methods = ['get']
        filtering = {
            'id': ('exact', ),
        }

    def _client(self):
        return TempoDbClient()

    def _get_series(self, station_id=None, **kwargs):
        return self._client().get_series(station_id,
                                         start=timedelta(days=7), **kwargs)

    def obj_get(self, request=None, **kwargs):
        station_id = kwargs['pk']
        series = self._get_series(station_id=station_id)[station_id]

        # initial result structure
        res = dict([
            (i, {'available': [], 'poles': []}) for i in range(24)])

        # data collection in buckets
        for s in ['available', 'poles']:
            for datum in series[s]:
                hour = parse_date(datum['t']).hour
                res[hour][s].append(datum['v'])

        # reduce lists by average
        res = [{
            'timestamp': datetime(2013, 1, 1, k).isoformat(),
            'available': sum(v['available']) / len(v['available']),
            'poles': sum(v['poles']) / len(v['poles']),
        } for k, v in res.items()]

        # add final bike count
        for hour in res:
            hour['bikes'] = hour['poles'] - hour['available']

        initial_series = {'series': res}
        station_series = StationSeries(initial=initial_series)
        station_series.id = station_id
        return station_series
Python
0
@@ -122,22 +122,18 @@ import -Simple +No Cache%0Afr @@ -309,16 +309,341 @@ lient%0A%0A%0A +class APICache(NoCache):%0A def __init__(self, timeout=60, *args, **kwargs):%0A super(APICache, self).__init__(*args, **kwargs)%0A self.timeout = timeout%0A%0A def cache_control(self):%0A return %7B%0A 'max_age': self.timeout,%0A 's_maxage': self.timeout,%0A 'public': True%0A %7D%0A%0A class St @@ -737,16 +737,16 @@ s.all()%0A - @@ -771,16 +771,67 @@ tation'%0A + cache = APICache(timeout=60 * 60 * 24 * 7)%0A @@ -1779,34 +1779,32 @@ 'recent'%0A - # cache = SimpleC @@ -1788,38 +1788,35 @@ cache = -Simple +API Cache(timeout=60 @@ -1818,18 +1818,18 @@ ut=60 * -60 +15 )%0A @@ -3550,18 +3550,16 @@ %0A - # cache = @@ -3563,14 +3563,11 @@ e = -Simple +API Cach
3f80950f1fa9c5bad018a8cdaa9f6ae70168e4e1
Update main.py
device/src/main.py
device/src/main.py
#This is the file executing while STM32 MCU bootup, and in this file,
#it will call other functions to fullfill the project.
#Communication module: LoRa.
#Communication method with gateway via LoRa.
#Uart port drive LoRa module.
#Parse JSON between device and gateway via LoRa channel.

#LoRa module: E32-TTL-100
#Pin specification:
#Module          MCU
#M0(IN)   <-->  GPIO(X3)(OUT)  #mode setting, can not hang
#M1(IN)   <-->  GPIO(X4)(OUT)  #mode setting, can not hang
#RXD(IN)  <-->  X1(TX)(OUT)    #UART4
#TXD(OUT) <-->  X2(RX)(IN)     #UART4
#AUX(OUT) <-->  GPIO/INT(IN)   #module status detecting
#VCC
#GND

#Communication mode is 0, need to set M0 and M1 to 0.

import pyb
from pyb import Pin
from pyb import Timer
from pyb import UART
import micropython
#Import light intensity needed module
import LightIntensity
import time
import json

micropython.alloc_emergency_exception_buf(100)

Pin('Y11',Pin.OUT_PP).low()     #GND
Pin('Y9',Pin.OUT_PP).high()     #VCC

#Set LoRa module with mode-0.
M0 = Pin('X3', Pin.OUT_PP)
M1 = Pin('X4', Pin.OUT_PP)
M0.low()
M1.low()

#Init uart4 for LoRa module.
u4 = UART(4,9600)
u4.init(9600, bits=8, parity=None, stop=1)

cmd_online = '{"ID":"1", "CMD":"Online", "TYPE":"N", "VALUE":"N"}\n'
#Send Online command to gateway while it power on to obtain its status data from gateway's database.
u4.write(cmd_online)

#LED shining regularly(using timer) to indicate the program is running correctly
tim1 = Timer(1, freq=1)
tim1.callback(lambda t: pyb.LED(1).toggle())

#Read the light intensity value from sensor regularly.
'''
lightVlaue = 0
def getLightInten():
    global lightVlaue
    lightVlaue = LightIntensity.readLight()
tim1 = Timer(2, freq=1)
tim1.callback(getLightInten())
print(LightIntensity.readLight())
'''

if __name__=='__main__'
    while True:
        #Waiting for the message from UART4 to obtain LoRa data.
        len = u4.any()
        if(len > 0):
            recv = u4.read()
            print(recv)
            json_lora = json.loads(recv)    #Parse JSON from gateway.
            if (json_lora.get("CMD") == 'Online' and json_lora.get("TYPE") == 'Light2' ):
                if json_lora.get("VALUE") == 'On':
                    pyb.LED(2).on()
                else:
                    pyb.LED(2).off()
        print(LightIntensity.readLight())
        '''
        if lightVlaue > 0:
            print(lightVlaue)
            lightVlaue = 0
        '''
Python
0.000001
@@ -1779,16 +1779,17 @@ _main__' +: %0A while
06a398c6f14259ed03899eae84d54d12fb4336c3
Update test_code_style.py
_unittests/ut_module/test_code_style.py
_unittests/ut_module/test_code_style.py
""" @brief test log(time=0s) """ import sys import os import unittest from pyquickhelper.loghelper import fLOG from pyquickhelper.pycode import check_pep8, ExtTestCase try: import src except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", ".."))) if path not in sys.path: sys.path.append(path) import src class TestCodeStyle(ExtTestCase): """Test style.""" def test_src(self): "skip pylint" self.assertFalse(src is None) def test_style_src(self): thi = os.path.abspath(os.path.dirname(__file__)) src_ = os.path.normpath(os.path.join(thi, "..", "..", "src")) check_pep8(src_, fLOG=fLOG, pylint_ignore=('C0103', 'C1801', 'R0201', 'R1705', 'W0108', 'W0613', 'R0911', 'W0201', 'W070', 'W0622', 'R1702', 'C0111', 'W0703', 'C0200'), neg_pattern='.*MokadiGrammar_((frParser)|(frListener)|(frLexer))[.]py$', skip=["Unable to import 'ensae_teaching_cs.pythonnet'", "treant_wrapper.py:38: W0603", "No name 'imwrite' in module 'cv2'", "No name 'VideoCapture' in module 'cv2'", "pydata2016.py:77: W0612", "mokadi_mails.py:42: W0612", "Unable to import 'pymmails.grabber'", "mokadi_action_slides.py:84: W0612", "gui_mokadi_process.py:13: W0612", "Redefining name 'fLOG' from outer scope", "Access to member 'thread_listen' before its definition", "Instance of 'Exception' has no 'strerror'", "Instance of 'Exception' has no 'errno'", "gui_mokadi.py:129", "gui_mokadi.py:78: W0612", "Unused variable 'ensae_teaching_cs'", "Unable to import 'ensae_teaching_cs'", "Unable to import 'pygame'", "Unable to import 'pygame.camera'", "Unable to import 'cv2'", "Unable to import 'keyring'", "Unable to import 'pyaudio'", ]) def test_style_test(self): thi = os.path.abspath(os.path.dirname(__file__)) test = os.path.normpath(os.path.join(thi, "..", )) check_pep8(test, fLOG=fLOG, neg_pattern="temp_.*", pylint_ignore=('C0103', 'C1801', 'R0201', 'R1705', 'W0108', 'W0613', 'C0111', 'W0703', 'C0122', 'W0101', 'R1707'), skip=["src' imported but unused", "skip_' imported but unused", "skip__' imported but unused", "skip___' imported but unused", "Unused variable 'skip_'", "imported as skip_", "Unused import src", "Unable to import 'ensae_teaching_cs.pythonnet'", "Redefining name 'path' from outer scope", "Unable to import 'keyring'", ]) if __name__ == "__main__": unittest.main()
Python
0.000004
@@ -454,16 +454,83 @@ t src%0A%0A%0A +def _run_cmd_filter(name):%0A print(%22#%22, name)%0A return False%0A%0A%0A class Te @@ -855,16 +855,48 @@ OG=fLOG, + run_cmd_filter=_run_cmd_filter, %0A @@ -2785,16 +2785,48 @@ emp_.*%22, + run_cmd_filter=_run_cmd_filter, %0A
cc80ceb0a27d1aa0564f43e4d21d069272eab3c0
fix icons in public index
archivebox/index/html.py
archivebox/index/html.py
__package__ = 'archivebox.index'

from datetime import datetime
from typing import List, Optional, Iterator, Mapping
from pathlib import Path

from django.utils.html import format_html, mark_safe
from collections import defaultdict

from .schema import Link
from ..system import atomic_write
from ..logging_util import printable_filesize
from ..util import (
    enforce_types,
    ts_to_date,
    urlencode,
    htmlencode,
    urldecode,
)
from ..config import (
    OUTPUT_DIR,
    VERSION,
    GIT_SHA,
    FOOTER_INFO,
    HTML_INDEX_FILENAME,
)

MAIN_INDEX_TEMPLATE = 'static_index.html'
MINIMAL_INDEX_TEMPLATE = 'minimal_index.html'
LINK_DETAILS_TEMPLATE = 'snapshot.html'
TITLE_LOADING_MSG = 'Not yet archived...'


### Main Links Index

@enforce_types
def parse_html_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[str]:
    """parse an archive index html file and return the list of urls"""

    index_path = Path(out_dir) / HTML_INDEX_FILENAME
    if index_path.exists():
        with open(index_path, 'r', encoding='utf-8') as f:
            for line in f:
                if 'class="link-url"' in line:
                    yield line.split('"')[1]
    return ()

@enforce_types
def generate_index_from_links(links: List[Link], with_headers: bool):
    if with_headers:
        output = main_index_template(links)
    else:
        output = main_index_template(links, template=MINIMAL_INDEX_TEMPLATE)
    return output

@enforce_types
def main_index_template(links: List[Link], template: str=MAIN_INDEX_TEMPLATE) -> str:
    """render the template for the entire main index"""

    return render_django_template(template, {
        'version': VERSION,
        'git_sha': GIT_SHA,
        'num_links': str(len(links)),
        'date_updated': datetime.now().strftime('%Y-%m-%d'),
        'time_updated': datetime.now().strftime('%Y-%m-%d %H:%M'),
        'links': [link._asdict(extended=True) for link in links],
        'FOOTER_INFO': FOOTER_INFO,
    })


### Link Details Index

@enforce_types
def write_html_link_details(link: Link, out_dir: Optional[str]=None) -> None:
    out_dir = out_dir or link.link_dir

    rendered_html = link_details_template(link)
    atomic_write(str(Path(out_dir) / HTML_INDEX_FILENAME), rendered_html)


@enforce_types
def link_details_template(link: Link) -> str:

    from ..extractors.wget import wget_output_path

    link_info = link._asdict(extended=True)

    return render_django_template(LINK_DETAILS_TEMPLATE, {
        **link_info,
        **link_info['canonical'],
        'title': htmlencode(
            link.title
            or (link.base_url if link.is_archived else TITLE_LOADING_MSG)
        ),
        'url_str': htmlencode(urldecode(link.base_url)),
        'archive_url': urlencode(
            wget_output_path(link)
            or (link.domain if link.is_archived else '')
        ) or 'about:blank',
        'extension': link.extension or 'html',
        'tags': link.tags or 'untagged',
        'size': printable_filesize(link.archive_size) if link.archive_size else 'pending',
        'status': 'archived' if link.is_archived else 'not yet archived',
        'status_color': 'success' if link.is_archived else 'danger',
        'oldest_archive_date': ts_to_date(link.oldest_archive_date),
    })

@enforce_types
def render_django_template(template: str, context: Mapping[str, str]) -> str:
    """render a given html template string with the given template content"""
    from django.template.loader import render_to_string

    return render_to_string(template, context)


def snapshot_icons(snapshot) -> str:
    from core.models import EXTRACTORS

    archive_results = snapshot.archiveresult_set.filter(status="succeeded")
    link = snapshot.as_link()
    path = link.archive_path
    canon = link.canonical_outputs()
    output = ""
    output_template = '<a href="/{}/{}" class="exists-{}" title="{}">{}</a> &nbsp;'
    icons = {
        "singlefile": "❶",
        "wget": "🆆",
        "dom": "🅷",
        "pdf": "📄",
        "screenshot": "💻",
        "media": "📼",
        "git": "🅶",
        "archive_org": "🏛",
        "readability": "🆁",
        "mercury": "🅼",
        "warc": "📦"
    }
    exclude = ["favicon", "title", "headers", "archive_org"]
    # Missing specific entry for WARC

    extractor_items = defaultdict(lambda: None)
    for extractor, _ in EXTRACTORS:
        for result in archive_results:
            if result.extractor == extractor:
                extractor_items[extractor] = result

    for extractor, _ in EXTRACTORS:
        if extractor not in exclude:
            exists = extractor_items[extractor] is not None
            output += format_html(output_template, path, canon[f"{extractor}_path"], str(exists),
                                  extractor, icons.get(extractor, "?"))
        if extractor == "wget":
            # warc isn't technically it's own extractor, so we have to add it after wget
            exists = list((Path(path) / canon["warc_path"]).glob("*.warc.gz"))
            output += format_html(output_template, exists[0] if exists else '#', canon["warc_path"],
                                  str(bool(exists)), "warc", icons.get("warc", "?"))

        if extractor == "archive_org":
            # The check for archive_org is different, so it has to be handled separately
            target_path = Path(path) / "archive.org.txt"
            exists = target_path.exists()
            output += '<a href="{}" class="exists-{}" title="{}">{}</a> '.format(
                canon["archive_org_path"], str(exists), "archive_org", icons.get("archive_org", "?"))

    return format_html('<span class="files-icons" style="font-size: 1.1em; opacity: 0.8; min-width: 240px; display: inline-block">{}<span>', mark_safe(output))
Python
0.000001
@@ -4582,16 +4582,37 @@ exists = + False%0A if extract @@ -4642,16 +4642,274 @@ not None +:%0A outpath = (Path(path) / canon%5Bf%22%7Bextractor%7D_path%22%5D)%0A if outpath.is_dir():%0A exists = any(outpath.glob('*.*'))%0A elif outpath.is_file():%0A exists = outpath.stat().st_size %3E 100 %0A @@ -4991,36 +4991,32 @@ %5D, str(exists),%0A -
5f0e00378b49ca2431d819debaa6ad0d4b4921fe
manage account does not exist exception fix error handling
src/dm/api.py
src/dm/api.py
import json
from .pythontwitter import twitter
from bot.models import Account
import logging
import typing
from django.conf import settings

logger = logging.getLogger(__name__)


def get_api_dict(__screen_name: typing.Optional[str]=None):
    if not __screen_name:
        return

    try:
        account = Account.objects.get(username=__screen_name)
        consumer_key = account.backend_twitter_consumer_key
        consumer_secret = account.backend_twitter_consumer_secret
        access_token = account.backend_twitter_access_token
        access_token_secret = account.backend_twitter_access_token_secret
    except Account.DoesNotExist as e:
        logger.error(f"account with username {__screen_name} does not exist", e)
        return

    if (not consumer_key or not consumer_secret or
            not access_token or not access_token_secret):
        return

    api_dict = {'sleep_on_rate_limit': True}
    api_dict['consumer_key'] = consumer_key
    api_dict['consumer_secret'] = consumer_secret
    api_dict['access_token_key'] = access_token
    api_dict['access_token_secret'] = access_token_secret
    return api_dict


def getapi(__screen_name: typing.Optional[str]=None):
    api_dict = get_api_dict(__screen_name)
    if api_dict:
        return twitter.Api(**api_dict)


def getdm(dmid, screen_name):
    dm_lst = []
    count = 49
    cursor = -1
    api_ = getapi(screen_name)
    if not api_:
        return

    while True:
        logger.debug('client cursor:{}'.format(cursor))
        dms = api_.GetAllDirectMessages(count=count, cursor=cursor)
        dm_lst.extend(dms["events"])
        lcall= len(dms["events"])
        logger.debug(f'number of DMs returned during this call: {lcall}')
        logger.debug(f'total number of DMs returned so far: {len(dm_lst)}')
        cursor = dms.get("next_cursor", 0)
        logger.debug('client cursor:{}'.format(cursor))

        #list all ids in this api call result, stop if id is among them
        ids = []
        for dm in dms["events"]:
            ids.append(int(dm["id"]))
        if cursor == 0 or dmid in ids:
            break
    return dm_lst


def senddm(text, user_id=None, return_json=True, quick_reply=None,
           attachment=None, screen_name=None):
    api_ = getapi(screen_name)
    if not api_:
        return

    try:
        response = api_.PostDirectMessage(text=text, user_id=user_id,
                                          return_json=True,
                                          quick_reply=quick_reply,
                                          attachment=attachment)
    except twitter.error.TwitterError as e:
        logger.error("message_create event (DM) error: %s", e)
        response = str(e)

    logger.debug(response)

    try:
        response["event"]["created_timestamp"]
    except KeyError:
        """
        {'errors': [{'code': 349,
                     'message': 'You cannot send messages to this user.'}]}
        """
        try:
            error_msg = json.dumps(response)
        except:
            error_msg = "Unknown message_create event (DM) error"
        logger.error(error_msg)
        return error_msg

    userid = response["event"]["message_create"]["target"]["recipient_id"]
    txt = response["event"]["message_create"]["message_data"]["text"]
    if screen_name:
        account = f"@{screen_name}"
    else:
        account = "default account"
    msg = f"Sending DM '{txt}' to user_id {userid} from {account} was successful"
    logger.info(msg)
    return response
Python
0
@@ -718,20 +718,25 @@ ot exist -%22, e +. %5Cn %7Be%7D%22 )%0A
608c55bb681667a6e1fe65e328676a3a99deb391
Update mupenGenerator.py
configgen/generators/mupen/mupenGenerator.py
configgen/generators/mupen/mupenGenerator.py
#!/usr/bin/env python
import Command
import mupenControllers
import recalboxFiles
from generators.Generator import Generator


class MupenGenerator(Generator):

    # Main entry of the module
    # Configure mupen and return a command
    def generate(self, system, rom, playersControllers):
        # Settings recalbox default config file if no user defined one
        if not system.config['configfile']:
            # Using recalbox config file
            system.config['configfile'] = recalboxFiles.mupenCustom
            # Write controllers configuration files
            mupenControllers.writeControllersConfig(playersControllers)

        commandArray = ["mupen64plus", "--corelib", "/usr/lib/libmupen64plus.so.2.0.0",
                        "--gfx", "/usr/lib/mupen64plus/mupen64plus-video-{}.so".format(system.config['core']),
                        "--configdir", "/recalbox/share/system/configs/mupen64/",
                        "--datadir", "/recalbox/share/system/configs/mupen64/", rom]
        return Command.Command(videomode=system.config['videomode'], array=commandArray,
                               env={"SDL_VIDEO_GL_DRIVER":"/usr/lib/libGLESv2.so"})
Python
0
@@ -844,34 +844,32 @@ configdir%22, -%22/ recalbox /share/syste @@ -860,39 +860,23 @@ lbox -/share/system/configs/ +Files. mupen -64/%22 +Conf , %22- @@ -890,18 +890,16 @@ r%22, -%22/ recalbox /sha @@ -898,39 +898,23 @@ lbox -/share/system/configs/ +Files. mupen -64/%22 +Conf , ro
38a199ebf039ddfe33ad3dd3142a821db1495603
Add support of JSON sources
import_data/management/commands/process_xslt.py
import_data/management/commands/process_xslt.py
from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
import importlib
from django.db.models.fields import related


class Command(BaseCommand):
    help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'

    def add_arguments(self, parser):
        parser.add_argument('url', help='URL to fetch source XML')
        parser.add_argument('xslt_file', help='Path to XSLT transformation file')
        parser.add_argument('--validate', action='store_true',
                            help='Validate against Relax NG schema after transformation')
        rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
        parser.add_argument('--rng_file', default=rng_file,
                            help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
                                 'Used only if --validate is set')
        parser.add_argument('--save', action='store_true',
                            help='Save data to the model. Successful validation against Relax NG '
                                 'schema is required. Model names and fields in transformed XML '
                                 'must represent existing models and fields. Otherwise import '
                                 'will break with an exception')

    def handle(self, *args, **options):
        source_etree, encoding = self.load_source_by_url(options['url'])
        transformed_etree = self.xslt_transform(source_etree, options['xslt_file'])
        output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
        print '<?xml version="1.0" encoding="' + (encoding or '') + '"?>\n' + output

        if options['validate'] or options['save']:
            try:
                self.assert_valid_rng_schema(transformed_etree, options['rng_file'])
                print 'Document is valid'

                if options['save']:
                    saved_objects_count = 0
                    for model_element in self.get_model_elements(transformed_etree):
                        model = self.get_model(model_element.attrib['model'])
                        for item_element in self.get_item_elements(model_element):
                            obj = model()
                            for field_element in self.get_field_elements(item_element):
                                if field_element.attrib.get('unique') and not self.is_unique(model, field_element):
                                    break
                                setattr(obj, field_element.attrib['name'], field_element.text.strip())
                            else:
                                for fk_element in self.get_fk_elements(item_element):
                                    related_obj = self.save_related_item(fk_element)
                                    self.set_related(obj, related_obj)
                                obj.save()
                                for m2m_element in self.get_m2m_elements(item_element):
                                    related_obj = self.save_related_item(m2m_element)
                                    self.set_related(obj, related_obj)
                                saved_objects_count += 1
                    print 'Saved objects: ' + str(saved_objects_count)
            except etree.DocumentInvalid as ex:
                print 'Document is not valid: ' + str(ex)

    def get_model(self, model_path_string):
        '''
        Returns model object by string, containing its path
        Path is in format: application_name.ModelName
        The same format like by manage.py dumpdata
        '''
        application_name, model_name = model_path_string.split('.')
        models_import_str = application_name + '.models'
        models = importlib.import_module(models_import_str)
        model = getattr(models, model_name)
        return model

    def get_related_item_element(self, fk_element):
        '''
        Returns related element by its foreign key
        It takes <fk /> element and finds related <item /> element by attributes
        '''
        fk_item_element_selector = '//model[@model="{}"]//item[@key="{}"]'.format(
            fk_element.attrib['model'],
            fk_element.attrib['key']
        )
        fk_item_element = fk_element.xpath(fk_item_element_selector)[0]
        return fk_item_element

    def get_model_elements(self, transformed_etree):
        return transformed_etree.xpath('//model')

    def get_item_elements(self, model_element):
        return model_element.xpath('.//item')

    def get_field_elements(self, item_element):
        return item_element.xpath('.//field')

    def get_fk_elements(self, item_element):
        return item_element.xpath('.//fk')

    def get_m2m_elements(self, item_element):
        return item_element.xpath('.//m2mk')

    def save_related_item(self, fk_element):
        '''
        Finds and saves related <item /> element by given <fk /> element
        '''
        fk_model = self.get_model(fk_element.attrib['model'])
        obj = fk_model()
        related_item_element = self.get_related_item_element(fk_element)
        for field_element in self.get_field_elements(related_item_element):
            setattr(obj, field_element.attrib['name'], field_element.text.strip())
        obj.save()
        return obj

    def set_related(self, obj, related_obj):
        '''
        Finds and saves related model object
        It finds appropriate foreign key by related objects type.
        It means that you can't use two different foreign key fields
        to the same model
        '''
        fk_field = [
            field for field in type(obj)._meta.get_fields()
            if field.related_model == type(related_obj)
        ][0]
        if type(fk_field) is related.ForeignKey:
            setattr(obj, fk_field.name, related_obj)
        else:
            getattr(obj, fk_field.name).add(related_obj)

    def load_source_by_url(self, url):
        '''
        Gets soure etree and content encoding by given url
        file:// schema is also supported
        '''
        headers = {
            'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; rv:44.0) Gecko/20100101 Firefox/44.0'
        }
        req = urllib2.Request('http://stackoverflow.com', None, headers)
        response = urllib2.urlopen(req)
        encoding = response.headers.getparam('charset')
        content_type = response.info().type
        if 'xml' in content_type:
            source_etree = etree.parse(response)
        elif 'html' in content_type:
            source_etree = html.parse(response)
        else:
            raise Exception('Unsupported content type for source URL ' + url)
        return source_etree, encoding

    def xslt_transform(self, source_etree, xslt_file_path):
        '''
        Transforms source XML by given XSLT file
        '''
        xslt_etree = etree.parse(xslt_file_path)
        transform = etree.XSLT(xslt_etree)
        transformed_etree = transform(source_etree)
        return transformed_etree

    def assert_valid_rng_schema(self, transformed_etree, rng_file_path):
        '''
        Validates source XML against given Relax NG schema
        If validation falls raises an etree.DocumentInvalid exception
        '''
        rng_file_etree = etree.parse(rng_file_path)
        relaxng = etree.RelaxNG(rng_file_etree)
        relaxng.assertValid(transformed_etree)

    def is_unique(self, model, field_element):
        '''
        Checks uniqueness of given field_element value after it will be saved
        in the database
        This straightforward check lets to avoid duplicates in the database
        without need to set unique constraints there
        '''
        params = {field_element.attrib['name']: field_element.text}
        return not model.objects.filter(**params).count()
Python
0
@@ -169,16 +169,60 @@ related +%0Afrom dicttoxml import dicttoxml%0Aimport json %0A%0Aclass @@ -6845,32 +6845,163 @@ parse(response)%0A + elif 'json' in content_type:%0A dictionary = json.load(response)%0A source_etree = dicttoxml(dictionary)%0A else:%0A
76d5ada4b5853cea1445d3331fa9fb95299a335d
Update at 2017-07-19 00-12-21
data.py
data.py
import json
import random
from pathlib import Path
from pprint import pprint

import numpy as np
from tqdm import tqdm

from utils import sample, split, read_json, read_img

DATASET = Path('~/dataset/').expanduser()
DIRS = [x for x in DATASET.iterdir() if x.is_dir()]
TRAIN_DIRS = DIRS[:-1]
VAL_DIRS = DIRS[-1:]

IMAGE_TRAIN = Path('npy/image_train')
IMAGE_VAL = Path('npy/image_val')
WINDOW_TRAIN = Path('npy/window_train')
WINDOW_VAL = Path('npy/window_val')

N_IMAGE_TRAIN = 25000
N_IMAGE_VAL = 5000
N_WINDOW_TRAIN = 25000
N_WINDOW_VAL = 2000

TIMESTEPS = 30


def check():
    for folder in DIRS:
        label = read_json(folder / 'label.json')
        label_len = len(label)
        n_frames = len(list((folder / 'frames/').iterdir()))
        assert n_frames == label_len, '{}: {}, {}'.format(folder, label_len, n_frames)


def gen_image_npy(video_dirs, target_dir, n_samples):
    x_all = []
    y_all = []
    for video_dir in video_dirs:
        imgs = sorted((video_dir / 'frames/').iterdir())
        label = json.load((video_dir / 'label.json').open())['label']
        x_all.extend(imgs)
        y_all.extend(label)

    x_use, y_use = sample(x_all, y_all, k=n_samples)
    parts = split(x_use, y_use, k=1000)

    for idx, x_part, y_part in tqdm(parts):
        n = len(x_part)
        xs = np.zeros((n, 224, 224, 3), dtype=np.float32)
        ys = np.zeros((n, 1), dtype=np.uint8)
        for i in range(n):
            xs[i] = read_img(x_part[i])
            ys[i] = y_part[i]
        np.save(str(target_dir / 'x_{:05d}.npy'.format(idx)), xs)
        np.save(str(target_dir / 'y_{:05d}.npy'.format(idx)), ys)
        del xs, ys


def gen_window_npy(video_dirs, target_dir, n_samples):
    x_all = []
    y_all = []
    for video_dir in video_dirs:
        n_frames = len(list((video_dir / 'frames/').iterdir()))
        labels = read_json(video_dir / 'label.json')['label']
        windows = [(video_dir, i, i + TIMESTEPS)
                   for i in range(n_frames - TIMESTEPS)]
        x_all.extend(windows)
        y_all.extend([labels[e - 1] for (_, s, e) in windows])

    x_use, y_use = sample(x_all, y_all, k=n_samples)
    parts = split(x_use, y_use, k=200)

    for idx, x_part, y_part in tqdm(parts):
        n = len(x_part)
        xs = np.zeros((n, timesteps, 224, 224, 3), dtype=np.float32)
        ys = np.zeros((n, 1), dtype=np.uint8)
        for i in range(n):
            (video_dir, s, e) = x_part[i]
            for f in range(s, e):
                path = video_dir / 'frames' / '{:08d}.jpg'.format(f)
                xs[i][f - s] = read_img(path)
            ys[i] = y_part[i]
        np.save(str(target_dir / 'x_{:05d}.npy'.format(idx)), xs)
        np.save(str(target_dir / 'y_{:05d}.npy'.format(idx)), ys)
        del xs, ys


def image_generator(npy_dir, batch_size):
    x_paths = sorted(npy_dir.glob('x_*.npy'))
    y_paths = sorted(npy_dir.glob('y_*.npy'))
    idx = 0
    x_batch = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
    y_batch = np.zeros((batch_size, 1), dtype=np.uint8)
    while True:
        for x_path, y_path in zip(x_paths, y_paths):
            x_part = np.load(x_path)
            y_part = np.load(y_path)
            for x, y in zip(x_part, y_part):
                x_batch[idx] = x
                y_batch[idx] = y
                if idx + 1 == batch_size:
                    yield x_batch, y_batch
                idx = (idx + 1) % batch_size
            del x_part, y_part


def window_generator(npy_dir, batch_size):
    x_paths = sorted(npy_dir.glob('x_*.npy'))
    y_paths = sorted(npy_dir.glob('y_*.npy'))
    idx = 0
    x_batch = np.zeros((batch_size, TIMESTEPS, 224, 224, 3), dtype=np.float32)
    y_batch = np.zeros((batch_size, 1), dtype=np.uint8)
    while True:
        for x_path, y_path in zip(x_paths, y_paths):
            x_part = np.load(x_path)
            y_part = np.load(y_path)
            for x, y in zip(x_part, y_part):
                x_batch[idx] = x
                y_batch[idx] = y
                if idx + 1 == batch_size:
                    yield x_batch, y_batch
                idx = (idx + 1) % batch_size
            del x_part, y_part


image_train_gen = image_generator(IMAGE_TRAIN, 40)
image_val_gen = image_generator(IMAGE_VAL, 40)
window_train_gen = window_generator(WINDOW_TRAIN, 30)
window_val_gen = window_generator(WINDOW_VAL, 30)


if __name__ == '__main__':
    check()
    for folder in [IMAGE_TRAIN, IMAGE_VAL, WINDOW_TRAIN, WINDOW_VAL]:
        folder.mkdir(parents=True, exist_ok=True)
    print('Train data:')
    pprint(TRAIN_DIRS)
    print('Validation data:')
    pprint(VAL_DIRS)
    # gen_image_npy(TRAIN_DIRS, IMAGE_TRAIN, N_IMAGE_TRAIN)
    # gen_image_npy(VAL_DIRS, IMAGE_VAL, N_IMAGE_VAL)
    gen_window_npy(TRAIN_DIRS, WINDOW_TRAIN, N_WINDOW_TRAIN)
    gen_window_npy(VAL_DIRS, WINDOW_VAL, N_WINDOW_VAL)
Python
0
@@ -640,16 +640,25 @@ l.json') +%5B'label'%5D %0A
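Decoding the percent-escapes in the hunk above (%5B/%5D are square brackets, %0A a newline), the commit indexes into the parsed JSON so that check() compares the frame count against the per-frame label list instead of the top-level dict. A minimal sketch of the patched function, relying on the DIRS constant and read_json helper defined in the rest of the file:

    def check():
        # One label entry is expected per extracted frame in each video folder.
        for folder in DIRS:
            label = read_json(folder / 'label.json')['label']  # ['label'] is what the hunk adds
            label_len = len(label)
            n_frames = len(list((folder / 'frames/').iterdir()))
            assert n_frames == label_len, '{}: {}, {}'.format(folder, label_len, n_frames)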
8dcc7deb52fcbc907c4ea57102ae94daabc70502
use refactored frepple data load method
contrib/django/freppledb/execute/commands.py
contrib/django/freppledb/execute/commands.py
from __future__ import print_function import os, sys from datetime import datetime from django.db import transaction, DEFAULT_DB_ALIAS from django.conf import settings from freppledb.common.models import Parameter from freppledb.execute.models import Task import frepple def printWelcome(prefix = 'frepple', database = DEFAULT_DB_ALIAS): # Send the output to a logfile if database == DEFAULT_DB_ALIAS: frepple.settings.logfile = os.path.join(settings.FREPPLE_LOGDIR,'%s.log' % prefix) else: frepple.settings.logfile = os.path.join(settings.FREPPLE_LOGDIR,'%s_%s.log' % (prefix,database)) # Welcome message if settings.DATABASES[database]['ENGINE'] == 'django.db.backends.sqlite3': print("frePPLe on %s using sqlite3 database '%s'" % ( sys.platform, settings.DATABASES[database].get('NAME','') )) else: print("frePPLe on %s using %s database '%s' as '%s' on '%s:%s'" % ( sys.platform, settings.DATABASES[database].get('ENGINE','').split('.')[-1], settings.DATABASES[database].get('NAME',''), settings.DATABASES[database].get('USER',''), settings.DATABASES[database].get('HOST',''), settings.DATABASES[database].get('PORT','') )) task = None def logProgress(val, database = DEFAULT_DB_ALIAS): global task transaction.enter_transaction_management(using=database) try: if not task and 'FREPPLE_TASKID' in os.environ: try: task = Task.objects.all().using(database).get(pk=os.environ['FREPPLE_TASKID']) except: raise Exception("Task identifier not found") if task: if task.status == 'Canceling': task.status = 'Cancelled' task.save(using=database) sys.exit(2) else: task.status = '%d%%' % val task.save(using=database) finally: transaction.commit(using=database) transaction.leave_transaction_management(using=database) def logMessage(msg, database = DEFAULT_DB_ALIAS): global task if task: transaction.enter_transaction_management(managed=False, using=database) transaction.managed(False, using=database) try: task.message = msg task.save(using=database) finally: transaction.commit(using=database) transaction.leave_transaction_management(using=database) def createPlan(database = DEFAULT_DB_ALIAS): # Auxiliary functions for debugging def debugResource(res,mode): # if res.name != 'my favorite resource': return print("=> Situation on resource", res.name) for j in res.loadplans: print("=> ", j.quantity, j.onhand, j.startdate, j.enddate, j.operation.name, j.operationplan.quantity, j.setup) def debugDemand(dem,mode): if dem.name == 'my favorite demand': print("=> Starting to plan demand ", dem.name) solver.loglevel = 2 else: solver.loglevel = 0 # Create a solver where the plan type are defined by an environment variable try: plantype = int(os.environ['FREPPLE_PLANTYPE']) except: plantype = 1 # Default is a constrained plan try: constraint = int(os.environ['FREPPLE_CONSTRAINT']) except: constraint = 15 # Default is with all constraints enabled solver = frepple.solver_mrp(name = "MRP", constraints = constraint, plantype = plantype, loglevel=int(Parameter.getValue('plan.loglevel', database, 0)), lazydelay = int(Parameter.getValue('lazydelay', database, '86400')), allowsplits = (Parameter.getValue('allowsplits', database, 'true') == "true"), plansafetystockfirst = False #userexit_resource=debugResource, #userexit_demand=debugDemand ) print("Plan type: ", plantype) print("Constraints: ", constraint) solver.solve() def exportPlan(database = DEFAULT_DB_ALIAS): if settings.DATABASES[database]['ENGINE'] == 'django.db.backends.postgresql_psycopg2': from freppledb.execute.export_database_plan_postgresql import exportfrepple as 
export_plan_to_database else: from freppledb.execute.export_database_plan import exportfrepple as export_plan_to_database export_plan_to_database() if __name__ == "__main__": # Select database try: db = os.environ['FREPPLE_DATABASE'] or DEFAULT_DB_ALIAS except: db = DEFAULT_DB_ALIAS # Use the test database if we are running the test suite if 'FREPPLE_TEST' in os.environ: settings.DATABASES[db]['NAME'] = settings.DATABASES[db]['TEST_NAME'] if 'TEST_CHARSET' in os.environ: settings.DATABASES[db]['CHARSET'] = settings.DATABASES[db]['TEST_CHARSET'] if 'TEST_COLLATION' in os.environ: settings.DATABASES[db]['COLLATION'] = settings.DATABASES[db]['TEST_COLLATION'] if 'TEST_USER' in os.environ: settings.DATABASES[db]['USER'] = settings.DATABASES[db]['TEST_USER'] printWelcome(database=db) logProgress(1, db) print("\nStart loading data from the database at", datetime.now().strftime("%H:%M:%S")) frepple.printsize() from freppledb.execute.load import loadfrepple loadfrepple(db) frepple.printsize() logProgress(33, db) print("\nStart plan generation at", datetime.now().strftime("%H:%M:%S")) createPlan(db) frepple.printsize() logProgress(66, db) #print("\nStart exporting static model to the database at", datetime.now().strftime("%H:%M:%S")) #from freppledb.execute.export_database_static import exportStaticModel #exportStaticModel(database=db, source=None).run() print("\nStart exporting plan to the database at", datetime.now().strftime("%H:%M:%S")) exportPlan(db) #print("\nStart saving the plan to flat files at", datetime.now().strftime("%H:%M:%S")) #from freppledb.execute.export_file_plan import exportfrepple as export_plan_to_file #export_plan_to_file() #print("\nStart saving the plan to an XML file at", datetime.now().strftime("%H:%M:%S")) #frepple.saveXMLfile("output.1.xml","PLANDETAIL") #frepple.saveXMLfile("output.2.xml","PLAN") #frepple.saveXMLfile("output.3.xml","STANDARD") #print("Start deleting model data at", datetime.now().strftime("%H:%M:%S")) #frepple.erase(True) #frepple.printsize() print("\nFinished planning at", datetime.now().strftime("%H:%M:%S")) logProgress(100, db)
Python
0
@@ -4899,32 +4899,54 @@ load -frepple%0A loadfrepple(db +Data%0A loadData(database=db, filter=None).run( )%0A
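Decoded, the hunk swaps the old loadfrepple function for the refactored class-based loader named in the commit subject. A sketch of the patched load step inside the __main__ block; the loadData name and its (database, filter) signature come from the hunk itself and are not verified against the freppledb codebase:

    print("\nStart loading data from the database at", datetime.now().strftime("%H:%M:%S"))
    frepple.printsize()
    from freppledb.execute.load import loadData   # was: loadfrepple
    loadData(database=db, filter=None).run()      # construct the loader object, then run it
    frepple.printsize()
    logProgress(33, db)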
0c88ed9bce4004938d7119f784b82a630b22b95d
Revise doc string with complexity
alg_bellman_ford_shortest_path.py
alg_bellman_ford_shortest_path.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def update_distance(v, v_neighbor, w_graph_d, distance_d, previous_d): if (distance_d[v_neighbor] > distance_d[v] + w_graph_d[v][v_neighbor]): distance_d[v_neighbor] = ( distance_d[v] + w_graph_d[v][v_neighbor]) previous_d[v_neighbor] = v return distance_d, previous_d def bellman_ford(w_graph_d, start_vertex): """Bellman-Ford algorithm for weighted / negative graph.""" distance_d = {v: np.inf for v in w_graph_d.keys()} previous_d = {v: None for v in w_graph_d.keys()} distance_d[start_vertex] = 0 n = len(w_graph_d.keys()) # Run through |V| - 1 times. for i in xrange(1, n): # Run through all edges. for v in w_graph_d.keys(): for v_neighbor in w_graph_d[v].keys(): distance_d, previous_d = update_distance( v, v_neighbor, w_graph_d, distance_d, previous_d) # Check negative cycle. _distance_d = distance_d.copy() _previous_d = previous_d.copy() for v in w_graph_d.keys(): for v_neighbor in w_graph_d[v].keys(): _distance_d, _previous_d = update_distance( v, v_neighbor, w_graph_d, _distance_d, _previous_d) if _distance_d != distance_d: raise ValueError('Negative cycle exists.') return distance_d, previous_d def main(): w_graph_d = { 's': {'a': 2, 'b': 6}, 'a': {'b': 3, 'c': 1}, # 'b': {'a': -5, 'd': 2}, # With negative cycle. 'b': {'a': -2, 'd': 2}, 'c': {'b': 1, 'e': 4, 'f': 2}, 'd': {'c': 3, 'f': 2}, 'e': {}, 'f': {'e': 1} } start_vertex = 's' distance_d, previous_d = bellman_ford(w_graph_d, start_vertex) print('distance_d: {}'.format(distance_d)) print('previous_d: {}'.format(previous_d)) if __name__ == '__main__': main()
Python
0.000006
@@ -543,16 +543,59 @@ thm for +single-source shortest path problem%0A in weighted @@ -599,9 +599,11 @@ ted -/ +and neg @@ -614,16 +614,73 @@ e graph. +%0A%0A Time complexity for graph G(V, W): O(%7CV%7C*%7CE%7C).%0A %22%22%22%0A
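Decoded (%7C is the pipe character), the hunk only rewrites the docstring: the function is now described as solving the single-source shortest path problem on weighted graphs with negative edges, and the O(|V|*|E|) bound is stated. A condensed sketch of the function with the revised docstring, using the file's update_distance helper and numpy import; range stands in for the file's Python 2 xrange, and the negative-cycle check is elided:

    def bellman_ford(w_graph_d, start_vertex):
        """Bellman-Ford algorithm for single-source shortest path problem
        in weighted and negative graph.

        Time complexity for graph G(V, W): O(|V|*|E|).
        """
        distance_d = {v: np.inf for v in w_graph_d.keys()}
        previous_d = {v: None for v in w_graph_d.keys()}
        distance_d[start_vertex] = 0
        # |V| - 1 passes, each relaxing all |E| edges once: hence O(|V|*|E|).
        for _ in range(1, len(w_graph_d)):
            for v in w_graph_d.keys():
                for v_neighbor in w_graph_d[v].keys():
                    distance_d, previous_d = update_distance(
                        v, v_neighbor, w_graph_d, distance_d, previous_d)
        return distance_d, previous_d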
e954a0a400eeca33bfba6b5eada12728c3b15287
Set window active if AppQWidget is minimized
alignak_app/widgets/app_widget.py
alignak_app/widgets/app_widget.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2015-2016: # Matthieu Estrada, ttamalfor@gmail.com # # This file is part of (AlignakApp). # # (AlignakApp) is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # (AlignakApp) is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>. """ App Widget manage creation of a QWidget to make a template for all QWidgets of Alignak-app """ import sys from logging import getLogger from alignak_app.core.utils import get_image_path, init_config, get_css try: __import__('PyQt5') from PyQt5.QtWidgets import QApplication # pylint: disable=no-name-in-module from PyQt5.QtWidgets import QWidget, QStyle, QVBoxLayout # pylint: disable=no-name-in-module from PyQt5.QtWidgets import QLabel, QStyleOption # pylint: disable=no-name-in-module from PyQt5.QtWidgets import QHBoxLayout, QPushButton # pylint: disable=no-name-in-module from PyQt5.Qt import Qt, QIcon, QPixmap, QFrame # pylint: disable=no-name-in-module except ImportError: # pragma: no cover from PyQt4.Qt import QApplication # pylint: disable=import-error from PyQt4.Qt import QWidget, QStyle, QVBoxLayout # pylint: disable=import-error from PyQt4.Qt import QLabel, QStyleOption # pylint: disable=import-error from PyQt4.Qt import QHBoxLayout, QPushButton # pylint: disable=import-error from PyQt4.Qt import Qt, QIcon, QPixmap, QFrame # pylint: disable=import-error logger = getLogger(__name__) class AppQWidget(QFrame): """ Class who create a QWidget template. 
""" def __init__(self, parent=None): super(AppQWidget, self).__init__(parent) self.setWindowFlags(Qt.FramelessWindowHint) self.setWindowIcon(QIcon(get_image_path('icon'))) self.setStyleSheet(get_css()) self.offset = None def initialize(self, title): """ Initialize the QWidget, with its "title" :param title: title of the QWidget :type title: str """ self.setWindowTitle(title) self.setObjectName('app_widget') main_layout = QVBoxLayout() main_layout.setContentsMargins(0, 0, 0, 0) self.setLayout(main_layout) main_layout.addWidget(self.get_logo_widget(title)) self.setAttribute(Qt.WA_TransparentForMouseEvents) def get_logo_widget(self, title): """ Return the logo QWidget :return: logo QWidget :rtype: QWidget """ logo_widget = QWidget() logo_widget.setFixedHeight(45) logo_widget.setObjectName('logo') logo_layout = QHBoxLayout() logo_widget.setLayout(logo_layout) logo_label = QLabel() logo_label.setPixmap(QPixmap(get_image_path('alignak'))) logo_label.setFixedSize(121, 35) logo_label.setScaledContents(True) logo_layout.addWidget(logo_label, 0) title_label = QLabel('<h3>%s</h3>' % title) title_label.setObjectName('title') title_label.setAttribute(Qt.WA_TransparentForMouseEvents) logo_layout.addWidget(title_label, 1) logo_layout.setAlignment(title_label, Qt.AlignHCenter) minimize_btn = QPushButton() minimize_btn.setIcon(QIcon(get_image_path('minimize'))) minimize_btn.setFixedSize(22, 22) minimize_btn.setObjectName('app_widget') minimize_btn.clicked.connect(self.minimize) if 'Notification' in self.windowTitle(): minimize_btn.setEnabled(False) logo_layout.addWidget(minimize_btn, 2) maximize_btn = QPushButton() maximize_btn.setIcon(QIcon(get_image_path('maximize'))) maximize_btn.setFixedSize(22, 22) maximize_btn.setObjectName('app_widget') maximize_btn.clicked.connect(self.minimize_maximize) if 'Notification' in self.windowTitle(): maximize_btn.setEnabled(False) logo_layout.addWidget(maximize_btn, 3) close_btn = QPushButton() close_btn.setIcon(QIcon(get_image_path('exit'))) close_btn.setFixedSize(22, 22) close_btn.setObjectName('app_widget') close_btn.clicked.connect(self.close) logo_layout.addWidget(close_btn, 4) return logo_widget def minimize(self): """ Minimize QWidget """ if self.windowState() == Qt.WindowMinimized: self.setWindowState(Qt.WindowNoState) else: self.setWindowState(Qt.WindowMinimized) def minimize_maximize(self): """ Minimize / Maximize QWidget """ if self.windowState() == Qt.WindowMaximized: self.setWindowState(Qt.WindowNoState) else: self.setWindowState(Qt.WindowMaximized) def center(self): """ Center QWidget """ screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) center = QApplication.desktop().screenGeometry(screen).center() self.move(center.x() - (self.width() / 2), center.y() - (self.height() / 2)) def show_widget(self): """ Show and center AppQWidget """ self.center() self.show() def add_widget(self, widget): """ Add the main QWidget of AppQWidget :param widget: QWidget to add :type widget: QWidget """ self.setMinimumSize(widget.size()) self.layout().addWidget(widget, 2) def mousePressEvent(self, event): """ QWidget.mousePressEvent(QMouseEvent) """ if 'Notification' not in self.windowTitle(): self.offset = event.pos() def mouseMoveEvent(self, event): """ QWidget.mousePressEvent(QMouseEvent) """ if 'Notification' not in self.windowTitle(): try: x = event.globalX() y = event.globalY() x_w = self.offset.x() y_w = self.offset.y() self.move(x - x_w, y - y_w) except AttributeError as e: logger.warning('Move Event %s: %s', self.objectName(), str(e)) if 
__name__ == '__main__': init_config() app = QApplication(sys.argv) app_widget = AppQWidget() app_widget.initialize('Alignak Status') widget_test = QWidget() widget_test.setMinimumSize(800, 600) layout_test = QVBoxLayout() widget_test.setLayout(layout_test) label_text = QLabel('This is a text') layout_test.addWidget(label_text) app_widget.add_widget(widget_test) app_widget.show_widget() sys.exit(app.exec_())
Python
0.000001
@@ -5700,16 +5700,53 @@ f.show() +%0A QWidget.activateWindow(self) %0A%0A de
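Decoded, the hunk appends one statement to show_widget(): an unbound QWidget.activateWindow(self) call right after self.show(), which asks the window manager to raise and refocus the widget, matching the subject's intent of re-activating a minimized AppQWidget. The patched method as the hunk implies it:

    def show_widget(self):
        """
        Show and center AppQWidget
        """
        self.center()
        self.show()
        # Added by this commit: bring the (possibly minimized) window back to front.
        QWidget.activateWindow(self)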
ad3554ae58f65a295ac94c131d8193e0b2e7e6f8
Add reminder to look at the number of terms returned
termsuggester/word2vec.py
termsuggester/word2vec.py
from gensim.models import Word2Vec import logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) class Word2VecSuggester(): def __init__(self, modelfile): try: self.model = Word2Vec.load(modelfile) logger.info('Load Word2Vec model "{}"'.format(modelfile)) except IOError: logger.warn('Unable to load Word2Vec model "{}"'.format(modelfile)) logger.warn('Was the train_word2vec script run?') self.model = None def suggest_terms(self, query_word): if self.model is not None: results = self.model.most_similar(positive=[query_word], negative=[]) suggestions = {} for word, weight in results: suggestions[word] = weight return suggestions else: return {}
Python
0
@@ -589,16 +589,94 @@ _word):%0A + # TODO: make the number of terms returned a parameter of the function%0A @@ -828,16 +828,25 @@ ative=%5B%5D +, topn=10 )%0A
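Decoded, the hunks add the TODO about parameterizing the result count and make gensim's default explicit by passing topn=10 to most_similar (topn is a real gensim keyword argument, and 10 is its default). The patched method would read:

    def suggest_terms(self, query_word):
        # TODO: make the number of terms returned a parameter of the function
        if self.model is not None:
            results = self.model.most_similar(positive=[query_word], negative=[], topn=10)
            suggestions = {}
            for word, weight in results:
                suggestions[word] = weight
            return suggestions
        else:
            return {}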
e06fdea0355f7d4df80c0eeba9a30486d4ddb158
add types to enum testcases
altair_parser/tests/_testcases.py
altair_parser/tests/_testcases.py
""" JSONSchema test cases. Each object defined in this file should be a dictionary with three keys: - 'schema' should be to a dictionary defining a valid JSON schema. - 'valid' should be a list of dictionaries, each of which is a valid instance under the specified schema. - 'invalid' should be a list of dictionaries, each of which is an invalid instance under the specified schema. These test cases are used by test_testcases.py """ simple_types = { 'schema': { "type": "object", "properties": { "str": {"type": "string"}, "num": {"type": "number"}, "int": {"type": "integer"}, "bool": {"type": "boolean"}, "null": {"type": "null"} } }, 'valid': [ { "str": "hello world", "num": 3.141592653, "int": 42, "bool": True, "null": None } ], 'invalid': [ { "str": 100, "num": 3.141592653, "int": 42, "bool": True, "null": None }, { "str": "hello world", "num": "3.14", "int": 42, "bool": True, "null": None }, { "str": "hello world", "num": 3.141592653, "int": 4.2, "bool": True, "null": None }, { "str": "hello world", "num": 3.141592653, "int": 42, "bool": "True", "null": None }, { "str": "hello world", "num": 3.141592653, "int": 42, "bool": "True", "null": 123 }, ] } compound_types = { 'schema': { "type": "object", "properties": { "str_or_num": {"type": ["string", "number"]}, "num_or_null": {"type": ["number", "null"]} } }, 'valid': [ { "str_or_num": 42, "num_or_null": None }, { "str_or_num": "42", "num_or_null": 42 } ], 'invalid': [ { "str_or_num": [1, 2, 3], "num_or_null": None }, { "str_or_num": None, "num_or_null": 42 }, { "str_or_num": 50, "num_or_null": "hello" } ] } array_types = { 'schema': { 'properties': { 'intarray': { 'type': 'array', 'items': {'type': 'integer'} }, 'strnullarray': { 'type': 'array', 'items': {'type': ['string', 'null']} } } }, 'valid': [ { 'intarray': [1, 2, 3], 'strnullarray': ["hello", "there", None] } ], 'invalid': [ { 'intarray': [1, 2, 3.14], 'strnullarray': ["hello", "there", None] }, { 'intarray': [1, 2, 3], 'strnullarray': [42, "str", None] } ], } enum_types = { 'schema': { 'properties': { 'intenum': { 'enum': [1, 2, 3] }, 'strenum': { 'enum': ['a', 'b', 'c'] }, 'mixedenum': { 'enum': [1, 'A', False, None], } } }, 'valid': [ { 'intenum': 3, 'strenum': 'b', 'mixedenum': 'A' }, { 'intenum': 2, 'strenum': 'a', 'mixedenum': False }, { 'intenum': 1, 'strenum': 'c', 'mixedenum': None } ], 'invalid': [ { 'intenum': '3', 'strenum': 'b', 'mixedenum': 'A' }, { 'intenum': 2, 'strenum': 'a', 'mixedenum': 'False' }, { 'intenum': 1, 'strenum': 3.14, 'mixedenum': None } ] }
Python
0.000001
@@ -120,11 +120,8 @@ be -to a di @@ -3224,16 +3224,51 @@ 1, 2, 3%5D +,%0A 'type': 'integer' %0A @@ -3339,16 +3339,50 @@ b', 'c'%5D +,%0A 'type': 'string' %0A
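Decoded, the first hunk drops the stray "to" in the module docstring ("should be a dictionary"), and the other two pair the homogeneous enums with explicit JSON Schema type keywords; mixedenum stays untyped because its members span integer, string, boolean, and null. The patched schema fragment of enum_types:

    'schema': {
        'properties': {
            'intenum': {
                'enum': [1, 2, 3],
                'type': 'integer'      # added: members are all integers
            },
            'strenum': {
                'enum': ['a', 'b', 'c'],
                'type': 'string'       # added: members are all strings
            },
            'mixedenum': {
                'enum': [1, 'A', False, None],   # heterogeneous, left untyped
            }
        }
    }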
c15dda4fb4671975e4dcbe75141bf5ab0572a85e
move timing to library mode
main.py
main.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime as dt import sys import os import unittest import xbmc import xbmcgui from lib import constants as const from lib import library from lib.utils import (os_join, uni_join) from lib.xbmcwrappers import (rpc, log, dialogs, open_settings) if const.os == "win": import pywin32setup from lib import playback from lib.remote import Remote def start(): open_settings(category=2, action=1) def koalasetup(): if not os.path.exists(const.userdatafolder): os.makedirs(const.userdatafolder) if not os.path.exists(os_join(const.libpath, "%s shows" % const.provider)): os.makedirs(os_join(const.libpath, "%s shows" % const.provider)) if not os.path.exists(os_join(const.libpath, "%s movies" % const.provider)): os.makedirs(os_join(const.libpath, "%s movies" % const.provider)) def is_libpath_added(): sources = rpc("Files.GetSources", media="video") for source in sources.get('sources', []): if source['file'].startswith(uni_join(const.libpath, const.provider)): return True return False def refresh_settings(): xbmc.executebuiltin('Dialog.Close(dialog)') xbmc.executebuiltin('ReloadSkin') xbmc.executebuiltin('Addon.OpenSettings(%s)' % const.addonid) def run_testsuite(): suite = unittest.TestLoader().discover(start_dir='tests') unittest.TextTestRunner().run(suite) def configure_remote(): remote = Remote() remote.configure() def test(): pass def get_params(argv): params = {} if argv in (["main.py"], ['']): # if addon-icon clicked or addon selected in program addons list params = {"mode": "main", "action": "start"} else: # if action triggered from settings/service arg_pairs = argv[1:] for arg_pair in arg_pairs: arg, val = arg_pair.split('=') params[arg] = val if not params: raise Exception("Unknown sys argv: %s" % argv) return params def watch_mode(action): playback.live(action) def library_mode(action): if xbmcgui.Window(10000).getProperty("%s running" % const.addonname) == "true": if action in ["startup", "schedule"]: return run = dialogs.yesno(heading="Running", line1="Koala is running. ", line2="Running multiple instances cause instablity.", line3="Continue?") if not run: return koalasetup() if not is_libpath_added(): dialogs.ok(heading="Koala path not in video sources", line1="Koala library paths have not been added to Kodi video sources:", line2=uni_join(const.libpath, "%s shows" % const.provider), line3=uni_join(const.libpath, "%s movies" % const.provider)) return try: xbmcgui.Window(10000).setProperty("%s running" % const.addonname, "true") library.main(action) finally: xbmcgui.Window(10000).setProperty("%s running" % const.addonname, "false") def main_mode(action): switch = { "start": start, "configure_remote": configure_remote, "refresh_settings": refresh_settings, "test": test, "run_testsuite": run_testsuite, } switch[action]() def main(argv=None): if argv is None: argv = get_params(sys.argv) mode = argv['mode'] action = argv.get('action', None) settings_coord = argv['reopen_settings'].split() if 'reopen_settings' in argv else None try: starttime = dt.datetime.now() log.info("Starting %s" % const.addonname) switch = { "main": main_mode, "library": library_mode, "watch": watch_mode, } selected_mode = switch[mode] selected_mode(action) finally: log.info("%s finished (in %s)" % (const.addonname, str(dt.datetime.now() - starttime))) if settings_coord: open_settings(*settings_coord) if __name__ == '__main__': main()
Python
0.000001
@@ -2400,16 +2400,20 @@ ultiple +can instance @@ -2867,29 +2867,88 @@ return%0A +%0A -try:%0A +starttime = dt.datetime.now()%0A log.info(%22Starting %25s%22 %25 action)%0A xbmc @@ -3017,16 +3017,25 @@ %22true%22)%0A + try:%0A @@ -3151,16 +3151,101 @@ false%22)%0A + log.info(%22Finished %25s in %25s%22 %25 (action, str(dt.datetime.now() - starttime)))%0A %0A%0Adef ma @@ -3652,181 +3652,49 @@ argv -%5B'reopen_settings'%5D.split() if 'reopen_settings' in argv else None%0A%0A try:%0A starttime = dt.datetime.now()%0A log.info(%22Starting %25s%22 %25 const.addonname)%0A +.get('reopen_settings', %22%22).split()%0A%0A - swit @@ -3696,28 +3696,24 @@ switch = %7B%0A - %22mai @@ -3735,20 +3735,16 @@ - - %22library @@ -3768,20 +3768,16 @@ - %22watch%22: @@ -3793,26 +3793,18 @@ de,%0A - - %7D%0A - sele @@ -3828,16 +3828,25 @@ h%5Bmode%5D%0A + try:%0A @@ -3884,104 +3884,8 @@ ly:%0A - log.info(%22%25s finished (in %25s)%22 %25 (const.addonname, str(dt.datetime.now() - starttime)))%0A
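The hunks here move the timing out of main() and into library_mode(), as the subject says: library_mode() now records its own start time, logs a per-action "Starting"/"Finished" pair around a narrower try/finally, and main() keeps only a try around the dispatched call while simplifying the reopen_settings lookup to a dict .get(). A sketch of both patched functions, reconstructed from the escaped hunks with line breaks and indentation inferred; the running-instance guard and libpath checks of library_mode are elided, and dt, log, xbmcgui, const, library and the *_mode helpers all come from the module itself:

    def library_mode(action):
        # ... running-instance guard and libpath checks unchanged ...
        starttime = dt.datetime.now()
        log.info("Starting %s" % action)
        xbmcgui.Window(10000).setProperty("%s running" % const.addonname, "true")
        try:
            library.main(action)
        finally:
            xbmcgui.Window(10000).setProperty("%s running" % const.addonname, "false")
        log.info("Finished %s in %s" % (action, str(dt.datetime.now() - starttime)))

    def main(argv=None):
        if argv is None:
            argv = get_params(sys.argv)
        mode = argv['mode']
        action = argv.get('action', None)
        settings_coord = argv.get('reopen_settings', "").split()

        switch = {
            "main": main_mode,
            "library": library_mode,
            "watch": watch_mode,
        }
        selected_mode = switch[mode]
        try:
            selected_mode(action)
        finally:
            if settings_coord:
                open_settings(*settings_coord)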
73a375a3adb140c270444e886b3df842e0b28a86
Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float.
numpy/core/tests/test_print.py
numpy/core/tests/test_print.py
import numpy as np from numpy.testing import * def check_float_type(tp): for x in [0, 1,-1, 1e10, 1e20] : assert_equal(str(tp(x)), str(float(x))) def test_float_types(): """ Check formatting. This is only for the str function, and only for simple types. The precision of np.float and np.longdouble aren't the same as the python float precision. """ for t in [np.float, np.double, np.longdouble] : yield check_float_type, t def check_complex_type(tp): for x in [0, 1,-1, 1e10, 1e20] : assert_equal(str(tp(x)), str(complex(x))) assert_equal(str(tp(x*1j)), str(complex(x*1j))) assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j))) def test_complex_types(): """Check formatting. This is only for the str function, and only for simple types. The precision of np.float and np.longdouble aren't the same as the python float precision. """ for t in [np.cfloat, np.cdouble, np.clongdouble] : yield check_complex_type, t if __name__ == "__main__": run_module_suite()
Python
0.000011
@@ -408,24 +408,26 @@ in %5Bnp.float +32 , np.double, @@ -971,21 +971,24 @@ in %5Bnp.c -float +omplex64 , np.cdo
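In NumPy, np.float is just the builtin float (i.e. float64, the same type as np.double), and np.cfloat aliases np.cdouble, so the old parameter lists exercised the 8-byte types twice and never touched a 4-byte float. Decoded, the hunks substitute np.float32 and np.complex64, exactly the "4 bytes float" the subject asks for. The patched generator loops, keeping the file's spacing and with docstrings elided:

    def test_float_types():
        for t in [np.float32, np.double, np.longdouble] :
            yield check_float_type, t

    def test_complex_types():
        for t in [np.complex64, np.cdouble, np.clongdouble] :
            yield check_complex_type, t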
8c8d97de4c5684f623194260997e01d9b5edbdae
Remove print statements.
dit/divergences/jensen_shannon_divergence.py
dit/divergences/jensen_shannon_divergence.py
""" The Jensen-Shannon Diverence. This is a reasonable measure of distinguishablity between distribution. """ from __future__ import division import numpy as np from six.moves import zip # pylint: disable=redefined-builtin import dit from ..distconst import mixture_distribution from ..shannon import entropy as H, entropy_pmf as H_pmf def jensen_shannon_divergence_pmf(pmfs, weights=None): """ The Jensen-Shannon Divergence: H(sum(w_i*P_i)) - sum(w_i*H(P_i)). The square root of the Jensen-Shannon divergence is a distance metric. Assumption: Linearly distributed probabilities. Parameters ---------- pmfs : NumPy array, shape (n,k) The `n` distributions, each of length `k` that will be mixed. weights : NumPy array, shape (n,) The weights applied to each pmf. This array will be normalized automatically. If None, each pmf is weighted equally. Returns ------- jsd: float The Jensen-Shannon Divergence """ pmfs = np.atleast_2d(pmfs) if weights is None: weights = np.ones(pmfs.shape[0], dtype=float) / pmfs.shape[0] else: if len(weights) != len(pmfs): msg = "number of weights != number of pmfs" raise dit.exceptions.ditException(msg) weights = np.asarray(weights, dtype=float) weights /= weights.sum() mixture = dit.math.pmfops.convex_combination(pmfs, weights) one = H_pmf(mixture) entropies = np.apply_along_axis(H_pmf, 1, pmfs) print(pmfs) print(entropies) print(entropies * weights) two = (entropies * weights).sum() return one - two def jensen_shannon_divergence(dists, weights=None): """ The Jensen-Shannon Divergence: H(sum(w_i*P_i)) - sum(w_i*H(P_i)). The square root of the Jensen-Shannon divergence is a distance metric. Parameters ---------- dists: [Distribution] The distributions, P_i, to take the Jensen-Shannon Divergence of. weights: [float], None The weights, w_i, to give the distributions. If None, the weights are assumed to be uniform. Returns ------- jsd: float The Jensen-Shannon Divergence Raises ------ ditException Raised if there `dists` and `weights` have unequal lengths. InvalidNormalization Raised if the weights do not sum to unity. InvalidProbability Raised if the weights are not valid probabilities. """ if weights is None: weights = np.array([1/len(dists)] * len(dists)) else: if hasattr(weights, 'pmf'): m = 'Likely user error. Second argument to JSD should be weights.' raise Exception(m) # validation of `weights` is done in mixture_distribution, # so we don't need to worry about it for the second part. mixture = mixture_distribution(dists, weights, merge=True) one = H(mixture) two = sum(w*H(d) for w, d in zip(weights, dists)) jsd = one - two return jsd
Python
0.000021
@@ -1501,76 +1501,8 @@ fs)%0A - print(pmfs)%0A print(entropies)%0A print(entropies * weights)%0A
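Decoded, the hunk deletes the three debug print() calls that sat between the per-row entropy computation and the weighted sum, leaving the pmf-level divergence as the plain formula JSD = H(sum_i w_i * P_i) - sum_i w_i * H(P_i). The patched tail of jensen_shannon_divergence_pmf:

    mixture = dit.math.pmfops.convex_combination(pmfs, weights)
    one = H_pmf(mixture)                              # H(sum(w_i * P_i))
    entropies = np.apply_along_axis(H_pmf, 1, pmfs)   # H(P_i) for each row
    two = (entropies * weights).sum()                 # sum(w_i * H(P_i))
    return one - two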