| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
47594f279d2fb0d0c8cc8f8d0c0bb86a43d0bf95
|
implement update for PMS
|
yags.py
|
yags.py
|
#!/usr/local/bin/python3
from tkinter import *
from subprocess import call
import threading
import queue
import time, random
import serial
SISPMCTL = "/usr/local/bin/sispmctl"
class PMSController(threading.Thread):
def __init__(self, queue):
self.__queue = queue
threading.Thread.__init__(self)
def run(self):
while 1:
item = self.__queue.get()
if item is None:
print('Stopping PMS Controller')
break # reached end of queue
# pretend we're doing something that takes 10-100 ms
#time.sleep(random.randint(10, 100) / 1000.0)
print("exec sispmctl with:", item, "started")
call([SISPMCTL, item[0], str(item[1])])
class App(Tk):
def button_command(self, i):
#print('i', i)
#print('Outlet', self.outlet[i])
#print('State', self.variable[i].get())
if self.variable[i].get() == 1:
param = "-o"
else:
param = "-f"
self.pms_queue.put([param, self.outlet[i]])
#call([SISPMCTL, param, str(self.outlet[i])])
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
#self.build_main_window()
self.grid()
self.exiting = False
self.variable = {'LEDs': IntVar(), 'Controller': IntVar(), 'Spindle': IntVar()}
for key, val in self.variable.items():
val.set(1)
text = ["LEDs", "Controller", "Spindle"]
self.outlet = {'LEDs': 4, 'Controller': 3, 'Spindle': 2}
self.pms_queue = queue.Queue(0)
PMSController(self.pms_queue).start()
cur_row = 0
Label(text='LEDs', width=15).grid(row=cur_row, column=0, sticky="E")
Radiobutton(self, text="On", padx = 20, variable=self.variable['LEDs'], command=lambda: self.button_command('LEDs'),
value=1).grid(row=cur_row, column=1)
Radiobutton(self, text="Off", padx = 20, variable=self.variable['LEDs'], command=lambda: self.button_command('LEDs'),
value=2).grid(row=cur_row, column=2)
cur_row += 1
Label(text='Controller', width=15).grid(row=cur_row, column=0, sticky="E")
Radiobutton(self, text="On", padx = 20, variable=self.variable['Controller'], command=lambda: self.button_command('Controller'),
value=1).grid(row=cur_row, column=1)
Radiobutton(self, text="Off", padx = 20, variable=self.variable['Controller'], command=lambda: self.button_command('Controller'),
value=2).grid(row=cur_row, column=2)
cur_row += 1
Label(text='Spindle', width=15).grid(row=cur_row, column=0, sticky="E")
Radiobutton(self, text="On", padx = 20, variable=self.variable['Spindle'], command=lambda: self.button_command('Spindle'),
value=1).grid(row=cur_row, column=1)
Radiobutton(self, text="Off", padx = 20, variable=self.variable['Spindle'], command=lambda: self.button_command('Spindle'),
value=2).grid(row=cur_row, column=2)
cur_row += 1
Button(self, text="Exit", fg="red", command=self.quit).grid(row=cur_row, column=1)
app = App()
app.mainloop()
print('Main app ended')
app.exiting = True
time.sleep(5)
app.pms_queue.put(None)
|
Python
| 0
|
@@ -509,16 +509,249 @@
 of queue
+
+            elif item is "update":
+                print('Updating Variables')
+                process = Popen([SISPMCTL, "-g", "all"], stdout=PIPE)
+                (output, err) = process.communicate()
+                print(output)
|
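The hunk above adds an "update" branch to the controller's queue loop. A minimal sketch of the loop with the patch applied, for readability (an illustration, not the committed file: the `Popen`/`PIPE` imports are assumed, since the old file only imports `call`, and the diff's `item is "update"` identity test is written here as `==`, the reliable string comparison):

```python
import threading
from subprocess import call, Popen, PIPE

SISPMCTL = "/usr/local/bin/sispmctl"  # path taken from the original file

class PMSController(threading.Thread):
    """Worker thread that drains a queue of sispmctl commands."""
    def __init__(self, q):
        self._queue = q
        threading.Thread.__init__(self)

    def run(self):
        while True:
            item = self._queue.get()
            if item is None:
                print('Stopping PMS Controller')
                break  # reached end of queue
            elif item == "update":
                print('Updating Variables')
                # `sispmctl -g all` reports the on/off state of every outlet
                process = Popen([SISPMCTL, "-g", "all"], stdout=PIPE)
                output, err = process.communicate()
                print(output)
            else:
                print("exec sispmctl with:", item)
                call([SISPMCTL, item[0], str(item[1])])
```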
9841256a1732663518bc7c4ec3d143023ed0ebe1
|
Load backends conditionally (so we don't force users to install unneeded deps).
|
zeya.py
|
zeya.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Phil Sung, Samson Yeung
#
# This file is part of Zeya.
#
# Zeya is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Zeya is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Zeya. If not, see <http://www.gnu.org/licenses/>.
# Zeya - a web music server.
# Work with python2.5
from __future__ import with_statement
import BaseHTTPServer
import getopt
import urllib
import sys
try:
import json
json.dumps
except (ImportError, AttributeError):
import simplejson as json
from rhythmbox import RhythmboxBackend
from directory import SingleRecursedDir
DEFAULT_PORT = 8080
DEFAULT_BACKEND = "rhythmbox"
# Store the state of the library.
library_contents = []
library_repr = ""
valid_backends = ['rhythmbox', 'directory']
class BadArgsError(Exception):
"""
Error due to incorrect command-line invocation of this program.
"""
def __init__(self, message):
self.error_message = message
def __str__(self):
return "Error: %s" % (self.error_message,)
# TODO: support a multithreaded server.
class ZeyaHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
Web server request handler.
"""
def do_GET(self):
"""
Handle a GET request.
"""
# http://host/ yields the library main page.
if self.path == '/':
self.serve_static_content('/library.html')
# http://host/getlibrary returns a representation of the music
# collection.
elif self.path == '/getlibrary':
self.serve_library()
# http://host/getcontent?key yields an Ogg stream of the file
# associated with the specified key.
elif self.path.startswith('/getcontent?'):
self.serve_content(urllib.unquote(self.path[12:]))
# All other paths are assumed to be static content.
# http://host/foo is mapped to resources/foo.
else:
self.serve_static_content(self.path)
def get_content_type(self, path):
"""
Return the MIME type associated with the given path.
"""
path = path.lower()
if path.endswith('.html'):
return 'text/html'
elif path.endswith('.png'):
return 'image/png'
elif path.endswith('.css'):
return 'text/css'
elif path.endswith('.ogg'):
return 'audio/ogg'
else:
return 'application/octet-stream'
def serve_content(self, path):
"""
Serve an audio stream (audio/ogg).
"""
self.send_response(200)
self.send_header('Content-type', 'audio/ogg')
self.end_headers()
backend.get_content(path, self.wfile)
self.wfile.close()
def serve_library(self):
"""
Serve a representation of the library.
We take the output of backend.get_library_contents(), dump it as JSON,
and give that to the client.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(library_repr.encode('utf-8'))
self.wfile.close()
def serve_static_content(self, path):
"""
Serve static content from the resources/ directory.
"""
try:
# path already has a leading '/' in front of it.
with open('resources' + path) as f:
self.send_response(200)
self.send_header('Content-type', self.get_content_type(path))
self.end_headers()
self.wfile.write(f.read())
self.wfile.close()
except IOError:
self.send_error(404, 'File not found: %s' % (path,))
def getOptions():
"""
Parse the arguments and return a tuple (show_help, backend, port), or raise
BadArgsError if the invocation was not valid.
show_help: whether user requested help information
backend: string containing backend to use (supported values are
"rhythmbox" and "directory")
port: port number to listen on
"""
help_msg = False
port = DEFAULT_PORT
backend_type = DEFAULT_BACKEND
try:
opts, file_list = getopt.getopt(sys.argv[1:], "hp:",
["help", "backend=", "port="])
except getopt.GetoptError:
raise BadArgsError("Unsupported options")
for flag, value in opts:
if flag in ("-h", "--help"):
help_msg = True
if flag in ("--backend",):
backend_type = value
if backend_type not in valid_backends:
raise BadArgsError("Unsupported backend type")
if flag in ("-p", "--port"):
try:
port = int(value)
except ValueError:
raise BadArgsError("Invalid port setting %r" % (value,))
return (help_msg, backend_type, port)
def usage():
print "Usage: zeya.py [-h|--help] [--backend=rhythmbox] [--port]"
def main(port):
global library_contents, library_repr
# Read the library.
print "Loading library..."
library_contents = backend.get_library_contents()
library_repr = json.dumps(library_contents, ensure_ascii=False)
server = BaseHTTPServer.HTTPServer(('', port), ZeyaHandler)
print "Listening on port %d" % (port,)
# Start up a web server.
print "Ready to serve!"
try:
server.serve_forever()
except KeyboardInterrupt:
pass
finally:
server.server_close()
if __name__ == '__main__':
try:
(show_help, backend_type, port) = getOptions()
except BadArgsError, e:
print e
usage()
sys.exit(1)
if show_help:
usage()
sys.exit(0)
print "Using %r backend" % (backend_type,)
if backend_type == "rhythmbox":
backend = RhythmboxBackend()
elif backend_type == 'directory':
backend = SingleRecursedDir('/vid/fragmede/music/pink/')
main(port)
|
Python
| 0
|
@@ -1008,88 +1008,8 @@
 on
 
-from rhythmbox import RhythmboxBackend
-from directory import SingleRecursedDir
-
 DEFA
@@ -6206,16 +6206,200 @@
 hmbox":
+        # Import the backend modules conditionally, so users don't have to
+        # install dependencies unless they are actually used.
+        from rhythmbox import RhythmboxBackend
@@ -6461,24 +6461,72 @@
 directory':
+        from directory import SingleRecursedDir
 back
|
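The hunks above move both backend imports from module scope into the branches that select them. A short sketch of the pattern (the `create_backend` wrapper is hypothetical; in the commit the imports simply sit inline under `if __name__ == '__main__':`):

```python
def create_backend(backend_type, music_path):
    if backend_type == "rhythmbox":
        # Deferred import: users of other backends never need this
        # module's dependencies installed.
        from rhythmbox import RhythmboxBackend
        return RhythmboxBackend()
    elif backend_type == 'directory':
        from directory import SingleRecursedDir
        return SingleRecursedDir(music_path)
    raise ValueError("Unsupported backend type: %r" % (backend_type,))
```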
1aae9f83bb0117e5bb02ab6579ff6f6e767752a5
|
Use safer .get to access dict
|
relative_import.py
|
relative_import.py
|
'''
Copyright (c) 2014 Joaquin Duo - File under MIT License
Import this module to enable explicit relative importing on a submodule or
sub-package running it as a main module. Doing so is useful for running smoke
tests or small scripts within the module.
If you are using this tool in production, make sure you do enough
testing (since it does some guessing to find the right package of
the module).
Usage:
------
To enable explicit relative importing in __main__, you simply import
this package before any relative import
import relative_import
from .my_pkg import foo, bar
Make sure your PYTHON_PATH is correctly set to solve the relative path of the
submodule/subpackage.
'''
from inspect import currentframe
from os import path
import importlib
import sys
def __get_search_path(main_file_dir, sys_path):
#Gather candidate search paths
paths = []
#look for paths containing the file
for pth in sys_path:
#convert relative path to absolute
pth = path.abspath(pth)
#filter __main__'s file directory, naturally it will be in the sys.path
#filter parent paths containing the package
if (pth != main_file_dir
and pth == path.commonprefix((pth, main_file_dir))):
paths.append(pth)
#check if we have results
if paths:
#we found candidates
#now look for the largest parent search path
paths.sort()
return paths[-1]
def __enable_relative_import():
'''
Enables explicit relative import in sub-modules when run as __main__
'''
#find caller locals
frame = currentframe()
#go two frames back to find who imported us
for _ in range(2):
frame = frame.f_back
#now we have access to the module globals
main_globals = frame.f_globals
#If __package__ is already set or it's not __main__, don't do anything.
# (in some cases relative_import could be called once from outside
# __main__ if it was not called in __main__)
# (also a reload of relative_import could trigger this function)
if main_globals['__package__'] or main_globals['__name__'] != '__main__':
return
#find __main__'s file directory
main_file_dir = path.dirname(path.abspath(main_globals['__file__']))
search_path = __get_search_path(main_file_dir, sys.path)
#no candidates for search path
if not search_path:
return
#solve package name from search path
pkg_str = path.relpath(main_file_dir, search_path).replace(path.sep, '.')
#import the package in order to set __package__ value later
try:
if '__init__.py' in main_globals['__file__']:
#The __main__ is __init__.py => its own package
#If we treat it as a normal module it would be imported twice
#So we simply reuse it
sys.modules[pkg_str] = sys.modules['__main__']
#We need to set __path__ because its needed for
#relative importing
sys.modules[pkg_str].__path__ = [main_file_dir]
else:
#we need to import the package to be available
importlib.import_module(pkg_str)
#finally enable relative import
main_globals['__package__'] = pkg_str
except ImportError as e:
#In many situations we won't care if it fails, simply report error
#main will fail anyway if finds an explicit relative import
print >> sys.stderr, e
#Enable relative import in __main__
#this function will be called only on the first import of this module (or on reloads)
__enable_relative_import()
|
Python
| 0
|
@@ -2088,33 +2088,37 @@
 if main_globals
-[
+.get(
 '__package__'] o
@@ -2114,17 +2114,17 @@
 ckage__'
-]
+)
 or main
@@ -2131,17 +2131,21 @@
 _globals
-[
+.get(
 '__name_
@@ -2146,17 +2146,17 @@
 _name__'
-]
+)
 != '__m
|
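A small illustration (with a hypothetical globals dict) of why `.get` is the safer access here: `__main__`'s globals are not guaranteed to carry every dunder key, and subscripting a missing key raises `KeyError` while `.get` returns `None`:

```python
main_globals = {'__name__': '__main__'}   # no '__package__' entry

# main_globals['__package__']      -> raises KeyError
# main_globals.get('__package__')  -> returns None, so the guard is safe
if main_globals.get('__package__') or main_globals.get('__name__') != '__main__':
    print('already packaged or not __main__: nothing to do')
```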
64f539e388c67e35b0434ad0c3669a141f00e110
|
Use pickle instead of json for revision generation
|
replipy/storage.py
|
replipy/storage.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import hashlib
import json
import time
import uuid
from abc import ABCMeta, abstractmethod
from collections import defaultdict
class ABCDatabase(object, metaclass=ABCMeta):
class Conflict(Exception):
"""Raises in case of conflict updates"""
def __init__(self, name):
self._name = name
self._start_time = int(time.time() * 10**6)
self._update_seq = 0
@abstractmethod
def __contains__(self, idx):
"""Verifies that document with specified idx exists"""
@property
def name(self):
"""Returns database symbolic name as string"""
return self._name
@property
def start_time(self):
"""Returns database start time in microseconds"""
return self._start_time
@property
def update_seq(self):
"""Returns current update sequence value"""
return self._update_seq
def info(self):
"""Returns database information object as dict"""
return {
'db_name': self.name,
'instance_start_time': str(self.start_time),
'update_seq': self.update_seq
}
@abstractmethod
def load(self, idx):
"""Returns document by specified idx"""
@abstractmethod
def store(self, doc, rev=None):
"""Creates document or updates if rev specified"""
@abstractmethod
def remove(self, idx, rev):
"""Removes document by specified idx and rev"""
@abstractmethod
def revs_diff(self, idrevs):
"""Returns missed revisions for specified id - revs mapping"""
@abstractmethod
def bulk_docs(self, docs, new_edits=True):
"""Bulk update docs"""
@abstractmethod
def ensure_full_commit(self):
"""Ensures that all changes are actually stored on disk"""
class MemoryDatabase(ABCDatabase):
def __init__(self, *args, **kwargs):
super(MemoryDatabase, self).__init__(*args, **kwargs)
self._docs = {}
def __contains__(self, item):
return item in self._docs
def _new_rev(self, doc):
oldrev = doc.get('_rev')
if oldrev is None:
seq, _ = 0, None
else:
seq, _ = oldrev.split('-', 1)
seq = int(seq)
hash = hashlib.md5(json.dumps(doc).encode('utf-8')).hexdigest()
newrev = '%d-%s' % (seq + 1, hash)
return newrev.lower()
def load(self, idx):
return self._docs[idx]
def store(self, doc, rev=None, new_edits=True):
if '_id' not in doc:
doc['_id'] = str(uuid.uuid4()).lower()
if rev is None:
rev = doc.get('_rev')
idx = doc['_id']
if new_edits:
if idx in self and self._docs[idx]['_rev'] != rev:
raise self.Conflict('Document update conflict')
elif idx not in self and rev is not None:
raise self.Conflict('Document update conflict')
doc['_rev'] = self._new_rev(doc)
else:
assert rev, 'Document revision missed'
doc['_rev'] = rev
idx, rev = doc['_id'], doc['_rev']
self._docs[idx] = doc
self._update_seq += 1
return idx, rev
def remove(self, idx, rev):
if self._docs[idx]['_rev'] != rev:
raise self.Conflict('Document update conflict')
doc = {
'_id': idx,
'_rev': rev,
'_deleted': True
}
return self.store(doc, rev)
def revs_diff(self, idrevs):
res = defaultdict(dict)
for idx, revs in idrevs.items():
missing = []
if idx not in self:
missing.extend(revs)
res[idx]['missing'] = missing
continue
doc = self._docs[idx]
for rev in revs:
if doc['_rev'] != rev:
missing.append(rev)
if missing:
res[idx]['missing'] = missing
return res
def bulk_docs(self, docs, new_edits=True):
res = []
for doc in docs:
idx, rev = doc['_id'], doc.get('_rev')
try:
idx, rev = self.store(doc, rev, new_edits)
res.append({
'ok': True,
'id': idx,
'rev': rev
})
except Exception as err:
res.append({'id': idx,
'error': type(err).__name__,
'reason': str(err)})
return res
def ensure_full_commit(self):
return {
'ok': True,
'instance_start_time': self.info()['instance_start_time']
}
|
Python
| 0
|
@@ -232,20 +232,22 @@
 import
-json
+pickle
 import
@@ -2443,20 +2443,19 @@
-hash
+sig
 = hashl
@@ -2465,12 +2465,14 @@
 md5(
-json
+pickle
 .dum
@@ -2482,24 +2482,8 @@
 doc)
-.encode('utf-8')
 ).he
@@ -2529,20 +2529,19 @@
 eq + 1,
-hash
+sig
 )
|
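A standalone sketch of the revision generator after the change. Note the middle edits in the hunk: `pickle.dumps()` returns `bytes` directly, which is why the `.encode('utf-8')` step that `json.dumps()` required is dropped. One trade-off worth noting: pickle's byte stream is not canonical across Python versions, so the same document may hash to a different revision on different interpreters.

```python
import hashlib
import pickle

def new_rev(doc):
    oldrev = doc.get('_rev')
    seq = 0 if oldrev is None else int(oldrev.split('-', 1)[0])
    # pickle.dumps() already yields bytes, so no .encode('utf-8') is needed
    sig = hashlib.md5(pickle.dumps(doc)).hexdigest()
    return ('%d-%s' % (seq + 1, sig)).lower()

print(new_rev({'_id': 'doc1'}))                   # -> 1-<md5 hex digest>
print(new_rev({'_id': 'doc1', '_rev': '1-abc'}))  # -> 2-<md5 hex digest>
```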
e1cb95db85ef7edb274a278e1e2c46c2e87d0cb1
|
Update NLP thingie
|
api/controllers/nlp.py
|
api/controllers/nlp.py
|
from flask import Flask
from flask import session
from flask import Response
from flask import request
from flask import redirect, url_for
from flask import render_template
from api.core import app, db
from api.models.user import User
from api.models.page import Page
from gensim import corpora, models, similarities
import urllib
import json
import datetime
@app.route('/nlp/sentiment')
def nlp_sentiment():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json'
}
params = urllib.urlencode(params)
sentiment = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetTextSentiment?%s' % params).read()
)
return Response(json.dumps({
'accounts': sentiment
}), mimetype='application/json')
@app.route('/nlp/rank_keywords')
def npl_rank_keywords():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json',
'sentiment': 1
}
params = urllib.urlencode(params)
ranked = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetRankedKeywords?%s' % params).read()
)
return Response(json.dumps({
'accounts': ranked
}), mimetype='application/json')
@app.route('/nlp/similar')
def nlp_similar():
documents = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
# remove common words and tokenize
stoplist = set('for a of the and to in by from on with as a '.split())
texts = [[word for word in document.lower().split() if word not in stoplist] for document in documents]
# Create dictionary
dictionary = corpora.Dictionary(texts)
# Define corpus
corpus = [dictionary.doc2bow(text) for text in texts]
# Define LSI space
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=10)
# Get similarity of the doc vs documents
doc = "Human computer interaction"
vector = dictionary.doc2bow(doc.lower().split())
vector_lsi = lsi[vector]
index = similarities.MatrixSimilarity(lsi[corpus])
sims = index[vector_lsi]
# return Response(list(sims))
|
Python
| 0
|
@@ -2742,18 +2742,16 @@
 si]
 
-    #
 return
@@ -2763,16 +2763,41 @@
 nse(
-list(sims)
+sims, mimetype = 'application/json'
 )
|
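One caveat about the patched return: `similarities.MatrixSimilarity` yields a numpy float array, which `Response` will not serialize as JSON by itself. A serializable variant (an assumption, not what the commit does) would convert the scores first:

```python
import json

def sims_to_json(sims):
    # pair each document index with a plain-float similarity score
    return json.dumps([{'doc': i, 'score': float(s)} for i, s in enumerate(sims)])

print(sims_to_json([0.99, 0.1]))   # stand-in list for the gensim result
```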
425de26b5a7041905862519c727491dd8ec17173
|
Add root tag.
|
api/gen_detail_file.py
|
api/gen_detail_file.py
|
import timeit
import gevent
import math
from gevent.queue import Queue
from detail import AnimeDetail
from gen_id_file import IDS_FILENAME
from gevent import monkey; monkey.patch_socket()
DETAILS_FILENAME = 'animes.xml'
BATCH_SIZE = 10
WORKER_NUM = 8
MAXIMUM_WORKER_NUM = 8
NAMES_FOR_WORKER = ['Joe', 'Adam', 'Matt', 'Bob', 'Sam', 'Mary', 'Jack', 'Peter']
FILE_SUFFIX = '_batch.xml'
# stores tuple like (start, end)
tasks = Queue()
def worker(name, work):
with open(name + FILE_SUFFIX, 'w') as f:
gevent.sleep(0)
ad = AnimeDetail()
while not tasks.empty():
task = tasks.get()
request = '/'.join([id.strip() for id in work[task[0]:task[1]]])
print name + ' woke up doing work.. ' + request
batch_data = ad.fetch_details(request)
f.write(batch_data)
def boss(name, work):
print name + ' woke up...'
count = 0
for i in range(int(math.ceil(float(len(work)) / BATCH_SIZE))):
start = i * BATCH_SIZE
end = min((i + 1) * BATCH_SIZE, len(work))
tasks.put((start, end))
count += 1
print 'Work has been divided into ' + str(count) + ' batches.'
def process(work, num_workers):
    # make sure worker num doesn't exceed the limit
    num_workers = min(num_workers, MAXIMUM_WORKER_NUM)
# boss starts
gevent.spawn(boss, 'Terence', work).join()
# workers start
gevent.joinall([gevent.spawn(worker, NAMES_FOR_WORKER[i], work) for i in range(num_workers)])
if __name__ == '__main__':
# put all details into string
ad = AnimeDetail()
detail = ''
work = []
try:
with open(IDS_FILENAME, 'r') as idsf:
work = [id for id in idsf]
except IOError as e:
print 'Please run gen_id_file.py first.'
start = timeit.default_timer()
process(work, WORKER_NUM)
stop = timeit.default_timer()
print 'It took ' + str(stop - start) + 's to run ' + str(len(work)) + ' queries.'
|
Python
| 0
|
@@ -501,16 +501,42 @@
 ) as f:
+        f.write('<root>')
@@ -862,16 +862,43 @@
 h_data)
+        f.write('</root>')
 
 def bo
@@ -1642,24 +1642,8 @@
 l()
-    detail = ''
|
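The two `f.write` calls bracket everything a worker writes, so each `*_batch.xml` file gains a single document element and becomes well-formed XML. A self-contained sketch of that file layout (`fetch_batch` stands in for `ad.fetch_details(request)`):

```python
def write_batches(filename, requests, fetch_batch):
    with open(filename, 'w') as f:
        f.write('<root>')
        for request in requests:
            f.write(fetch_batch(request))  # each payload is an XML fragment
        f.write('</root>')

write_batches('Joe_batch.xml', ['1/2/3'],
              lambda req: '<batch ids="%s"/>' % req)
```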
5a1cb6408397e3dd8eed227f0adcea924de7b0e1
|
enable new checkin report
|
reporting/utils.py
|
reporting/utils.py
|
"""
Utility functions for reports
"""
from .reports import FamilyTotalOverTimeReport, DependentsTotalOverTimeReport
from .reports import FamilyCheckoutsPerWeekReport, DependentCheckoutsPerWeekReport
from .reports import EmptyFamilyCheckoutsPerWeekReport
from .reports import ItemsPerCategoryPerMonthReport, IndividualsByAgeReport
from .reports import FamiliesPerZipReport, CheckoutFrequencyPerMonthReport
from .reports import VolunteersHoursWorkedReport
availableReports = {}
availableReports[1] = FamilyTotalOverTimeReport
availableReports[2] = DependentsTotalOverTimeReport
availableReports[3] = FamilyCheckoutsPerWeekReport
availableReports[4] = EmptyFamilyCheckoutsPerWeekReport
availableReports[5] = DependentCheckoutsPerWeekReport
availableReports[6] = ItemsPerCategoryPerMonthReport
availableReports[7] = IndividualsByAgeReport
availableReports[8] = FamiliesPerZipReport
availableReports[9] = CheckoutFrequencyPerMonthReport
availableReports[10] = VolunteersHoursWorkedReport
def determineAndCreateReport(report_num, startDate='', endDate=''):
"""Determine the report"""
return availableReports[report_num](startDate, endDate)
|
Python
| 0
|
@@ -239,32 +239,61 @@
 utsPerWeekReport
+, FamilyCheckInsPerWeekReport
 from .reports i
@@ -728,16 +728,66 @@
 rts[5] =
+ FamilyCheckInsPerWeekReport
+availableReports[6] =
 Depende
@@ -824,25 +824,25 @@
 ableReports[
-6
+7
 ] = ItemsPer
@@ -877,25 +877,25 @@
 ableReports[
-7
+8
 ] = Individu
@@ -922,25 +922,25 @@
 ableReports[
-8
+9
 ] = Families
@@ -965,25 +965,26 @@
 ableReports[
-9
+10
 ] = Checkout
@@ -1021,25 +1021,25 @@
 bleReports[1
-0
+1
 ] = Voluntee
|
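The hunks insert `FamilyCheckInsPerWeekReport` at key 5 and renumber every later key by hand, which is easy to get wrong. A sketch of an alternative registry that renumbers itself (class names shown as strings to keep the example self-contained):

```python
REPORT_CLASSES = [
    'FamilyTotalOverTimeReport',
    'DependentsTotalOverTimeReport',
    'FamilyCheckoutsPerWeekReport',
    'EmptyFamilyCheckoutsPerWeekReport',
    'FamilyCheckInsPerWeekReport',   # new report slots in here
    'DependentCheckoutsPerWeekReport',
    'ItemsPerCategoryPerMonthReport',
    'IndividualsByAgeReport',
    'FamiliesPerZipReport',
    'CheckoutFrequencyPerMonthReport',
    'VolunteersHoursWorkedReport',
]
availableReports = {i + 1: cls for i, cls in enumerate(REPORT_CLASSES)}
print(availableReports[5])   # -> FamilyCheckInsPerWeekReport
```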
8da28a564ae14986046b576f91c2690aa5c0ff09
|
fix dict sintax
|
api/omnitransaction.py
|
api/omnitransaction.py
|
import urlparse
import os, sys, re, random, pybitcointools, bitcoinrpc, math
from decimal import Decimal
from msc_apps import *
from blockchain_utils import *
import config
class OmniTransaction:
confirm_target=6
HEXSPACE_SECOND='21'
mainnet_exodus_address='1EXoDusjGwvnjZUyKkxZ4UHEf77z6A5S4P'
testnet_exodus_address='mpexoDuSkGGqvqrkrjiFng38QPkJQVFyqv'
def __init__(self,tx_type,form):
self.conn = getRPCconn()
self.testnet = False
self.magicbyte = 0
self.exodus_address=self.mainnet_exodus_address
if 'testnet' in form and ( form['testnet'] in ['true', 'True'] ):
self.testnet =True
self.magicbyte = 111
self.exodus_address=self.testnet_exodus_address
try:
if config.D_PUBKEY and ( 'donate' in form ) and ( form['donate'] in ['true', 'True'] ):
print "We're Donating to pubkey for: "+pybitcointools.pubkey_to_address(config.D_PUBKEY)
self.pubkey = config.D_PUBKEY
else:
print "not donating"
self.pubkey = form['pubkey']
except NameError, e:
print e
self.pubkey = form['pubkey']
self.fee = estimateFee(self.confirm_target)['result']
self.rawdata = form
self.tx_type = tx_type
def get_unsigned(self):
# get payload
payload = self.__generate_payload()
# Add exodous output
rawtx = createrawtx_reference(self.exodus_address)['result']
if 'transaction_to' in self.rawdata:
# Add reference for reciever
rawtx = createrawtx_reference(self.rawdata['transaction_to'], rawtx)['result']
# Add the payload
if len(payload) <= 160: #80bytes
rawtx = createrawtx_opreturn(payload, rawtx)['result']
else:
rawtx = createrawtx_multisig(payload, rawtx)['result']
# Decode transaction to get total needed amount
decodedtx = decoderawtransaction(rawtx)['result']
# Sumup the outputs
fee_total = Decimal(self.fee)
for output in decodedtx['vout']:
fee_total += Decimal(output['value'])
fee_total_satoshi = int( round( fee_total * Decimal(1e8) ) )
# Get utxo to generate inputs
print "Calling bc_getutxo with ", self.rawdata['transaction_from'], fee_total_satoshi
dirty_txes = bc_getutxo( self.rawdata['transaction_from'], fee_total_satoshi )
print "received", dirty_txes
if (dirty_txes['error'][:3]=='Con'):
raise Exception({ "status": "NOT OK", "error": "Couldn't get list of unspent tx's. Response Code: " + dirty_txes['code'] })
if (dirty_txes['error'][:3]=='Low'):
raise Exception({ "status": "NOT OK", "error": "Not enough funds, try again. Needed: " + str(fee_total) + " but Have: " + dirty_txes['avail'] })
total_amount = dirty_txes['avail']
unspent_tx = dirty_txes['utxos']
change = total_amount - fee_total_satoshi
#DEBUG
print [ "Debugging...", dirty_txes,"miner fee sats: ", self.fee, "change: ",change,"total_amt: ", total_amount,"fee tot sat: ", fee_total_satoshi,"utxo ", unspent_tx,"to ", self.rawdata['transaction_to'] ]
#source script is needed to sign on the client credit grazcoin
hash160=bc_address_to_hash_160(self.rawdata['transaction_from']).encode('hex_codec')
prevout_script='OP_DUP OP_HASH160 ' + hash160 + ' OP_EQUALVERIFY OP_CHECKSIG'
validnextinputs = [] #get valid redeemable inputs
for unspent in unspent_tx:
#retrieve raw transaction to spend it
prev_tx = getrawtransaction(unspent[0])['result']
for output in prev_tx.vout:
if 'reqSigs' in output['scriptPubKey'] and output['scriptPubKey']['reqSigs'] == 1 and output['scriptPubKey']['type'] != 'multisig':
for address in output['scriptPubKey']['addresses']:
if address == self.rawdata['transaction_from'] and int(output['n']) == int(unspent[1]):
validnextinputs.append({ "txid": prev_tx.txid, "vout": output['n'], "scriptPubKey" : output['scriptPubKey']['hex'], "value" : output['value']})
break
# Add the inputs
for input in validnextinputs:
rawtx = createrawtx_input(input['txid'],input['vout'],rawtx)['result']
# Add the change if above dust
if change > 5757:
rawtx = createrawtx_change(rawtx, validnextinputs, self.rawdata['transaction_from'], fee_total)['result']
return { 'status':200, 'unsignedhex': rawtx , 'sourceScript': prevout_script }
def __generate_payload(self):
if self.tx_type == 0:
return getsimplesendPayload(self.rawdata['currency_identifier'], self.rawdata['amount_to_transfer'])['result']
if self.tx_type == 20:
return getdexsellPayload(self.rawdata['currency_identifier'], self.rawdata['amount_for_sale'], self.rawdata['amount_desired'], self.rawdata['blocks'], self.rawdata['min_buyer_fee'], self.rawdata['action'])['result']
if self.tx_type == 22:
return getdexacceptPayload(self.rawdata['currency_identifier'], self.rawdata['amount'])['result']
if self.tx_type == 50:
return getissuancefixedPayload(self.rawdata['ecosystem'],self.rawdata['property_type'],self.rawdata['previous_property_id'],self.rawdata['property_category'],self.rawdata['property_subcategory'],self.rawdata['property_name'],self.rawdata['property_url'],self.rawdata['property_data'],self.rawdata['number_properties'])['result']
if self.tx_type == 51:
return getissuancecrowdsalePayload(self.rawdata['ecosystem'],self.rawdata['property_type'],self.rawdata['previous_property_id'],self.rawdata['property_category'],self.rawdata['property_subcategory'],self.rawdata['property_name'],self.rawdata['property_url'],self.rawdata['property_data'],self.rawdata['currency_identifier_desired'],self.rawdata['number_properties'], self.rawdata['deadline'], self.rawdata['earlybird_bonus'], self.rawdata['percentage_for_issuer'])['result']
if self.tx_type == 54:
return getissuancemanagedPayload(self.rawdata['ecosystem'],self.rawdata['property_type'],self.rawdata['previous_property_id'],self.rawdata['property_category'],self.rawdata['property_subcategory'],self.rawdata['property_name'],self.rawdata['property_url'],self.rawdata['property_data'])['result']
if self.tx_type == 55:
return getgrantPayload(self.rawdata['currency_identifier'], self.rawdata['amount'], self.rawdata['memo'])['result']
if self.tx_type == 56:
return getrevokePayload(self.rawdata['currency_identifier'], self.rawdata['amount'], self.rawdata['memo'])['result']
|
Python
| 0.000027
|
@@ -3741,13 +3741,16 @@
 v_tx
-.
+['
 vout
+']
 :
@@ -4151,13 +4151,16 @@
 v_tx
-.
+['
 txid
+']
 , "v
|
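Why the fix matters: `getrawtransaction(...)['result']` is a plain dict, so attribute access like `prev_tx.vout` raises `AttributeError`; subscripting is the correct access. Illustrated with a hypothetical result shape:

```python
prev_tx = {'txid': 'ab12', 'vout': [{'n': 0, 'value': 0.5}]}

# prev_tx.vout   -> AttributeError: 'dict' object has no attribute 'vout'
for output in prev_tx['vout']:
    print(prev_tx['txid'], output['n'], output['value'])
```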
1f192121b0c769cf07af052327f503b3a47c77a0
|
Remove join_args option in run_command() (#5272)
|
airflow/task/task_runner/base_task_runner.py
|
airflow/task/task_runner/base_task_runner.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import getpass
import os
import subprocess
import threading
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow import configuration as conf
from airflow.utils.configuration import tmp_configuration_copy
PYTHONPATH_VAR = 'PYTHONPATH'
class BaseTaskRunner(LoggingMixin):
"""
Runs Airflow task instances by invoking the `airflow run` command with raw
mode enabled in a subprocess.
"""
def __init__(self, local_task_job):
"""
:param local_task_job: The local task job associated with running the
associated task instance.
:type local_task_job: airflow.jobs.LocalTaskJob
"""
# Pass task instance context into log handlers to setup the logger.
super().__init__(local_task_job.task_instance)
self._task_instance = local_task_job.task_instance
popen_prepend = []
if self._task_instance.run_as_user:
self.run_as_user = self._task_instance.run_as_user
else:
try:
self.run_as_user = conf.get('core', 'default_impersonation')
except conf.AirflowConfigException:
self.run_as_user = None
# Add sudo commands to change user if we need to. Needed to handle SubDagOperator
# case using a SequentialExecutor.
self.log.debug("Planning to run as the %s user", self.run_as_user)
if self.run_as_user and (self.run_as_user != getpass.getuser()):
# We want to include any environment variables now, as we won't
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
# might not be able to run the cmds to get credentials
cfg_path = tmp_configuration_copy(chmod=0o600,
include_env=True,
include_cmds=True)
# Give ownership of file to user; only they can read and write
subprocess.call(
['sudo', 'chown', self.run_as_user, cfg_path],
close_fds=True
)
# propagate PYTHONPATH environment variable
pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]
if pythonpath_value:
popen_prepend.append('{}={}'.format(PYTHONPATH_VAR, pythonpath_value))
else:
# Always provide a copy of the configuration file settings. Since
# we are running as the same user, and can pass through environment
# variables then we don't need to include those in the config copy
# - the runner can read/execute those values as it needs
cfg_path = tmp_configuration_copy(chmod=0o600,
include_env=False,
include_cmds=False)
self._cfg_path = cfg_path
self._command = popen_prepend + self._task_instance.command_as_list(
raw=True,
pickle_id=local_task_job.pickle_id,
mark_success=local_task_job.mark_success,
job_id=local_task_job.id,
pool=local_task_job.pool,
cfg_path=cfg_path,
)
self.process = None
def _read_task_logs(self, stream):
while True:
line = stream.readline()
if isinstance(line, bytes):
line = line.decode('utf-8')
if len(line) == 0:
break
self.log.info('Job %s: Subtask %s %s',
self._task_instance.job_id, self._task_instance.task_id,
line.rstrip('\n'))
def run_command(self, run_with=None, join_args=False):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:param join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
"""
run_with = run_with or []
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc
def start(self):
"""
Start running the task instance in a subprocess.
"""
raise NotImplementedError()
def return_code(self):
"""
:return: The return code associated with running the task instance or
None if the task is not yet done.
:rtype: int
"""
raise NotImplementedError()
def terminate(self):
"""
Kill the running task instance.
"""
raise NotImplementedError()
def on_finish(self):
"""
A callback that should be called when this is done running.
"""
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
else:
os.remove(self._cfg_path)
|
Python
| 0
|
@@ -4627,28 +4627,11 @@
 None
-, join_args=False
 ):
@@ -4795,178 +4795,8 @@
 ist
-        :param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
-            ``['airflow run']``
-        :param join_args: bool
@@ -4916,80 +4916,8 @@
 []
-        cmd = [" ".join(self._command)] if join_args else self._command
@@ -4942,18 +4942,28 @@
 _with +
-cm
+self._comman
 d
|
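With `join_args` gone, the command tokens are always handed to `Popen` as a list. A standalone sketch of the simplified method (the real one lives on `BaseTaskRunner` and logs via `self.log`):

```python
import subprocess

def run_command(command, run_with=None):
    run_with = run_with or []
    full_cmd = run_with + command   # no join branch: tokens stay a list
    return subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, universal_newlines=True)

proc = run_command(['echo', 'hello'])
print(proc.communicate()[0], end='')   # -> hello
```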
0c1b0a7787bd6824815ae208edab8f208b53af09
|
Add comment to override of status code
|
api/base/exceptions.py
|
api/base/exceptions.py
|
def jsonapi_exception_handler(exc, context):
"""
Custom exception handler that returns errors object as an array with a 'detail' member
"""
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
if response is not None:
if 'detail' in response.data:
response.data = {'errors': [response.data]}
else:
response.data = {'errors': [{'detail': response.data}]}
if response is not None and response.data['errors'][0]['detail'] == "Authentication credentials were not provided.":
response.status_code = 401
return response
|
Python
| 0
|
@@ -455,16 +455,124 @@
 ata}]}
 
+    # Returns 401 instead of 403 during unauthorized requests without having user to log in with Basic Auth
 if r
|
b225011d1892f285b96f67f0c0734c62b4e196b6
|
Make transactions work again #nodebug
|
api/base/middleware.py
|
api/base/middleware.py
|
from pymongo.errors import OperationFailure
from raven.contrib.django.raven_compat.models import sentry_exception_handler
from framework.transactions import commands, messages, utils
from .api_globals import api_globals
# TODO: Verify that a transaction is being created for every
# individual request.
class TokuTransactionsMiddleware(object):
"""TokuMX transaction middleware."""
def process_request(self, request):
"""Begin a transaction if one doesn't already exist."""
try:
commands.begin()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.TRANSACTION_EXISTS_ERROR not in message:
raise err
def process_exception(self, request, exception):
"""If an exception occurs, rollback the current transaction
if it exists.
"""
sentry_exception_handler(request=request)
try:
commands.rollback()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.NO_TRANSACTION_ERROR not in message:
raise
commands.disconnect()
return None
def process_response(self, request, response):
"""Commit transaction if it exists, rolling back in an
exception occurs.
"""
try:
if response.status_code >= 400:
commands.rollback()
else:
commands.commit()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.NO_TRANSACTION_TO_COMMIT_ERROR not in message:
pass
# raise err
except Exception as err:
try:
commands.rollback()
except OperationFailure:
pass
else:
pass
raise err
commands.disconnect()
return response
class DjangoGlobalMiddleware(object):
"""
Store request object on a thread-local variable for use in database caching mechanism.
"""
def process_request(self, request):
api_globals.request = request
def process_exception(self, request, exception):
sentry_exception_handler(request=request)
api_globals.request = None
return None
def process_response(self, request, response):
api_globals.request = None
return response
|
Python
| 0.000001
|
@@ -1667,31 +1667,8 @@
-                pass
-                #
 rai
|
32189cbdfcce0c4bf2d23214da529ab7f253e8f7
|
Update NLP thingie
|
api/controllers/nlp.py
|
api/controllers/nlp.py
|
from flask import Flask
from flask import session
from flask import Response
from flask import request
from flask import redirect, url_for
from flask import render_template
from api.core import app, db
from api.models.user import User
from api.models.page import Page
from gensim import corpora, models, similarities
import urllib
import json
import datetime
@app.route('/nlp/sentiment')
def nlp_sentiment():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json'
}
params = urllib.urlencode(params)
sentiment = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetTextSentiment?%s' % params).read()
)
return Response(json.dumps({
'accounts': sentiment
}), mimetype='application/json')
@app.route('/nlp/rank_keywords')
def npl_rank_keywords():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json',
'sentiment': 1
}
params = urllib.urlencode(params)
ranked = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetRankedKeywords?%s' % params).read()
)
return Response(json.dumps({
'accounts': ranked
}), mimetype='application/json')
def match_similar(inputs, questions):
# remove common words and tokenize
stoplist = set('for a of the and to in by from on with as a'.split())
texts = [[word for word in question.lower().split() if word not in stoplist] for question in questions]
# Create dictionary
dictionary = corpora.Dictionary(texts)
# Define corpus
corpus = [dictionary.doc2bow(text) for text in texts]
# Define LSI space
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
# Get similarity of the doc vs documents
vector = dictionary.doc2bow(inputs.lower().split())
vector_lsi = lsi[vector]
index = similarities.MatrixSimilarity(lsi[corpus])
sims = index[vector_lsi]
return sims
def match_group(inputs, groups, min_threshold):
group_questions = []
for key, group in groups.items():
group_questions.append(group['content'])
group_similarity = sorted(enumerate(match_similar(inputs, group_questions)), key=lambda item: -item[1])
print group_similarity
for group_id, similarity in group_similarity:
if similarity > min_threshold:
return groups[str(group_id)]
# Create new group
print('New Group created');
@app.route('/nlp/similar')
def nlp_similar():
min_threshold = 0.4;
groups = {
'1': {
'id': 1,
'content': 'Cars are awesome.',
},
'2': {
'id': 2,
'content': 'Will the show continue?',
}
}
questions = {
'1': {
'id': 1,
'question': "Human machine interface for lab abc computer applications",
'similarity': 0.9,
'group_id': 1
},
'2': {
'id': 2,
'question': "A survey of user opinion of computer system response time",
'similarity': 9.1,
'group_id': 1
},
'3': {
'id': 3,
'question': "The EPS user interface management system",
'similarity': 0.6,
'group_id': 2
},
'4': {
'id': 4,
'question': "System and human system engineering testing of EPS",
'similarity': 7.7,
'group_id': 2
},
'5': {
'id': 5,
'question': "Relation of user perceived response time to error measurement",
'similarity': 8.1,
'group_id': 0
},
'6': {
'id': 6,
'question': "The generation of random binary unordered trees",
'similarity': 7.2,
'group_id': 1
},
'7': {
'id': 7,
'question': "The intersection graph of paths in trees",
'similarity': 1.6,
'group_id': 2
},
'8': {
'id': 8,
'question': "Graph minors IV Widths of trees and well quasi ordering",
'similarity': 2.3,
'group_id': 0
},
'9': {
'id': 6,
'question': "Graph minors A survey",
'similarity': 3.1,
'group_id': 1
}
}
inputs = "I like watching television"
sims = match_group(inputs, groups, min_threshold)
return Response(json.dumps(sims), mimetype='application/json')
|
Python
| 0
|
@@ -1504,16 +1504,18 @@
 nize
+  #
 stoplis
@@ -1580,16 +1580,18 @@
 t())
+  #
 texts =
@@ -1686,16 +1686,100 @@
 estions]
+    texts = [[word for word in question.lower().split()] for question in questions]
 
     #
@@ -2901,17 +2901,16 @@
 awesome
-.
 ',
@@ -2997,17 +2997,16 @@
 continue
-?
 ',
|
c7cfbed48718c58753a3d36dfe017f1700956f45
|
Update NLP thingie
|
api/controllers/nlp.py
|
api/controllers/nlp.py
|
from flask import Flask
from flask import session
from flask import Response
from flask import request
from flask import redirect, url_for
from flask import render_template
from api.core import app, db
from api.models.user import User
from api.models.page import Page
from gensim import corpora, models, similarities
import urllib
import json
import datetime
@app.route('/nlp/sentiment')
def nlp_sentiment():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json'
}
params = urllib.urlencode(params)
sentiment = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetTextSentiment?%s' % params).read()
)
return Response(json.dumps({
'accounts': sentiment
}), mimetype='application/json')
@app.route('/nlp/rank_keywords')
def npl_rank_keywords():
params = {
'apikey': '2ccd6f653c1e4253b6ac5ee0dadb284bde58331e',
'text': 'I hate this tv show. It became really booring.',
'outputMode': 'json',
'sentiment': 1
}
params = urllib.urlencode(params)
ranked = json.loads(
urllib.urlopen('http://access.alchemyapi.com/calls/text/TextGetRankedKeywords?%s' % params).read()
)
return Response(json.dumps({
'accounts': ranked
}), mimetype='application/json')
def match_similar(inputs, questions):
# remove common words and tokenize
stoplist = set('for a of the and to in by from on with as a'.split())
texts = [[word for word in question.lower().split() if word not in stoplist] for question in questions]
# Create dictionary
dictionary = corpora.Dictionary(texts)
# Define corpus
corpus = [dictionary.doc2bow(text) for text in texts]
# Define LSI space
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
# Get similarity of the doc vs documents
vector = dictionary.doc2bow(inputs.lower().split())
vector_lsi = lsi[vector]
index = similarities.MatrixSimilarity(lsi[corpus])
sims = index[vector_lsi]
return sims
def match_group(inputs, groups, min_threshold):
group_questions = []
for key, group in groups.items():
group_questions.append(group['content'])
group_similarity = sorted(enumerate(match_similar(inputs, group_questions)), key=lambda item: -item[1])
for group_id, similarity in group_similarity:
if similarity > min_threshold:
return groups[group_id]
# Create new group
print('New Group created');
@app.route('/nlp/similar')
def nlp_similar():
min_threshold = 0.4;
groups = {
'1': {
'id': 1,
'content': 'Will the show continue?',
},
'2': {
'id': 2,
'content': 'Do you like TV?',
}
}
questions = {
'1': {
'id': 1,
'question': "Human machine interface for lab abc computer applications",
'similarity': 0.9,
'group_id': 1
},
'2': {
'id': 2,
'question': "A survey of user opinion of computer system response time",
'similarity': 9.1,
'group_id': 1
},
'3': {
'id': 3,
'question': "The EPS user interface management system",
'similarity': 0.6,
'group_id': 2
},
'4': {
'id': 4,
'question': "System and human system engineering testing of EPS",
'similarity': 7.7,
'group_id': 2
},
'5': {
'id': 5,
'question': "Relation of user perceived response time to error measurement",
'similarity': 8.1,
'group_id': 0
},
'6': {
'id': 6,
'question': "The generation of random binary unordered trees",
'similarity': 7.2,
'group_id': 1
},
'7': {
'id': 7,
'question': "The intersection graph of paths in trees",
'similarity': 1.6,
'group_id': 2
},
'8': {
'id': 8,
'question': "Graph minors IV Widths of trees and well quasi ordering",
'similarity': 2.3,
'group_id': 0
},
'9': {
'id': 6,
'question': "Graph minors A survey",
'similarity': 3.1,
'group_id': 1
}
}
inputs = "I like watching series"
sims = match_group(inputs, groups, min_threshold)
return Response(json.dumps(sims), mimetype='application/json')
|
Python
| 0
|
@@ -2519,16 +2519,43 @@
 eshold:
+            print group_id
|
f6d7283a04a146d73a3f873c507700202f023211
|
Remove commented out code
|
raiden/tests/conftest.py
|
raiden/tests/conftest.py
|
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position,redefined-outer-name,unused-wildcard-import,wildcard-import
import re
import gevent
import py
import sys
from gevent import monkey
monkey.patch_all()
import pytest
from ethereum.tools.keys import PBKDF2_CONSTANTS
from raiden.exceptions import RaidenShuttingDown
from raiden.tests.fixtures import * # noqa: F401,F403
from raiden.log_config import configure_logging
gevent.get_hub().SYSTEM_ERROR = BaseException
gevent.get_hub().NOT_ERROR = (gevent.GreenletExit, SystemExit, RaidenShuttingDown)
PBKDF2_CONSTANTS['c'] = 100
CATCH_LOG_HANDLER_NAME = 'catch_log_handler'
def pytest_addoption(parser):
parser.addoption(
'--blockchain-type',
choices=['geth'],
default='geth',
)
parser.addoption(
'--blockchain-cache',
action='store_true',
default=False,
)
parser.addoption(
'--initial-port',
type=int,
default=29870,
help='Base port number used to avoid conflicts while running parallel tests.',
)
parser.addoption(
'--log-config',
default=None,
)
parser.addoption(
'--plain-log',
action='store_true',
default=False,
help='Do not colorize console log output'
)
parser.addoption(
'--profiler',
default=None,
choices=['cpu', 'sample'],
)
@pytest.fixture(autouse=True)
def profiler(request, tmpdir):
if request.config.option.profiler == 'cpu':
from raiden.utils.profiling.cpu import CpuProfiler
profiler = CpuProfiler(str(tmpdir))
profiler.start()
yield
profiler.stop()
elif request.config.option.profiler == 'sample':
from raiden.utils.profiling.sampler import SampleProfiler
profiler = SampleProfiler(str(tmpdir))
profiler.start()
yield
profiler.stop()
else:
# do nothing, but yield a valid generator otherwise the autouse fixture
# will fail
yield
@pytest.fixture(autouse=True, scope='session')
def logging_level(request):
""" Configure the structlog level.
For integration tests this also sets the geth verbosity.
"""
if request.config.option.verbose > 3:
level = 'DEBUG'
elif request.config.option.verbose > 1:
level = 'INFO'
else:
level = 'WARNING'
if request.config.option.log_cli_level:
level = request.config.option.log_cli_level
configure_logging(
level,
colorize=not request.config.option.plain_log,
log_file=request.config.option.log_file
)
@pytest.fixture(scope='session', autouse=True)
def enable_greenlet_debugger(request):
if request.config.option.usepdb:
from raiden.utils.debug import enable_greenlet_debugger
enable_greenlet_debugger()
@pytest.fixture(scope='session', autouse=True)
def validate_solidity_compiler():
""" Check the solc prior to running any test. """
from raiden.blockchain.abi import validate_solc
validate_solc()
# Connect catchlog's handler to structlog's root logger
# @pytest.hookimpl(hookwrapper=True, trylast=True)
# def pytest_runtest_call(item):
# catchlog_handler = getattr(item, CATCH_LOG_HANDLER_NAME, None)
# if catchlog_handler and catchlog_handler not in structlog.rootLogger.handlers:
# structlog.rootLogger.addHandler(catchlog_handler)
# yield
# if catchlog_handler and catchlog_handler in structlog.rootLogger.handlers:
# structlog.rootLogger.removeHandler(catchlog_handler)
if sys.platform == 'darwin':
# On macOS the temp directory base path is already very long.
# To avoid failures on ipc tests (ipc path length is limited to 104/108 chars on macOS/linux)
# we override the pytest tmpdir machinery to produce shorter paths.
@pytest.fixture(scope='session', autouse=True)
def _tmpdir_short(request):
"""Shorten tmpdir paths"""
from _pytest.tmpdir import TempdirFactory
def getbasetemp(self):
""" return base temporary directory. """
try:
return self._basetemp
except AttributeError:
basetemp = self.config.option.basetemp
if basetemp:
basetemp = py.path.local(basetemp)
if basetemp.check():
basetemp.remove()
basetemp.mkdir()
else:
rootdir = py.path.local.get_temproot()
rootdir.ensure(dir=1)
basetemp = py.path.local.make_numbered_dir(prefix='pyt', rootdir=rootdir)
self._basetemp = t = basetemp.realpath()
self.trace('new basetemp', t)
return t
TempdirFactory.getbasetemp = getbasetemp
try:
delattr(request.config._tmpdirhandler, '_basetemp')
except AttributeError:
pass
@pytest.fixture
def tmpdir(request, tmpdir_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
"""
name = request.node.name
name = re.sub(r'[\W]', '_', name)
MAXVAL = 15
if len(name) > MAXVAL:
name = name[:MAXVAL]
return tmpdir_factory.mktemp(name, numbered=True)
|
Python
| 0
|
@@ -3070,516 +3070,8 @@
 )
-# Connect catchlog's handler to structlog's root logger
-# @pytest.hookimpl(hookwrapper=True, trylast=True)
-# def pytest_runtest_call(item):
-#     catchlog_handler = getattr(item, CATCH_LOG_HANDLER_NAME, None)
-#     if catchlog_handler and catchlog_handler not in structlog.rootLogger.handlers:
-#         structlog.rootLogger.addHandler(catchlog_handler)
-
-#     yield
-
-#     if catchlog_handler and catchlog_handler in structlog.rootLogger.handlers:
-#         structlog.rootLogger.removeHandler(catchlog_handler)
 if s
|
47b9aeff1400beb723cd759109993003d9b0586b
|
Comment out alpha version of answer generation algorithm.
|
src/answer_processing/answer_processing.py
|
src/answer_processing/answer_processing.py
|
# LING 573 Question Answering System
# Code last updated 4/18/14 by Claire Jaja
# This code implements an Answer Processor for the question answering system.
from general_classes import AnswerTemplate, Passage
from operator import itemgetter, attrgetter
from collections import Counter, defaultdict
class AnswerProcessor:
def __init__(self,passages,answer_template):
self.passages = passages
self.answer_template = answer_template
self.ranked_answers = []
def generate_and_rank_answers(self):
# get answers from the passages
self.extract_answers()
# reweight answers based on answer template
self.reweight_answers()
# sort answers by score
self.rank_answers()
# return top 20 highest ranked answers
return self.ranked_answers[:20]
# a method to extract possible answers from the passages and rank them
def extract_answers(self):
# for now, just take the entire text of the passage as the answer
for passage in self.passages:
answer_candidate = AnswerCandidate(passage.passage,passage.doc_id)
answer_candidate.set_score(passage.weight)
self.ranked_answers.append(answer_candidate)
# later, do something more clever, like count n-grams
# also increment scores based on something like inverse passage rank
# here's a possible clever answer extractor
answer_docs = defaultdict(set)
answer_score = defaultdict(lambda:0)
for passage in self.passages:
for i in range(len(passage.passage)): # is this a string or a list of strings? assuming list
answers = []
# unigram
answers.append(passage.passage[i])
if i < len(passage.passage) - 2: # can do bigrams
answers.append(" ".join(passage.passage[i:i+2]))
if i < len(passage.passage) - 3: # can do trigrams
answers.append(" ".join(passage.passage[i:i+3]))
if i < len(passage.passage) - 4: # can do 4-grams
answers.append(" ".join(passage.passage[i:i+4]))
for answer in answers:
answer_docs[answer].add(passage.doc_id)
answer_score[answer] += passage.weight
# then find answers with highest score?
answer_score_list = []
for answer,score in answer_score.iteritems():
answer_score_list.append((answer,score))
sorted_answers = sorted(answer_score_list,key=itemgetter(1))
# remove words from original question and stop words
# a method to check candidate answers against the answer template
def reweight_answers(self):
for answer_candidate in self.ranked_answers:
# find NEs and types
for NE_type,weight in self.answer_template.type_weights.iteritems():
# if this NE type is in the answer_candidate
# set the score to the previous score times the type's weight
# for now, assume all NE types in all answer candidates
new_score = answer_candidate.score*weight
answer_candidate.set_score(new_score)
def rank_answers(self):
self.ranked_answers.sort(reverse=True,key=attrgetter('score'))
class AnswerCandidate:
def __init__(self,answer,doc_id):
self.answer = answer
self.doc_id = doc_id
self.score = 0
def set_score(self,score):
self.score = score
|
Python
| 0
|
@@ -1444,16 +1444,17 @@
 tractor
+#
@@ -1484,16 +1484,17 @@
 ct(set)
+#
@@ -1530,16 +1530,17 @@
 mbda:0)
+#
@@ -1561,32 +1561,33 @@
 self.passages:
+#
 for
@@ -1675,16 +1675,17 @@
 ng list
+#
@@ -1731,16 +1731,17 @@
 unigram
+#
@@ -1783,17 +1783,17 @@
 age[i])
-
+#
@@ -1845,24 +1845,25 @@
 do bigrams
+#
@@ -1915,24 +1915,25 @@
 ge[i:i+2]))
+#
@@ -1987,24 +1987,25 @@
 do trigrams
+#
@@ -2065,24 +2065,25 @@
 ge[i:i+3]))
+#
@@ -2143,24 +2143,25 @@
 do 4-grams
+#
@@ -2229,16 +2229,17 @@
 :i+4]))
+#
@@ -2261,32 +2261,33 @@
 wer in answers:
+#
@@ -2322,32 +2322,33 @@
 passage.doc_id)
+#
@@ -2438,16 +2438,17 @@
 score?
+#
@@ -2466,24 +2466,25 @@
 e_list = []
+#
@@ -2517,32 +2517,33 @@
 re.iteritems():
+#
 answ
@@ -2579,16 +2579,17 @@
 score))
+#
+#
|
0bd4224bfa737942930bfa1e1a67ecfdf25e70e7
|
Apply WidgetAdapter to CheckboxSelectMultiple.
|
wagtail/core/widget_adapters.py
|
wagtail/core/widget_adapters.py
|
"""
Register Telepath adapters for core Django form widgets, so that they can
have corresponding Javascript objects with the ability to render new instances
and extract field values.
"""
from django import forms
from django.utils.functional import cached_property
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.telepath import Adapter, register
class WidgetAdapter(Adapter):
js_constructor = 'wagtail.widgets.Widget'
def js_args(self, widget):
return [
widget.render('__NAME__', None, attrs={'id': '__ID__'}),
widget.id_for_label('__ID__'),
]
def get_media(self, widget):
media = super().get_media(widget)
return media + widget.media
@cached_property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/telepath/widgets.js'),
])
register(WidgetAdapter(), forms.widgets.Input)
register(WidgetAdapter(), forms.Textarea)
register(WidgetAdapter(), forms.Select)
class RadioSelectAdapter(WidgetAdapter):
js_constructor = 'wagtail.widgets.RadioSelect'
register(RadioSelectAdapter(), forms.RadioSelect)
|
Python
| 0
|
@@ -1013,16 +1013,72 @@
 Select)
+register(WidgetAdapter(), forms.CheckboxSelectMultiple)
 
 class
|
8b38f24fbdee425fde52d33b2154812d10c9d3db
|
Bump version
|
wagtailaltgenerator/__init__.py
|
wagtailaltgenerator/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wagtailaltgenerator
----------
Insert image description and tags with the help of computer vision
"""
__title__ = "wagtailaltgenerator"
__version__ = "4.1.0"
__build__ = 410
__author__ = "Martin Sandström"
__license__ = "MIT"
__copyright__ = "Copyright 2016-2018 Martin Sandström"
default_app_config = "wagtailaltgenerator.apps.AltGeneratorAppConfig"
|
Python
| 0
|
@@ -200,17 +200,17 @@
= %224.1.
-0
+1
%22%0A__buil
@@ -217,17 +217,17 @@
d__ = 41
-0
+1
%0A__autho
@@ -306,17 +306,17 @@
2016-201
-8
+9
Martin
|
522aedce37a79dc8551c04592c637cde5528022e
|
Add compatibility with gevent 1.0
|
wal_e/worker/pg/wal_transfer.py
|
wal_e/worker/pg/wal_transfer.py
|
import gevent
import os
import re
import traceback
from gevent import queue
from os import path
from wal_e.exception import UserCritical
from wal_e import storage
class WalSegment(object):
def __init__(self, seg_path, explicit=False):
self.path = seg_path
self.explicit = explicit
self.name = path.basename(self.path)
def mark_done(self):
"""Mark the archive status of this segment as 'done'.
This is most useful when performing out-of-band parallel
uploads of segments, so that Postgres doesn't try to go and
upload them again.
This amounts to messing with an internal bookkeeping mechanism
of Postgres, but that mechanism has changed little over the
last five years and seems simple enough.
"""
# Recheck that this is not an segment explicitly passed from Postgres
if self.explicit:
raise UserCritical(
msg='unexpected attempt to modify wal metadata detected',
detail=('Segments explicitly passed from postgres should not '
'engage in archiver metadata manipulation: {0}'
.format(self.path)),
hint='report a bug')
# Attempt a rename of archiver metadata, wrapping unexpected
# raised exceptions into a UserCritical.
try:
status_dir = path.join(path.dirname(self.path),
'archive_status')
ready_metadata = path.join(status_dir, self.name + '.ready')
done_metadata = path.join(status_dir, self.name + '.done')
os.rename(ready_metadata, done_metadata)
except StandardError:
raise UserCritical(
msg='problem moving .ready archive status to .done',
detail='Traceback is: {0}'.format(traceback.format_exc()),
hint='report a bug')
@staticmethod
def from_ready_archive_status(xlog_dir):
status_dir = path.join(xlog_dir, 'archive_status')
statuses = os.listdir(status_dir)
# Try to send earliest segments first.
statuses.sort()
for status in statuses:
# Only bother with segments, not history files and such;
# it seems like special treatment of such quantities is
# more likely to change than that of the WAL segments,
# which are bulky and situated in a particular place for
# crash recovery.
match = re.match(storage.SEGMENT_READY_REGEXP, status)
if match:
seg_name = match.groupdict()['filename']
seg_path = path.join(xlog_dir, seg_name)
yield WalSegment(seg_path, explicit=False)
class WalTransferGroup(object):
"""Concurrency and metadata manipulation for parallel transfers.
It so happens that it looks like WAL segment uploads and downloads
can be neatly done with one mechanism, so do so here.
"""
def __init__(self, transferer):
# Injected transfer mechanism
self.transferer = transferer
# Synchronization and tasks
self.wait_change = queue.Queue(maxsize=0)
self.expect = 0
self.closed = False
# Maintain a list of running greenlets for gevent.killall.
#
# Abrupt termination of WAL-E (e.g. calling exit, as seen with
# a propagated error) will not result in clean-ups
# (e.g. 'finally' clauses) being run, so it's necessary to
# retain the greenlets, inject asynchronous exceptions, and
# then wait on termination.
self.greenlets = set([])
def join(self):
"""Wait for transfer to exit, raising errors as necessary."""
self.closed = True
while self.expect > 0:
val = self.wait_change.get()
self.expect -= 1
if val is not None:
# Kill all the running greenlets, waiting for them to
# clean up and exit.
#
# As a fail-safe against indefinite blocking of
# gevent.killall, time out after a liberal amount of
# time. This is not expected to ever occur except for
# bugs and very dire situations, so do not take pains
# to convert it into a UserException or anything.
gevent.killall(self.greenlets, block=True, timeout=60)
raise val
def start(self, segment):
"""Begin transfer for an indicated wal segment."""
if self.closed:
raise UserCritical(msg='attempt to transfer wal after closing',
hint='report a bug')
g = gevent.Greenlet(self.transferer, segment)
g.link(self._complete_execution)
self.greenlets.add(g)
# Increment .expect before starting the greenlet, or else a
# very unlucky .join could be fooled as to when pool is
# complete.
self.expect += 1
g.start()
def _complete_execution(self, g):
"""Forward any raised exceptions across a channel."""
# Triggered via completion callback.
#
# Runs in its own greenlet, so take care to forward the
# exception, if any, to fail the entire transfer in event of
# trouble.
assert g.ready()
self.greenlets.remove(g)
placed = UserCritical(msg='placeholder bogus exception',
hint='report a bug')
if g.successful():
try:
segment = g.get()
if not segment.explicit:
segment.mark_done()
except StandardError, e:
# Absorb and forward exceptions across the channel.
placed = e
else:
placed = None
else:
placed = g.exception
self.wait_change.put(placed)
|
Python
| 0
|
@@ -4419,16 +4419,21 @@
killall(
+list(
self.gre
@@ -4438,16 +4438,17 @@
reenlets
+)
, block=
|
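The wal-e diff above wraps self.greenlets (a set) in list(...) before gevent.killall; in gevent 1.0, killall reportedly indexes into its argument, which fails for sets. An illustrative reproduction of the version-safe call (assumes gevent is installed; names are made up):

import gevent

def sleepy():
    gevent.sleep(10)

greenlets = {gevent.spawn(sleepy) for _ in range(3)}  # a set, as in the class above

# Materializing a list first works on both old and new gevent releases.
gevent.killall(list(greenlets), block=True, timeout=60)
print(all(g.dead for g in greenlets))  # True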
e704ddf988159f11fe579f2404b683756b6844ac
|
fix baidu crawler
|
icrawler/builtin/baidu.py
|
icrawler/builtin/baidu.py
|
# -*- coding: utf-8 -*-
import json
from icrawler import Crawler, Feeder, Parser, ImageDownloader
from icrawler.builtin.filter import Filter
class BaiduFeeder(Feeder):
def get_filter(self):
search_filter = Filter()
# type filter
type_code = {
'portrait': 's=3&lm=0&st=-1&face=0',
'face': 's=0&lm=0&st=-1&face=1',
'clipart': 's=0&lm=0&st=1&face=0',
'linedrawing': 's=0&lm=0&st=2&face=0',
'animated': 's=0&lm=6&st=-1&face=0',
'static': 's=0&lm=7&st=-1&face=0'
}
def format_type(img_type):
return type_code[img_type]
type_choices = list(type_code.keys())
search_filter.add_rule('type', format_type, type_choices)
# color filter
color_code = {
'red': 1,
'orange': 256,
'yellow': 2,
'green': 4,
'purple': 32,
'pink': 64,
'teal': 8,
'blue': 16,
'brown': 12,
'white': 1024,
'black': 512,
'blackandwhite': 2048
}
def format_color(color):
return 'ic={}'.format(color_code[color])
color_choices = list(color_code.keys())
search_filter.add_rule('color', format_color, color_choices)
# size filter
def format_size(size):
if size in ['extralarge', 'large', 'medium', 'small']:
size_code = {
'extralarge': 9,
'large': 3,
'medium': 2,
'small': 1
}
return 'z={}'.format(size_code[size])
elif size.startswith('='):
wh = size[1:].split('x')
assert len(wh) == 2
return 'width={}&height={}'.format(*wh)
else:
raise ValueError(
'filter option "size" must be one of the following: '
'extralarge, large, medium, small, =[]x[] '
'([] is an integer)')
search_filter.add_rule('size', format_size)
return search_filter
def feed(self, keyword, offset, max_num, filters=None):
base_url = ('http://image.baidu.com/search/acjson?tn=resultjson_com'
'&ipn=rj&word={}&pn={}&rn=30')
self.filter = self.get_filter()
filter_str = self.filter.apply(filters, sep='&')
for i in range(offset, offset + max_num, 30):
url = base_url.format(keyword, i)
if filter_str:
url += '&' + filter_str
self.out_queue.put(url)
self.logger.debug('put url to url_queue: {}'.format(url))
class BaiduParser(Parser):
def _decode_url(self, encrypted_url):
url = encrypted_url
map1 = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
map2 = {
'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e',
'u': 'f', '2': 'g', 'i': 'h', 't': 'i', '3': 'j',
'h': 'k', 's': 'l', '4': 'm', 'g': 'n', '5': 'o',
'r': 'p', 'q': 'q', '6': 'r', 'f': 's', 'p': 't',
'7': 'u', 'e': 'v', 'o': 'w', '8': '1', 'd': '2',
'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7',
'b': '8', 'l': '9', 'a': '0'
} # yapf: disable
for (ciphertext, plaintext) in map1.items():
url = url.replace(ciphertext, plaintext)
char_list = [char for char in url]
for i in range(len(char_list)):
if char_list[i] in map2:
char_list[i] = map2[char_list[i]]
url = ''.join(char_list)
return url
def parse(self, response):
try:
content = response.content.decode('utf-8', 'ignore')
content = json.loads(content, strict=False)
except:
self.logger.error('Fail to parse the response in json format')
return
for item in content['data']:
if 'objURL' in item:
img_url = self._decode_url(item['objURL'])
elif 'hoverURL' in item:
img_url = item['hoverURL']
else:
continue
yield dict(file_url=img_url)
class BaiduImageCrawler(Crawler):
def __init__(self,
feeder_cls=BaiduFeeder,
parser_cls=BaiduParser,
downloader_cls=ImageDownloader,
*args,
**kwargs):
super(BaiduImageCrawler, self).__init__(
feeder_cls, parser_cls, downloader_cls, *args, **kwargs)
def crawl(self,
keyword,
filters=None,
offset=0,
max_num=1000,
min_size=None,
max_size=None,
file_idx_offset=0,
overwrite=False):
if offset + max_num > 1000:
if offset > 1000:
self.logger.error('Offset cannot exceed 1000, otherwise you '
'will get duplicated searching results.')
return
elif max_num > 1000:
max_num = 1000 - offset
self.logger.warning('Due to Baidu\'s limitation, you can only '
'get the first 1000 results. "max_num" has '
'been automatically set to %d',
1000 - offset)
else:
pass
feeder_kwargs = dict(
keyword=keyword, offset=offset, max_num=max_num, filters=filters)
downloader_kwargs = dict(
max_num=max_num,
min_size=min_size,
max_size=max_size,
file_idx_offset=file_idx_offset,
overwrite=overwrite)
super(BaiduImageCrawler, self).crawl(
feeder_kwargs=feeder_kwargs, downloader_kwargs=downloader_kwargs)
|
Python
| 0.000004
|
@@ -3775,17 +3775,83 @@
-8',
- 'ignore'
+%0A 'ignore').replace(%22%5C%5C'%22, %22'%22
)%0A
@@ -4578,24 +4578,38 @@
mageCrawler,
+%0A
self).__ini
@@ -4612,29 +4612,16 @@
_init__(
-%0A
feeder_c
@@ -4658,16 +4658,45 @@
, *args,
+%0A
**kwarg
@@ -5297,16 +5297,37 @@
warning(
+%0A
'Due to
@@ -5382,32 +5382,16 @@
-
-
'get the
@@ -5446,32 +5446,16 @@
-
-
'been au
@@ -5481,44 +5481,8 @@
%25d',
-%0A
100
|
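_decode_url in the record above is a two-stage substitution cipher: multi-character tokens are replaced first, then single characters are mapped. The same idea as a standalone sketch with a toy alphabet (the real tables are the map1/map2 dicts shown above):

def decode(url, multi_map, char_map):
    # Stage 1: multi-character tokens (e.g. '_z2C$q' -> ':').
    for ciphertext, plaintext in multi_map.items():
        url = url.replace(ciphertext, plaintext)
    # Stage 2: per-character substitution; unknown characters pass through.
    return "".join(char_map.get(c, c) for c in url)

print(decode("h@@p_SEP_x", {"_SEP_": "://"}, {"@": "t", "x": "y"}))  # http://y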
a155e8654a95969abc2290d4198622991d6cb00e
|
Remove duplicate entry for vikidia and gutenberg in burundi boxes
|
ideascube/conf/idb_bdi.py
|
ideascube/conf/idb_bdi.py
|
"""Generic config for Ideasbox of Burundi"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['refugee_id', 'short_name', 'full_name', 'birth_year', 'gender', 'phone']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the camp'), ['camp_entry_date', 'camp_activities', 'current_occupation', 'camp_address']), # noqa
(_('Origin'), ['country', 'city', 'country_of_origin_occupation', 'school_level', 'is_sent_to_school']), # noqa
(_('Language skills'), ['rn_level', 'sw_level', 'fr_level']),
(_('National residents'), ['id_card_number']),
)
HOME_CARDS = HOME_CARDS + [
{
'id': 'vikidia',
},
{
'id': 'gutenberg',
},
{
'id': 'cpassorcier',
},
{
'id': 'ted',
},
]
|
Python
| 0
|
@@ -820,86 +820,8 @@
%7B%0A
- 'id': 'vikidia',%0A %7D,%0A %7B%0A 'id': 'gutenberg',%0A %7D,%0A %7B%0A
|
05c68831cba1aa82c86fcf37fc1769f5ee017794
|
Remove excluded from submission csv
|
web/portal/views/submissions.py
|
web/portal/views/submissions.py
|
import re
import datetime
import csv
import tempfile
import io
from flask import render_template, request, redirect, url_for, flash, make_response
from flask_security import login_required, current_user, roles_required
from sqlalchemy import func
from portal import app, db
from portal.models import *
from portal.forms import *
from portal.helpers import *
from portal.datatypes import *
@app.route('/submissions')
@app.route('/submissions?page=<int:page>')
@login_required
@roles_required('admin')
def submissions_index(page=1):
q = db.session.query(
RecruitStatus.invoice_year,
RecruitStatus.invoice_quarter,
func.count().label('participants')
).join(
RecruitStatus.recruit
).filter(
RecruitStatus.invoice_year != ''
).filter(
RecruitStatus.invoice_quarter != ''
).group_by(
RecruitStatus.invoice_year,
RecruitStatus.invoice_quarter
)
submissions = (
q.order_by(
RecruitStatus.invoice_year,
RecruitStatus.invoice_quarter
).paginate(
page=page,
per_page=10,
error_out=False))
return render_template('submissions/index.html', submissions=submissions)
@app.route('/submissions/<string:invoice_year>/<string:invoice_quarter>')
@app.route('/submissions/<string:invoice_year>/<string:invoice_quarter>?page=<int:page>')
@login_required
@roles_required('admin')
def submissions_participants(invoice_year, invoice_quarter, page=1):
q = RecruitStatus.query.join(
Recruit, RecruitStatus.recruit
).join(
PracticeRegistration, Recruit.practice_registration
).filter(
RecruitStatus.invoice_year == invoice_year
).filter(
RecruitStatus.invoice_quarter == invoice_quarter
)
participants = (
q.order_by(
PracticeRegistration.code,
Recruit.date_recruited.asc()
).paginate(
page=page,
per_page=10,
error_out=False))
return render_template('submissions/participants.html', page=page, participants=participants, invoice_year=invoice_year, invoice_quarter=invoice_quarter)
@app.route('/submissions/<string:invoice_year>/<string:invoice_quarter>/csv')
@login_required
@roles_required('admin')
def submissions_csv(invoice_year, invoice_quarter):
COL_RECRUITED_DATE = 'Study Entry Date'
COL_STATUS = 'Status'
COL_PATIENT_ID = 'Patient ID'
COL_PRACTICE_CODE = 'Practice Code'
COL_PRACTICE_NAME = 'Practice Name'
COL_PRACTICE_ADDRESS = 'Practice Address'
COL_CCG = 'CCG'
fieldnames = [
COL_RECRUITED_DATE,
COL_STATUS,
COL_PATIENT_ID,
COL_PRACTICE_CODE,
COL_PRACTICE_NAME,
COL_PRACTICE_ADDRESS,
COL_CCG
]
si = io.StringIO()
output = csv.DictWriter(
si,
fieldnames=fieldnames,
quoting=csv.QUOTE_NONNUMERIC
)
output.writeheader()
q = RecruitStatus.query.join(
Recruit, RecruitStatus.recruit
).join(
PracticeRegistration, Recruit.practice_registration
).filter(
RecruitStatus.invoice_year == invoice_year
).filter(
RecruitStatus.invoice_quarter == invoice_quarter
)
participants = q.order_by(
PracticeRegistration.code,
Recruit.date_recruited.asc()
).all()
for p in participants:
output.writerow({
COL_RECRUITED_DATE: p.recruit.date_recruited,
COL_STATUS: p.status,
COL_PATIENT_ID: p.study_id,
COL_PRACTICE_CODE: p.recruit.practice_registration.code,
COL_PRACTICE_NAME: p.recruit.practice_registration.practice.name,
COL_PRACTICE_ADDRESS: p.recruit.practice_registration.practice.address,
COL_CCG: p.recruit.practice_registration.practice.ccg_name
})
resp = make_response(si.getvalue())
resp.headers["Content-Disposition"] = "attachment; filename=Genvasc_Submissions_{}{}.csv".format(invoice_year, invoice_quarter)
resp.headers["Content-type"] = "text/csv"
resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
return resp
|
Python
| 0
|
@@ -3228,32 +3228,89 @@
invoice_quarter%0A
+ ).filter(%0A RecruitStatus.status != 'Excluded'%0A
)%0A%0A parti
|
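The export view above builds the CSV in memory (io.StringIO plus csv.DictWriter) and only then wraps it in a response. The in-memory half in isolation, as a runnable sketch with invented column names:

import csv
import io

si = io.StringIO()
writer = csv.DictWriter(si, fieldnames=["Patient ID", "Status"],
                        quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerow({"Patient ID": "P001", "Status": "Recruited"})

# si.getvalue() is what make_response() would receive in the view above.
print(si.getvalue())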
923bcc555c6a039df649d1c2725cc67ad8c79660
|
Make CMS errors JSON-able if requested via AJAX
|
cms/djangoapps/contentstore/views/error.py
|
cms/djangoapps/contentstore/views/error.py
|
from django.http import HttpResponseServerError, HttpResponseNotFound
from mitxmako.shortcuts import render_to_string, render_to_response
__all__ = ['not_found', 'server_error', 'render_404', 'render_500']
def not_found(request):
return render_to_response('error.html', {'error': '404'})
def server_error(request):
return render_to_response('error.html', {'error': '500'})
def render_404(request):
return HttpResponseNotFound(render_to_string('404.html', {}))
def render_500(request):
return HttpResponseServerError(render_to_string('500.html', {}))
|
Python
| 0.000002
|
@@ -16,16 +16,31 @@
p import
+ (HttpResponse,
HttpRes
@@ -56,16 +56,41 @@
erError,
+%0A
HttpRes
@@ -102,16 +102,17 @@
NotFound
+)
%0Afrom mi
@@ -171,16 +171,45 @@
response
+%0Aimport functools%0Aimport json
%0A%0A__all_
@@ -272,16 +272,692 @@
500'%5D%0A%0A%0A
+def jsonable_error(status=500, message=%22The Studio servers encountered an error%22):%0A %22%22%22%0A A decorator to make an error view return an JSON-formatted message if%0A it was requested via AJAX.%0A %22%22%22%0A def outer(func):%0A @functools.wraps(func)%0A def inner(request, *args, **kwargs):%0A if request.is_ajax():%0A content = json.dumps(%7B%22error%22: message%7D)%0A return HttpResponse(content, content_type=%22application/json%22,%0A status=status)%0A else:%0A return func(request, *args, **kwargs)%0A return inner%0A return outer%0A%0A%0A@jsonable_error(404, %22Resource not found%22)%0A
def not_
@@ -1032,24 +1032,88 @@
: '404'%7D)%0A%0A%0A
+@jsonable_error(500, %22The Studio servers encountered an error%22)%0A
def server_e
@@ -1187,24 +1187,67 @@
: '500'%7D)%0A%0A%0A
+@jsonable_error(404, %22Resource not found%22)%0A
def render_4
@@ -1327,16 +1327,80 @@
%7B%7D))%0A%0A%0A
+@jsonable_error(500, %22The Studio servers encountered an error%22)%0A
def rend
|
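The decorator introduced by this diff answers AJAX requests with a JSON error and lets everything else fall through to the wrapped view. A framework-free sketch of the same pattern (the request class is a stand-in for Django's, not its API):

import functools
import json

def jsonable_error(status=500, message="server error"):
    """Return a JSON error for AJAX requests, else call the wrapped view."""
    def outer(func):
        @functools.wraps(func)
        def inner(request, *args, **kwargs):
            if request.is_ajax():
                return (status, "application/json",
                        json.dumps({"error": message}))
            return func(request, *args, **kwargs)
        return inner
    return outer

class FakeRequest:  # stand-in for a Django request object
    def __init__(self, ajax):
        self._ajax = ajax
    def is_ajax(self):
        return self._ajax

@jsonable_error(404, "Resource not found")
def not_found(request):
    return (404, "text/html", "<h1>404</h1>")

print(not_found(FakeRequest(ajax=True)))   # JSON tuple
print(not_found(FakeRequest(ajax=False)))  # HTML tuple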
521839eb8f9d81c55d9cdc851df8af65278c4f45
|
scale output after vectorizer in segmenter
|
kraken/blla.py
|
kraken/blla.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.blla
~~~~~~~~~~~~~~
Trainable baseline layout analysis tools for kraken
"""
import json
import torch
import logging
import numpy as np
import pkg_resources
import torch.nn.functional as F
from typing import Tuple, Sequence, List
from scipy.ndimage.filters import (gaussian_filter, uniform_filter,
maximum_filter)
from kraken.lib import morph, sl, vgsl, segmentation, dataset
from kraken.lib.util import pil2array, is_bitonal, get_im_str
from kraken.lib.exceptions import KrakenInputException
from kraken.rpred import rpred
from kraken.serialization import max_bbox
__all__ = ['segment']
logger = logging.getLogger(__name__)
def segment(im, text_direction='horizontal-lr', mask=None, model=pkg_resources.resource_filename(__name__, 'blla.mlmodel')):
"""
Segments a page into text lines using the baseline segmenter.
Segments a page into text lines and returns the polyline formed by each
baseline and their estimated environment.
Args:
im (PIL.Image): An RGB image.
text_direction (str): Ignored by the segmenter but kept for
serialization.
mask (PIL.Image): A bi-level mask image of the same size as `im` where
0-valued regions are ignored for segmentation
purposes. Disables column detection.
Returns:
{'text_direction': '$dir',
'type': 'baseline',
'lines': [
{'baseline': [[x0, y0], [x1, y1], ..., [x_n, y_n]], 'boundary': [[x0, y0], [x1, y1], ..., [x_m, y_m]]},
{'baseline': [[x0, ...]], 'boundary': [[x0, ...]]}
]
}: A dictionary containing the text direction and under the key 'lines'
a list of reading order sorted baselines (polylines) and their
respective polygonal boundaries. The last and first point of each
boundary polygon is connected.
Raises:
KrakenInputException if the input image is not binarized or the text
direction is invalid.
"""
im_str = get_im_str(im)
logger.info('Segmenting {}'.format(im_str))
model = vgsl.TorchVGSLModel.load_model(model)
if mask:
if mask.mode != '1' and not is_bitonal(mask):
logger.error('Mask is not bitonal')
raise KrakenInputException('Mask is not bitonal')
mask = mask.convert('1')
if mask.size != im.size:
logger.error('Mask size {} doesn\'t match image size {}'.format(mask.size, im.size))
raise KrakenInputException('Mask size {} doesn\'t match image size {}'.format(mask.size, im.size))
logger.info('Masking enabled in segmenter.')
mask = pil2array(mask)
batch, channels, height, width = model.input
transforms = dataset.generate_input_transforms(batch, height, width, channels, 0, valid_norm=False)
with torch.no_grad():
logger.debug('Running network forward pass')
o = model.nn(transforms(im).unsqueeze(0))
logger.debug('Upsampling network output')
o = F.interpolate(o, size=im.size[::-1])
logger.debug('Vectorizing network output')
baselines = segmentation.vectorize_lines(o)
logger.debug('Reordering baselines')
baselines = segmentation.polygon_order_lines(baselines, text_direction[-2:])
return {'text_direction': text_direction,
'type': 'baselines',
'lines': [{'script': 'default', 'baseline': bl, 'boundary': pl} for bl, pl in baselines]}
|
Python
| 0
|
@@ -2779,16 +2779,33 @@
(model)%0A
+ model.eval()%0A
if m
@@ -3657,24 +3657,54 @@
rk output')%0A
+ o = o.squeeze(0).numpy()%0A#
o = F.in
@@ -3735,16 +3735,35 @@
e%5B::-1%5D)
+.squeeze(0).numpy()
%0A log
@@ -3850,16 +3850,177 @@
ines(o)%0A
+ logger.debug('Scaling vectorized lines')%0A scale = np.divide(im.size, o.shape%5B:0:-1%5D)%0A baselines = segmentation.scale_polygonal_lines(baselines, scale)%0A
logg
@@ -4092,20 +4092,24 @@
ygon
+al_reading
_order
-_lines
(bas
|
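The kraken diff above maps vectorized baselines from heatmap coordinates back to image coordinates via scale = np.divide(im.size, o.shape[:0:-1]), that is, the image's (width, height) over the squeezed network output's (W, H). A numpy-only sketch of that mapping with illustrative shapes:

import numpy as np

def scale_polyline(points, img_size, heatmap_shape):
    """Map (x, y) points from heatmap coordinates to image coordinates.

    img_size: (width, height) as PIL reports it.
    heatmap_shape: (channels, height, width) of the squeezed network output.
    """
    scale = np.divide(img_size, heatmap_shape[:0:-1])  # (w/W, h/H)
    return (np.asarray(points, dtype=float) * scale).tolist()

# A 100x75 (WxH) heatmap mapped onto an 800x600 image: every coordinate
# is simply multiplied by 8.
print(scale_polyline([[10, 10], [50, 30]], (800, 600), (1, 75, 100)))
# -> [[80.0, 80.0], [400.0, 240.0]]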
f164dd8141133cb78ceeab3ecccc5d756c36da3c
|
Add optional structured properties for og:image and og:video
|
lassie/filters/social.py
|
lassie/filters/social.py
|
# -*- coding: utf-8 -*-
"""
lassie.filters.social
~~~~~~~~~~~~~~~~~~~~~
This module contains data social related content to help Lassie filter for content.
"""
from ..compat import str
import re
SOCIAL_MAPS = {
'meta': {
'open_graph': { # http://ogp.me/
'pattern': re.compile(r"^og:", re.I),
'map': {
'og:url': 'url',
'og:title': 'title',
'og:description': 'description',
'og:locale': 'locale',
'og:image': 'src',
'og:image:width': 'width',
'og:image:height': 'height',
'og:video': 'src',
'og:video:width': 'width',
'og:video:height': 'height',
'og:video:type': 'type',
},
'image_key': str('og:image'),
'video_key': str('og:video'),
'key': 'property',
},
'twitter_card': { # https://dev.twitter.com/docs/cards
'pattern': re.compile(r"^twitter:", re.I),
'map': {
'twitter:url': 'url',
'twitter:title': 'title',
'twitter:description': 'description',
'twitter:image': 'src',
'twitter:image:width': 'width',
'twitter:image:height': 'height',
'twitter:player': 'src',
'twitter:player:width': 'width',
'twitter:player:height': 'height',
'twitter:player:content_type': 'type',
},
'image_key': str('twitter:image'),
'video_key': str('twitter:player'),
'key': 'name',
},
}
}
|
Python
| 0
|
@@ -561,100 +561,325 @@
age:
-width': 'width',%0A 'og:image:height': 'height',%0A%0A 'og:video': '
+url': 'src',%0A 'og:image:secure_url': 'secure_src',%0A 'og:image:width': 'width',%0A 'og:image:height': 'height',%0A 'og:image:type': 'type',%0A%0A 'og:video': 'src',%0A 'og:video:url': 'src',%0A 'og:video:secure_url': 'secure_
src'
|
bc199a9eaa2416b35d1d691f580e6c9ca0b1a2ae
|
Remove node counts and update docstrings on new view for activity
|
website/discovery/views.py
|
website/discovery/views.py
|
from website import settings
from website.project import Node
from website.project import utils
from modularodm.query.querydialect import DefaultQueryDialect as Q
def activity():
node_data = utils.get_node_data()
if node_data:
hits = utils.hits(node_data)
else:
hits = {}
# New Projects
new_and_noteworthy_pointers = Node.find_one(Q('_id', 'eq', settings.NEW_AND_NOTEWORTHY_LINKS_NODE)).nodes_pointer
new_and_noteworthy_projects = [pointer.node for pointer in new_and_noteworthy_pointers]
# Popular Projects
popular_public_projects = Node.find_one(Q('_id', 'eq', settings.POPULAR_LINKS_NODE)).nodes_pointer
# Popular Registrations
popular_public_registrations = Node.find_one(Q('_id', 'eq', settings.POPULAR_LINKS_NODE_REGISTRATIONS)).nodes_pointer
return {
'new_and_noteworthy_projects': new_and_noteworthy_projects,
'recent_public_registrations': utils.recent_public_registrations(),
'popular_public_projects': popular_public_projects,
'popular_public_registrations': popular_public_registrations,
'hits': hits,
}
|
Python
| 0
|
@@ -179,140 +179,346 @@
():%0A
-%0A node_data = utils.get_node_data()%0A if node_data:%0A hits = utils.hits(node_data)%0A else:%0A hits = %7B%7D%0A%0A # New
+ %22%22%22Reads node activity from pre-generated popular projects and registrations.%0A New and Noteworthy projects are set manually or through %60scripts/populate_new_and_noteworthy_projects.py%60%0A Popular projects and registrations are generated by %60scripts/populate_popular_projects_and_registrations.py%60%0A %22%22%22%0A%0A # New and Noreworthy
Pro
@@ -1303,30 +1303,8 @@
ns,%0A
- 'hits': hits,%0A
|
daba688f4898bdca680ac1cd4b1e2efbc3ef1a1f
|
Deal with incorrect username.
|
lastscrape/lastscrape.py
|
lastscrape/lastscrape.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""usage: lastscrape.py USER [OUTPUT_FILE]"""
import sys
import time
import codecs
import urllib2
from BeautifulSoup import BeautifulSoup
sys.stdout = codecs.lookup('utf-8')[-1](sys.stdout)
def parse_page(page):
"""Parse a page of recently listened tracks and return a list."""
soup = BeautifulSoup(urllib2.urlopen(page),
convertEntities=BeautifulSoup.HTML_ENTITIES)
for row in soup.find('table', 'candyStriped tracklist').findAll('tr'):
artist, track, timestamp = parse_track(row)
# Tracks submitted before 2005 have no timestamp
if artist and track:
yield (artist, track, timestamp)
def parse_track(row):
"""Return a tuple containing track data."""
try:
track_info = row.find('td', 'subjectCell')
artist, track = track_info.findAll('a')
timestamp = row.find('abbr')['title']
artist = artist.contents[0].strip()
track = track.contents[0].strip()
return (artist, track, timestamp)
except:
# Parsing failed
print 'parsing failed'
return (None, None, None)
def fetch_tracks(user, request_delay=0.5):
"""Fetch all tracks from a profile page and return a list."""
url = 'http://last.fm/user/%s/tracks' % user
soup = BeautifulSoup(urllib2.urlopen(url),
convertEntities=BeautifulSoup.HTML_ENTITIES)
try:
num_pages = int(soup.find('a', 'lastpage').contents[0])
except:
num_pages = 1
for cur_page in range(1, num_pages + 1):
try:
tracks = parse_page(url + '?page=' + str(cur_page))
except:
time.sleep(1)
tracks = parse_page(url + '?page=' + str(cur_page))
for artist, track, timestamp in tracks:
yield (artist, track, timestamp)
if cur_page < num_pages:
time.sleep(request_delay)
def main(*args):
if len(args) == 2:
# Print to stdout
for artist, track, timestamp in fetch_tracks(args[1]):
print u'%s\t%s\t%s' % (artist, track, timestamp)
elif len(args) == 3:
# Write to file
f = codecs.open(args[2], 'w', 'utf-8')
for artist, track, timestamp in fetch_tracks(args[1]):
f.write(u'%s\t%s\t%s\n' % (artist, track, timestamp))
print u'%s\t%s\t%s' % (artist, track, timestamp)
f.close()
else:
print __doc__
if __name__ == '__main__':
sys.exit(main(*sys.argv))
|
Python
| 0
|
@@ -1314,16 +1314,149 @@
%25 user%0A
+ try:%0A f = urllib2.urlopen(url)%0A except urllib2.HTTPError:%0A raise Exception(%22Username probably does not exist.%22)%0A
soup
|
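The lastscrape fix probes the profile URL once and turns an HTTP error into a readable hint that the username does not exist. The same guard with the Python 3 stdlib (module paths differ from the Python 2 code above; running it for real requires network access):

from urllib.request import urlopen
from urllib.error import HTTPError

def open_profile(user):
    url = "https://www.last.fm/user/%s/tracks" % user
    try:
        return urlopen(url)
    except HTTPError as e:
        # A 404 here usually means the username does not exist.
        raise Exception("Username probably does not exist (HTTP %d)." % e.code)

# open_profile("some-nonexistent-user-xyz") raises with a clear message.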
a9803a8a523b8603db93ae18eb7e75e63bb8c44b
|
Add `dpi` option into `plot_setup`; Fix an importing bug with `matplotlib.pyplot` (#4)
|
easypyplot/pdf.py
|
easypyplot/pdf.py
|
""" $lic$
Copyright (c) 2016-2021, Mingyu Gao
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from contextlib import contextmanager
import matplotlib.backends.backend_pdf
from .format import paper_plot
def plot_setup(name, figsize=None, fontsize=9, font='paper'):
""" Setup a PDF page for plot.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
"""
paper_plot(fontsize=fontsize, font=font)
if not name.endswith('.pdf'):
name += '.pdf'
pdfpage = matplotlib.backends.backend_pdf.PdfPages(name)
fig = matplotlib.pyplot.figure(figsize=figsize)
return pdfpage, fig
def plot_teardown(pdfpage, fig=None):
""" Tear down a PDF page after plotting.
pdfpage: PDF page.
fig: the figure to save.
"""
pdfpage.savefig(fig)
pdfpage.close()
@contextmanager
def plot_open(name, figsize=None, fontsize=9, font='paper'):
""" Open a context of PDF page for plot, used for the `with` statement.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
"""
pdfpage, fig = plot_setup(name, figsize=figsize, fontsize=fontsize,
font=font)
yield fig
plot_teardown(pdfpage, fig)
|
Python
| 0.000001
|
@@ -652,16 +652,41 @@
kend_pdf
+%0Aimport matplotlib.pyplot
%0A%0Afrom .
@@ -762,32 +762,42 @@
=9, font='paper'
+, dpi=None
):%0A %22%22%22 Setup
@@ -1160,32 +1160,67 @@
ustomizes font.%0A
+ dpi: resolution of the figure.%0A
%22%22%22%0A pape
@@ -1424,16 +1424,25 @@
=figsize
+, dpi=dpi
)%0A re
@@ -1726,16 +1726,26 @@
='paper'
+, dpi=None
):%0A %22
@@ -2161,24 +2161,59 @@
mizes font.%0A
+ dpi: resolution of the figure.%0A
%22%22%22%0A
@@ -2280,16 +2280,16 @@
ntsize,%0A
-
@@ -2315,24 +2315,33 @@
font=font
+, dpi=dpi
)%0A yield
|
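plot_open above is the classic setup/teardown pairing under @contextmanager: code before the yield runs on with-entry, code after it on exit. A dependency-free sketch using a dict as a stand-in for the PdfPages/figure objects:

from contextlib import contextmanager

@contextmanager
def open_page(name):
    page = {"name": name, "closed": False}  # stand-in for a PdfPages object
    yield page                              # the body of the `with` runs here
    page["closed"] = True                   # teardown, like plot_teardown()

with open_page("figure.pdf") as fig:
    fig["content"] = "scatter plot"
print(fig["closed"])  # True

As in the original, the teardown after the yield is skipped if the body raises; wrapping the yield in try/finally would change that.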
1147781292b32177e0733be134f8c83101f0abbf
|
add comment
|
weiss/flows/flowManager.py
|
weiss/flows/flowManager.py
|
"""
Flow Mnager
NOTE: this class should be created by signleton factory in factory.py
this class is responsible for
1. user state factory.
2. loop up state by sid
3. make transit from one state based on a given action
Author: Ming Fang <mingf@cs.cmu.edu>
"""
import logging
logger = logging.getLogger(__name__)
class FlowManager:
def __init__(self):
self._flowTable = {}
def register(self, uid, flow):
self._flowTable[uid] = flow
return
def lookUp(self, uid):
if not self._flowTable.has_key(uid):
return None
else:
return self._flowTable[uid]
|
Python
| 0
|
@@ -3,16 +3,17 @@
%22%0AFlow M
+a
nager%0A%0AN
@@ -55,10 +55,10 @@
y si
-g
n
+g
leto
@@ -123,114 +123,61 @@
1.
-user state factory.%0A 2. loop up state by sid%0A 3. make transit from one state based on a given action
+register a user's flow%0A 2. loop up flow by user id
%0A%0AAu
@@ -391,166 +391,456 @@
-self._flowTable%5Buid%5D = flow%0A return%0A%0A def lookUp(self, uid):%0A if not self._flowTable.has_key(uid):%0A return None%0A else:%0A
+%22%22%22Register a user's flow object%0A%0A :param uid: the user id%0A :param flow: the flow obj associated with the user%0A :return: void%0A %22%22%22%0A self._flowTable%5Buid%5D = flow%0A return%0A%0A def lookUp(self, uid):%0A %22%22%22Look up a flow given a user id%0A%0A :param uid: the user id to be looked up, get it by request.user%0A :return: the flow obj associated with the given user, or None if not found%0A %22%22%22%0A
@@ -865,14 +865,24 @@
lowTable
-%5Buid%5D
+.get(uid, None)
%0A
|
1526e11509c5124cb80edeb5742b55dc75c31b67
|
update avg->avg_sat
|
county_avg_sat.py
|
county_avg_sat.py
|
# Written by Jonathan Saewitz, released April 11th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
from __future__ import division
import csv, requests, re, collections, plotly.plotly as plotly, plotly.graph_objs as go
from plotly.graph_objs import Scatter, Layout
schools=[]
with open('pa_schools.csv', 'r') as f: #add all of the schools to the 'schools' list
#pa_schools.csv from: http://www.edna.ed.state.pa.us/Screens/Extracts/wfExtractPublicSchools.aspx
reader=csv.reader(f)
next(reader) #skip header row
for row in reader:
try:
schools.append({'aun': int(row[0]), 'county': row[5]}) #row[0] is the aun, row[5] is the county name
except ValueError:
pass
schools_sats=[]
with open('pa_sat_scores.csv', 'r') as f: #add each high school's sat score
#pa_sat_scores.csv from: http://www.education.pa.gov/K-12/Assessment%20and%20Accountability/Pages/SAT-and-ACT.aspx (Public School SAT Scores 2015)
reader=csv.reader(f)
for i in range(8): #skip header rows
next(reader)
for row in reader:
try:
schools_sats.append({'aun': int(row[0]), 'score': int(row[8])}) #add each school's AUN (Administrative Unit Number) and score
except ValueError:
pass
for school in schools_sats: #loop through every school's aun and score
for s in schools: #loop through every school
if s['aun']==school['aun']: #match the school's aun and the aun of the sat score list
school.update({'county': s['county']}) #add the school's county
del school['aun'] #remove the aun from the school
grouped=collections.defaultdict(list) #created a defaultdict
for county in schools_sats:
grouped[county['county']].append(county['score']) #append the scores to counties in the defaultdict
county_avg_scores=[]
for county, scores in grouped.iteritems(): #get the average scores for each county
county_avg_scores.append({'county': county, 'avg': sum(scores)/len(scores)})
#get each county's per capita income
for county_avg_score in county_avg_scores: #loop through every county's average sat scores
with open('pa_avg_income.csv', 'r') as f: #pa_avg_income.csv from: https://en.wikipedia.org/wiki/List_of_Pennsylvania_counties_by_per_capita_income#Pennsylvania_counties_ranked_by_per_capita_income (from US Census Bureau)
reader=csv.reader(f)
for row in reader: #loop through every county average income
if county_avg_score['county']==row[1]: #row[1] is the county name
per_capita_income=int(row[2].replace("$", "").replace(",", "")) #format money (e.g. "$41,251"->41251)
county_avg_score.update({'per_capita_income': per_capita_income})
break #if we already found the county's income, no need to keep looping
sats=[]
incomes=[]
names=[]
f=open('counties_avg_sat.csv', 'w')
w=csv.writer(f)
w.writerow(["county", "average sat score", "per capita income"])
for c in county_avg_scores:
sats.append(c['avg'])
incomes.append(c['per_capita_income'])
names.append(c['county'])
w.writerow([c['county'], c['avg'], c['per_capita_income']])
f.close()
trace=go.Scatter(
x=incomes,
y=sats,
text=names,
mode='markers'
)
data=[trace]
fig=go.Figure(data=data)
plotly.plot(fig) #plot the scatter plot!
|
Python
| 0
|
@@ -1902,16 +1902,20 @@
ty, 'avg
+_sat
': sum(s
@@ -2888,16 +2888,20 @@
d(c%5B'avg
+_sat
'%5D)%0A%09inc
@@ -2995,16 +2995,20 @@
, c%5B'avg
+_sat
'%5D, c%5B'p
|
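The grouping-and-averaging core of the record above (a defaultdict(list) keyed by county, then a mean per key), isolated into a runnable sketch that already uses the renamed avg_sat field:

from collections import defaultdict

def county_averages(rows):
    """rows: iterable of {'county': ..., 'score': ...} dicts."""
    grouped = defaultdict(list)
    for row in rows:
        grouped[row["county"]].append(row["score"])
    return [{"county": c, "avg_sat": sum(s) / len(s)}
            for c, s in grouped.items()]

print(county_averages([{"county": "Centre", "score": 1100},
                       {"county": "Centre", "score": 1200},
                       {"county": "Erie", "score": 1000}]))
# -> [{'county': 'Centre', 'avg_sat': 1150.0}, {'county': 'Erie', 'avg_sat': 1000.0}]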
c36b90db0f3e633f459afa3328e50b6f5e8b4e1b
|
Remove debug print.
|
redash/authentication.py
|
redash/authentication.py
|
import functools
import hashlib
import hmac
from flask import request, make_response
from flask.ext.googleauth import GoogleFederated
import time
from werkzeug.contrib.fixers import ProxyFix
import werkzeug.wrappers
from redash import data, settings
class HMACAuthentication(object):
def __init__(self, auth):
self.auth = auth
def required(self, fn):
wrapped_fn = self.auth.required(fn)
@functools.wraps(fn)
def decorated(*args, **kwargs):
signature = request.args.get('signature')
expires = int(request.args.get('expires') or 0)
query_id = request.view_args.get('query_id', None)
if signature and query_id and time.time() < expires:
query = data.models.Query.objects.get(pk=query_id)
h = hmac.new(str(query.api_key), msg=request.path, digestmod=hashlib.sha1)
h.update(str(expires))
print h.hexdigest()
if query.api_key and signature == h.hexdigest():
return fn(*args, **kwargs)
# Work around for flask-restful testing only for flask.wrappers.Resource instead of
# werkzeug.wrappers.Response
resp = wrapped_fn(*args, **kwargs)
if isinstance(resp, werkzeug.wrappers.Response):
resp = make_response(resp)
return resp
return decorated
def setup_authentication(app):
openid_auth = GoogleFederated(settings.GOOGLE_APPS_DOMAIN, app)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.secret_key = settings.COOKIE_SECRET
openid_auth.force_auth_on_every_request = True
return HMACAuthentication(openid_auth)
|
Python
| 0.000001
|
@@ -922,45 +922,8 @@
))%0A%0A
- print h.hexdigest()%0A%0A
@@ -1371,16 +1371,17 @@
orated%0A%0A
+%0A
def setu
|
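Both this record and the bluebottle one further down verify an HMAC-SHA1 digest appended to a payload. A stdlib sketch of that sign/verify round trip; it uses hmac.compare_digest (constant-time) where the originals compare with ==, and the key is invented:

import hashlib
import hmac

KEY = b"shared-secret"  # illustrative key, not from either project

def sign(message: bytes) -> bytes:
    return message + hmac.new(KEY, message, hashlib.sha1).digest()

def verify(signed: bytes) -> bytes:
    message, checksum = signed[:-20], signed[-20:]  # a SHA-1 digest is 20 bytes
    expected = hmac.new(KEY, message, hashlib.sha1).digest()
    if not hmac.compare_digest(expected, checksum):
        raise ValueError("HMAC authentication failed")
    return message

print(verify(sign(b"query_id=42&expires=1700000000")))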
0e2d058efd310c2060df2a86b1de989eca1e2ea5
|
Modify cafetaria scraper
|
server/scraper/resto/cafetaria.py
|
server/scraper/resto/cafetaria.py
|
from backoff import retry_session
from bs4 import BeautifulSoup
from util import parse_money, stderr_print
import json
import sys
from requests.exceptions import ConnectionError, Timeout
HTML_PARSER = 'lxml'
OUTFILE = "resto/2.0/extrafood.json"
BASE_URL = 'https://www.ugent.be/student/nl/meer-dan-studeren/resto/ophetmenu/'
def get_breakfast():
r = retry_session.get(BASE_URL + 'ontbijt.htm')
soup = BeautifulSoup(r.text, HTML_PARSER)
data = []
for row in soup.table.find_all('tr'):
columns = row.find_all('td')
data.append({'name': columns[0].string,
'price': parse_money(columns[1].string)})
return data
def get_drinks():
r = retry_session.get(BASE_URL + 'desserten-drank.htm')
soup = BeautifulSoup(r.text, HTML_PARSER)
data = []
for row in soup.table.find_all('tr'):
columns = row.find_all('td')
data.append({'name': columns[0].string,
'price': parse_money(columns[1].string)})
return data
def get_desserts():
r = retry_session.get(BASE_URL + 'desserten-drank.htm')
soup = BeautifulSoup(r.text, HTML_PARSER)
data = []
for row in soup.find_all('table')[1].find_all('tr'):
columns = row.find_all('td')
data.append({'name': columns[0].string,
'price': parse_money(columns[1].string)})
return data
if __name__ == '__main__':
try:
data = {'breakfast': get_breakfast(), 'drinks': get_drinks(), 'desserts': get_desserts()}
except (ConnectionError, Timeout) as e:
stderr_print("Failed to connect: ", e)
sys.exit(1)
with open(OUTFILE, 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4, separators=(',', ': '))
|
Python
| 0.000001
|
@@ -1,12 +1,39 @@
+import argparse%0Aimport os%0A%0A
from backoff
@@ -118,32 +118,26 @@
ey,
-stderr_print%0Aimport json
+write_json_to_file
%0Aimp
@@ -202,16 +202,16 @@
imeout%0A%0A
+
HTML_PAR
@@ -227,46 +227,8 @@
ml'%0A
-OUTFILE = %22resto/2.0/extrafood.json%22%0A%0A
BASE
@@ -1359,60 +1359,49 @@
a%0A%0A%0A
-if __name__ == '__main__':%0A try:%0A data = %7B
+def main(output):%0A result = %7B%0A
'bre
@@ -1425,16 +1425,24 @@
kfast(),
+%0A
'drinks
@@ -1457,16 +1457,24 @@
rinks(),
+%0A
'desser
@@ -1496,240 +1496,679 @@
ts()
-%7D%0A except (ConnectionError, Timeout) as e:%0A stderr_print(%22Failed to connect: %22, e)%0A sys.exit(1
+%0A %7D%0A%0A output_file = os.path.join(output, %22extrafood.json%22)%0A write_json_to_file(result, output_file)%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(description='Run cafetaria scraper')%0A parser.add_argument('output',%0A help='Path of the folder in which the output must be written. Will be created if needed.')%0A args = parser.parse_args(
)%0A
+%0A
-with open(OUTFILE, 'w') as outfile:%0A json.dump(data, outfile, sort_keys=True, indent=4, separators=(',', ': ')
+output_path = os.path.abspath(args.output) # Like realpath%0A os.makedirs(output_path, exist_ok=True) # Like mkdir -p%0A%0A try:%0A main(output_path)%0A except (ConnectionError, Timeout) as e:%0A print(%22Failed to connect: %22, e, file=sys.stderr)%0A sys.exit(1
)%0A
|
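The cafetaria refactor in this diff swaps the hardcoded OUTFILE for an argparse-supplied output folder that is created on demand. The path-handling idiom on its own (the argv list is supplied inline so the sketch runs without a shell):

import argparse
import os

parser = argparse.ArgumentParser(description="Run cafetaria scraper")
parser.add_argument("output", help="Folder for output; created if needed.")
args = parser.parse_args(["./out"])          # illustrative argv

output_path = os.path.abspath(args.output)   # like realpath
os.makedirs(output_path, exist_ok=True)      # like mkdir -p
print(output_path)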
bb35e83b7977e15cd7df9d1f2e810a730dbafa45
|
update happy number
|
leetcode/happy_number.py
|
leetcode/happy_number.py
|
# Created by lujin at 6/3/2017
#
# 202. Happy Number
#
# Description:
#
# Write an algorithm to determine if a number is "happy".
#
# A happy number is a number defined by the following process: Starting with any positive integer,
# replace the number by the sum of the squares of its digits, and repeat the process until the number
# equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1.
# Those numbers for which this process ends in 1 are happy numbers.
#
# Example: 19 is a happy number
#
# 1^2 + 9^2 = 82
# 8^2 + 2^2 = 68
# 6^2 + 8^2 = 100
# 1^2 + 0^2 + 0^2 = 1
#
import math
class Solution(object):
def isHappy(self, n):
"""
Find out whether n is a happy number: if the process ends in 1, return True; otherwise it falls into a cycle, and the numbers in that cycle are always the same fixed few...
:type n: int
:rtype: bool
"""
if n <= 0:
return False
loop = []
while n != 1:
num = 0
while n:
num += int(math.pow(n % 10, 2))
n //= 10
n = num
if n in loop:
break
else:
loop.append(num)
return n == 1
def isHappy(self, n):
"""
The repeating cycle always contains 4, so we can test for that directly: once the chain reaches 4, the number is definitely not a happy number.
:param n:
:return:
"""
if n <= 0:
return False
while n != 1 and n != 4:
num = 0
while n:
num += int(math.pow(n % 10, 2))
n //= 10
n = num
return n == 1
def test(self):
print(self.isHappy(1))
print(self.isHappy(7))
print(self.isHappy(11))
print(self.isHappy(100))
if __name__ == '__main__':
Solution().test()
|
Python
| 0.000002
|
@@ -1133,32 +1133,33 @@
%0A def isHappy
+2
(self, n):%0A
|
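The second method in the record leans on the fact that every unhappy chain falls into the cycle 4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4, so checking n != 4 replaces the explicit visited list. A compact standalone version:

def digit_square_sum(n):
    s = 0
    while n:
        n, d = divmod(n, 10)
        s += d * d
    return s

def is_happy(n):
    # 4 is the only sentinel needed: every unhappy number reaches its cycle.
    while n != 1 and n != 4:
        n = digit_square_sum(n)
    return n == 1

print([m for m in range(1, 20) if is_happy(m)])  # [1, 7, 10, 13, 19]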
d516377d452d02554e9d0dc08f4cd1ea6e612492
|
Remove duplicate copy() and small clean up in sampler.py
|
cpnest/sampler.py
|
cpnest/sampler.py
|
import sys
import os
import numpy as np
from math import log
from collections import deque
from random import random,randrange
from . import parameter
from . import proposal
class Sampler(object):
"""
Sampler class.
Initialisation arguments:
usermodel:
user defined model to sample
maxmcmc:
maximum number of mcmc steps to be used in the sampler
verbose:
display debug information on screen
default: False
poolsize:
number of objects for the affine invariant sampling
default: 1000
"""
def __init__(self,usermodel,maxmcmc,verbose=False,poolsize=1000):
self.user = usermodel
self.maxmcmc = maxmcmc
self.Nmcmc = maxmcmc
self.Nmcmc_exact = float(maxmcmc)
self.proposals = proposal.DefaultProposalCycle()
self.poolsize = poolsize
self.evolution_points = deque(maxlen=self.poolsize + 1) # +1 for the point to evolve
self.verbose=verbose
self.acceptance=0.0
self.initialised=False
def reset(self):
"""
Initialise the sampler
"""
for n in range(self.poolsize):
while True:
if self.verbose > 2: sys.stderr.write("process {0!s} --> generating pool of {1:d} points for evolution --> {2:.0f} % complete\r".format(os.getpid(), self.poolsize, 100.0*float(n+1)/float(self.poolsize)))
p = self.user.new_point()
p.logP = self.user.log_prior(p)
if np.isfinite(p.logP): break
p.logL=self.user.log_likelihood(p)
self.evolution_points.append(p)
if self.verbose > 2: sys.stderr.write("\n")
self.proposals.set_ensemble(self.evolution_points)
for _ in range(len(self.evolution_points)):
s = self.evolution_points.popleft()
self.acceptance,jumps,s = self.metropolis_hastings(s,-np.inf)
self.evolution_points.append(s)
self.proposals.set_ensemble(self.evolution_points)
self.initialised=True
def estimate_nmcmc(self, safety=5, tau=None):
"""
Estimate autocorrelation length of chain using acceptance fraction
ACL = (2/acc) - 1
multiplied by a safety margin of 5
Uses moving average with decay time tau iterations (default: self.poolsize)
Taken from W. Farr's github.com/farr/Ensemble.jl
"""
if tau is None: tau = self.poolsize
if self.acceptance == 0.0:
self.Nmcmc_exact = (1.0 + 1.0/tau)*self.Nmcmc_exact
else:
self.Nmcmc_exact = (1.0 - 1.0/tau)*self.Nmcmc_exact + (safety/tau)*(2.0/self.acceptance - 1.0)
if np.isfinite(self.Nmcmc_exact):
self.Nmcmc = max(safety,min(self.maxmcmc, int(round(self.Nmcmc_exact))))
else:
self.Nmcmc = max(safety,self.maxmcmc)
return self.Nmcmc
def produce_sample(self, queue, logLmin, seed, ip, port, authkey):
"""
main loop that generates samples and puts them in the queue for the nested sampler object
"""
if not self.initialised:
self.reset()
# Prevent process from zombification if consumer thread exits
queue.cancel_join_thread()
self.seed = seed
np.random.seed(seed=self.seed)
self.counter=0
while(1):
# Pick a random point from the ensemble to start with
# Pop it out the stack to prevent cloning
param = self.evolution_points.popleft()
if logLmin.value==np.inf:
break
acceptance,jumps,outParam = self.metropolis_hastings(param.copy(),logLmin.value)
# If we bailed out then flag point as unusable
if acceptance==0.0:
outParam.logL=-np.inf
# Put sample back in the stack
self.evolution_points.append(outParam.copy())
# Push the sample onto the queue
queue.put((acceptance,jumps,outParam))
# Update the ensemble every now and again
if (self.counter%(self.poolsize/10))==0 or self.acceptance<0.001:
self.proposals.set_ensemble(self.evolution_points)
self.counter += 1
sys.stderr.write("Sampler process {0!s}, exiting\n".format(os.getpid()))
return 0
def metropolis_hastings(self,inParam,logLmin):
"""
metropolis-hastings loop to generate the new live point taking nmcmc steps
"""
accepted = 0
rejected = 0
jumps = 0
oldparam = inParam.copy()
logp_old = self.user.log_prior(oldparam)
while (jumps < self.Nmcmc) or accepted == 0:
newparam = self.proposals.get_sample(oldparam.copy())
newparam.logP = self.user.log_prior(newparam)
if newparam.logP-logp_old + self.proposals.log_J > log(random()):
newparam.logL = self.user.log_likelihood(newparam)
if newparam.logL > logLmin:
oldparam = newparam.copy()
logp_old = newparam.logP
accepted+=1
else:
rejected+=1
else:
rejected+=1
jumps+=1
if jumps==10*self.maxmcmc:
if self.verbose > 2: print('Warning, MCMC chain exceeded {0} iterations!'.format(10*self.maxmcmc))
break
self.acceptance = float(accepted)/float(rejected+accepted)
self.estimate_nmcmc(tau=jumps)
return (float(accepted)/float(rejected+accepted),jumps,oldparam)
|
Python
| 0
|
@@ -3619,23 +3619,16 @@
gs(param
-.copy()
,logLmin
@@ -5508,48 +5508,23 @@
rn (
-float(accepted)/float(rejected+accepted)
+self.acceptance
,jum
|
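estimate_nmcmc above keeps an exponential moving average of the chain length: each update decays the old estimate by (1 - 1/tau) and mixes in safety*(2/acceptance - 1)/tau, growing the estimate by (1 + 1/tau) when nothing was accepted. The recursion in isolation, with illustrative parameters:

def update_nmcmc(nmcmc_exact, acceptance, tau=1000.0, safety=5.0):
    """One step of the moving-average ACL estimate used above."""
    if acceptance == 0.0:
        return (1.0 + 1.0 / tau) * nmcmc_exact   # grow when nothing is accepted
    return ((1.0 - 1.0 / tau) * nmcmc_exact
            + (safety / tau) * (2.0 / acceptance - 1.0))

n = 100.0
for acc in (0.5, 0.1, 0.0):
    n = update_nmcmc(n, acc)
    print(round(n, 3))  # the sampler then clamps this into [safety, maxmcmc]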
d1bf3d9853125700e99a0fc1ae705e76a1127035
|
Convert aes key to string. Crypto does not like unicode keys
|
bluebottle/token_auth/auth/booking.py
|
bluebottle/token_auth/auth/booking.py
|
import base64
import hashlib
import hmac
import logging
import re
import urllib
from datetime import timedelta
import string
from datetime import datetime
from django.utils.dateparse import parse_datetime
from django.utils import timezone
from Crypto import Random
from Crypto.Cipher import AES
from bluebottle.token_auth.models import CheckedToken
from bluebottle.token_auth.auth.base import BaseTokenAuthentication
from bluebottle.token_auth.exceptions import TokenAuthenticationError
from bluebottle.token_auth.utils import get_settings
logger = logging.getLogger(__name__)
def _encode_message(message):
"""
Helper method which returns an encoded version of the
message passed as an argument.
It returns a tuple containing a string formed by two elements:
1. A string formed by the initialization vector and the AES-128
encrypted message.
2. The HMAC-SHA1 hash of that string.
"""
aes_key = get_settings()['aes_key']
hmac_key = get_settings()['hmac_key']
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(
AES.block_size - len(s) % AES.block_size)
init_vector = Random.new().read(AES.block_size)
cipher = AES.new(aes_key, AES.MODE_CBC, init_vector)
padded_message = pad(message)
aes_message = init_vector + cipher.encrypt(padded_message)
hmac_digest = hmac.new(str(hmac_key), str(aes_message), hashlib.sha1)
return aes_message, hmac_digest
def generate_token(email, username, first_name, last_name):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message = 'time={0}|username={1}|name={2} {3}|' \
'email={4}'.format(timestamp, username, first_name, last_name, email)
aes_message, hmac_digest = _encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
return token
class TokenAuthentication(BaseTokenAuthentication):
"""
This authentication backend expects a token, encoded in URL-safe Base64, to
be received from the user to be authenticated. The token must be built like
this:
- The first 16 bytes are the AES initialization vector used to decrypt the message.
- The last 20 bytes are the HMAC-SHA1 signature of the rest of the token, keyed
with a shared HMAC key, to provide an extra safety layer on the process.
- The rest of the token, between the first 16 bytes and the latest 20, is
the encrypted message to be read.
The backend performs the following operations on a received token in order to
authenticate the user who is sending it:
1. Checks that the token was not used previously, to prevent replay.
2. Decodes it through Base64.
3. Checks the HMAC-SHA1 signature of the message.
4. Decrypts the AES-encoded message to read the data.
5. Reads the timestamp included in the message to check whether the token has
already expired or is still valid.
"""
def check_hmac_signature(self, message):
"""
Checks the HMAC-SHA1 signature of the message.
"""
data = message[:-20]
checksum = message[-20:]
hmac_data = hmac.new(str(self.settings['hmac_key']), str(data), hashlib.sha1)
return True if hmac_data.digest() == checksum else False
def get_login_data(self, data):
"""
Obtains the data from the decoded message. Returns a Python tuple
of 4 elements containing the login data. The elements, from zero
to three, are:
0. Timestamp.
1. Username.
2. Complete name.
3. Email.
"""
expression = r'(.*?)\|'
pattern = r'time={0}username={0}name={0}email=(.*)'.format(expression)
login_data = re.search(pattern, data)
return login_data.groups()
def check_timestamp(self, data):
timestamp = datetime.strptime(data['timestamp'], '%Y-%m-%d %H:%M:%S')
time_limit = datetime.now() - \
timedelta(seconds=self.settings['token_expiration'])
if timestamp < time_limit:
raise TokenAuthenticationError('Authentication token expired')
def check_token_used(self):
if not self.args.get('token'):
raise TokenAuthenticationError(value='No token provided')
try:
CheckedToken.objects.get(token=self.args['token'])
raise TokenAuthenticationError(
value='Token was already used and is not valid')
except CheckedToken.DoesNotExist:
# Token was not used previously. Continue with auth process.
pass
def decrypt_message(self):
"""
Decrypts the AES encoded message.
"""
token = str(self.args['token'])
message = base64.urlsafe_b64decode(token)
# Check that the message is valid (HMAC-SHA1 checking).
if not self.check_hmac_signature(message):
raise TokenAuthenticationError('HMAC authentication failed')
init_vector = message[:16]
enc_message = message[16:-20]
aes = AES.new(self.settings['aes_key'], AES.MODE_CBC, init_vector)
message = aes.decrypt(enc_message)
# Get the login data in an easy-to-use tuple.
try:
login_data = self.get_login_data(message)
except AttributeError:
# Regex failed, so data was not valid.
raise TokenAuthenticationError('Message does not contain valid login data')
name = login_data[2].strip()
first_name = name.split(' ').pop(0)
parts = name.split(' ')
parts.pop(0)
last_name = " ".join(parts)
email = login_data[3].strip()
email = filter(lambda x: x in string.printable, email)
data = {
'timestamp': login_data[0],
'remote_id': email,
'email': email,
'first_name': first_name,
'last_name': last_name,
'username': email
}
return data
def get_metadata(self):
metadata = "<sso-url>{0}</sso-url>".format(self.sso_url())
return metadata
def sso_url(self, target_url=None):
url = self.settings['sso_url']
if target_url:
url += '?{}'.format(urllib.urlencode({'url': target_url.encode('utf-8')}))
return url
@property
def target_url(self):
return self.args['link']
def authenticate_request(self):
self.check_token_used()
data = self.decrypt_message()
self.check_timestamp(data)
return data
def finalize(self, user, data):
timestamp = timezone.make_aware(parse_datetime(data['timestamp']))
CheckedToken.objects.create(token=self.args['token'], user=user,
timestamp=timestamp).save()
|
Python
| 0.999999
|
@@ -4991,16 +4991,20 @@
AES.new(
+str(
self.set
@@ -5019,16 +5019,17 @@
es_key'%5D
+)
, AES.MO
|
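The fix in this record wraps the AES key in str(...) because PyCrypto rejects unicode keys; on Python 3 with PyCryptodome the same constraint surfaces as a "key must be bytes" error. A hedged round-trip sketch (assumes pycryptodome is installed; the 16-byte key and message are invented):

from Crypto.Cipher import AES
from Crypto import Random

key = b"0123456789abcdef"            # bytes, never str/unicode
iv = Random.new().read(AES.block_size)

def pad(data: bytes) -> bytes:
    n = AES.block_size - len(data) % AES.block_size
    return data + bytes([n]) * n     # PKCS#7-style padding, as above

cipher = AES.new(key, AES.MODE_CBC, iv)
token = iv + cipher.encrypt(pad(b"time=2014-01-01 00:00:00|username=alice"))

plain = AES.new(key, AES.MODE_CBC, token[:16]).decrypt(token[16:])
print(plain[:-plain[-1]])            # strip the padding bytes again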
c91dcf3a55bf12ae8e19b167bfd593cc6dbfc980
|
Fixing whitespace
|
windmill/browser/chrome.py
|
windmill/browser/chrome.py
|
import commands
import tempfile
import logging
import signal
import subprocess
import sys, os
import urlparse
import windmill
logger = logging.getLogger(__name__)
import safari
class Chrome(safari.Safari):
def __init__(self):
self.chrome_binary = windmill.settings['CHROME_BINARY']
self.test_url = windmill.settings['TEST_URL']
# def create_redirect(self):
# self.redirection_page = tempfile.mktemp(suffix='.html')
# f = open(self.redirection_page, 'w')
# test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
# f.write( html_redirection.replace('{replace}', test_url) )
# f.flush() ; f.close()
# def set_proxy_mac(self):
# """Set local Proxy"""
# self.netsetup_binary = windmill.settings['NETWORKSETUP_BINARY']
# interface_name = find_default_interface_name()
# uri = urlparse.urlparse(self.test_url)
# set_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxy',
# '"'+interface_name+'"',
# 'localhost',
# str(windmill.settings['SERVER_HTTP_PORT'])
# ])
# commands.getoutput(set_proxy_command)
# enable_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxystate',
# '"'+interface_name+'"',
# 'on'
# ])
# commands.getoutput(enable_proxy_command)
# self.create_redirect()
# self.interface_name = interface_name
#
# def unset_proxy_mac(self):
# commands.getoutput(' '.join([self.netsetup_binary, '-setwebproxystate', '"'+self.interface_name+'"', 'off']))
def set_proxy_windows(self):
import ie
self.ie_obj = ie.InternetExplorer()
self.ie_obj.set_proxy()
#
# def unset_proxy_windows(self):
# self.ie_obj.unset_proxy()
def start(self):
"""Start Chrome"""
# if sys.platform == 'darwin':
# self.set_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.set_proxy_windows()
kwargs = {'stdout':sys.stdout ,'stderr':sys.stderr, 'stdin':sys.stdin}
self.p_handle = subprocess.Popen([self.chrome_binary, '--homepage', self.test_url], **kwargs)
logger.info([self.chrome_binary, self.redirection_page])
def kill(self, kill_signal=None):
"""Stop Chrome"""
# if sys.platform == 'darwin':
# self.unset_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.unset_proxy_windows()
try:
self.p_handle.kill(group=True)
except:
logger.error('Cannot kill Chrome')
# def stop(self):
# self.kill(signal.SIGTERM)
#
# def is_alive(self):
# if self.p_handle.poll() is None:
# return False
# return True
|
Python
| 0.999999
|
@@ -1922,20 +1922,17 @@
f'%5D))%0A%09%0A
-
+%09
def set_
@@ -1952,20 +1952,17 @@
(self):%0A
-
+%09
impo
@@ -1967,20 +1967,17 @@
port ie%0A
-
+%09
self
@@ -2008,20 +2008,17 @@
lorer()%0A
-
+%09
self
|
06e0499c5a83a15dafc8563579295b9a8b51f79f
|
Remove typing annotations for backwards compatibility
|
cppimport/find.py
|
cppimport/find.py
|
import os
import sys
import cppimport.config
def find_file_in_folders(filename, paths):
for d in paths:
if not os.path.exists(d):
continue
if os.path.isfile(d):
continue
for f in os.listdir(d):
if f == filename:
return os.path.join(d, f)
return None
def find_matching_path_dirs(moduledir):
if moduledir == '':
return sys.path
ds = []
for dir in sys.path:
test_path = os.path.join(dir, moduledir)
if os.path.exists(test_path) and os.path.isdir(test_path):
ds.append(test_path)
return ds
def find_module_cpppath(modulename):
modulepath_without_ext = modulename.replace('.', os.sep)
moduledir = os.path.dirname(modulepath_without_ext + '.throwaway')
matching_dirs = find_matching_path_dirs(moduledir)
matching_dirs = [os.getcwd() if d == '' else d for d in matching_dirs]
for ext in cppimport.config.file_exts:
modulefilename = os.path.basename(modulepath_without_ext + ext)
outfilename = find_file_in_folders(modulefilename, matching_dirs)
if outfilename is not None:
return outfilename
return None
def find_module_path(module_name: str, search_path: str = None) -> str:
"""
Find the module path (pyd / so), while accounting for platform/arch naming
:param module_name: The name of the module
:param search_path: The path to search in. If None, searches system path.
:return: The full path to the library or None if not found.
"""
# Use importlib if python 3.4+, else imp
if sys.version_info[0] > 3 or (sys.version_info[0] == 3 and sys.version_info[1] >= 4):
from importlib.machinery import FileFinder, ExtensionFileLoader, EXTENSION_SUFFIXES
file_finder = FileFinder(search_path, (ExtensionFileLoader, EXTENSION_SUFFIXES))
# The search caches must be cleared to guaranteed find dynamically created modules
file_finder.invalidate_caches()
result = file_finder.find_spec(module_name)
return None if not result else result.origin
else:
from imp import find_module # Deprecated in 3.4
try:
result = find_module(module_name, [search_path])
except ImportError:
result = None
return None if not result else result[1]
|
Python
| 0.000001
|
@@ -1229,21 +1229,16 @@
ule_name
-: str
, search
@@ -1246,28 +1246,14 @@
path
-: str = None) -%3E str
+=None)
:%0A
|
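find_module_path above resolves a compiled extension through importlib.machinery.FileFinder, so platform ABI suffixes (e.g. .cpython-311-x86_64-linux-gnu.so) are handled for free. A minimal Python 3 sketch of that lookup; directory and module names are illustrative:

from importlib.machinery import (EXTENSION_SUFFIXES, ExtensionFileLoader,
                                 FileFinder)

def find_extension(module_name, search_path):
    finder = FileFinder(search_path,
                        (ExtensionFileLoader, EXTENSION_SUFFIXES))
    finder.invalidate_caches()        # pick up freshly built modules
    spec = finder.find_spec(module_name)
    return spec.origin if spec else None

# e.g. find_extension("mymodule", "/tmp/build") returns the full .so/.pyd
# path or None; the suffix list below is what the finder matches against.
print(EXTENSION_SUFFIXES)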
aa059d6f2fd45ec0d7de7bb2cd0477ff3552eb14
|
Handle TLS errors in receivequeuethread
|
src/network/receivequeuethread.py
|
src/network/receivequeuethread.py
|
import Queue
import sys
import threading
import time
import addresses
from bmconfigparser import BMConfigParser
from debug import logger
from helper_threading import StoppableThread
from inventory import Inventory
from network.connectionpool import BMConnectionPool
from network.bmproto import BMProto
from queues import receiveDataQueue
import protocol
import state
class ReceiveQueueThread(threading.Thread, StoppableThread):
def __init__(self, num=0):
threading.Thread.__init__(self, name="ReceiveQueue_%i" %(num))
self.initStop()
self.name = "ReceiveQueue_%i" % (num)
logger.info("init receive queue thread %i", num)
def run(self):
while not self._stopped and state.shutdown == 0:
try:
dest = receiveDataQueue.get(block=True, timeout=1)
except Queue.Empty:
continue
if self._stopped:
break
# cycle as long as there is data
            # state_* methods should return False if there isn't enough data,
            # or the connection is to be aborted
try:
BMConnectionPool().getConnectionByAddr(dest).process()
# KeyError = connection object not found
# AttributeError = state isn't implemented
except (KeyError, AttributeError):
pass
receiveDataQueue.task_done()
|
Python
| 0
|
@@ -1,20 +1,47 @@
import
-Queue
+errno%0Aimport Queue%0Aimport socket
%0Aimport
@@ -1494,16 +1494,273 @@
pass%0A
+ except socket.error as err:%0A if err.errno == errno.EBADF:%0A BMConnectionPool().getConnectionByAddr(dest).set_state(%22close%22, 0)%0A else:%0A logger.error(%22Socket error: %25s%22, str(err))%0A
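Decoded (%0A marks a newline inside the hunks), the patched loop body treats a dead TLS socket as an orderly close instead of an unhandled exception; dest, logger, and BMConnectionPool come from the record's own imports:

import errno
import socket

try:
    BMConnectionPool().getConnectionByAddr(dest).process()
# KeyError = connection object not found
# AttributeError = state isn't implemented
except (KeyError, AttributeError):
    pass
except socket.error as err:
    if err.errno == errno.EBADF:  # descriptor already torn down by the TLS layer
        BMConnectionPool().getConnectionByAddr(dest).set_state("close", 0)
    else:
        logger.error("Socket error: %s", str(err))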
|
e968983001cced5391a163ab282ef2f2ded492f6
|
Remove link to private document.
|
eliot/__init__.py
|
eliot/__init__.py
|
"""
Eliot: An Opinionated Logging Library
Suppose we turn from outside estimates of a man, to wonder, with keener
interest, what is the report of his own consciousness about his doings or
capacity: with what hindrances he is carrying on his daily labors; what
fading of hopes, or what deeper fixity of self-delusion the years are
marking off within him; and with what spirit he wrestles against universal
pressure, which will one day be too heavy for him, and bring his heart to
its final pause.
-- George Eliot, "Middlemarch"
See http://wiki.hybrid-cluster.com/index.php?title=Logging_Design_Document for
motivation.
"""
# Expose the public API:
from ._message import Message
from ._action import startAction, startTask, Action
from ._output import ILogger, Logger, MemoryLogger
from ._validation import Field, MessageType, ActionType
from ._traceback import writeTraceback, writeFailure
addDestination = Logger._destinations.add
removeDestination = Logger._destinations.remove
__all__ = ["Message", "writeTraceback", "writeFailure",
"startAction", "startTask", "Action",
"Field", "MessageType", "ActionType",
"ILogger", "Logger", "MemoryLogger", "addDestination",
"removeDestination",
]
|
Python
| 0
|
@@ -8,38 +8,31 @@
ot:
-An Opinionated Logging Library
+Logging as Storytelling
%0A%0A
@@ -554,100 +554,8 @@
rch%22
-%0A%0ASee http://wiki.hybrid-cluster.com/index.php?title=Logging_Design_Document for%0Amotivation.
%0A%22%22%22
|
6b2e24700a420c55aca7af214d8a16b79ff90fd4
|
Fix single commit diff.
|
ellen/git/diff.py
|
ellen/git/diff.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pygit2 import GIT_OBJ_COMMIT
from pygit2 import GIT_DIFF_IGNORE_WHITESPACE
from ellen.utils import JagareError
from ellen.utils.git import _resolve_version
from ellen.utils.git import format_diff
def diff_wrapper(repository, *w, **kw):
''' Jagare's diff wrapper '''
try:
kws = {}
ignore_space = kw.get('ignore_space', None)
if ignore_space:
flags = kw.get('flags', 0)
flags |= GIT_DIFF_IGNORE_WHITESPACE
kws.update({'flags': flags})
from_ref = kw.get('from_ref', None)
if from_ref:
kws.update({'from_ref': from_ref})
context_lines = kw.get('context_lines', None)
if context_lines:
kws.update({'context_lines': context_lines})
path = kw.get('path', None)
paths = kw.get('paths', None)
if path:
kws.update({'paths': [path]})
if paths:
kws.update({'paths': paths})
# call diff
d = diff(repository, *w, **kws)
rename_detection = kw.get('rename_detection', None)
if rename_detection:
d['diff'].find_similar()
#d.find_similar()
# return formated diff dict
return format_diff(d)
except JagareError:
return []
def diff(repository, ref, from_ref=None, **kwargs):
"""git diff command, pygit2 wrapper"""
# TODO: add merge_base support
_diff = {}
ref = ref.strip()
sha = _resolve_version(repository, ref)
if not sha:
raise JagareError("%s...%s" % (from_ref, ref))
commit = get_commit_by_sha(repository, sha)
from_commit = None
if from_ref:
from_ref = from_ref.strip()
from_sha = _resolve_version(repository, from_ref)
if not from_sha:
raise JagareError("%s...%s" % (from_ref, ref))
from_commit = get_commit_by_sha(repository, from_sha)
# get pygit2 diff
if from_commit:
diff, _diff['old_sha'] = diff_commits(repository, commit, from_commit,
**kwargs)
else:
diff, _diff['old_sha'] = diff_commit(repository, commit, **kwargs)
_diff['new_sha'] = commit.hex
_diff['diff'] = diff
return _diff
def diff_commits(repository, commit, from_commit=None, **kwargs):
tree = commit.tree
from_tree = from_commit.tree if from_commit else None
# call pygit2 diff
if from_tree:
diff = repository.diff(from_tree, tree, **kwargs)
old_sha = from_commit.hex
else:
diff = tree.diff_to_tree(swap=True, **kwargs)
old_sha = None
return diff, old_sha
def diff_commit(repository, commit, **kwargs):
''' one commit, default diff with parent '''
parents = commit.parents
if len(parents) >= 1:
diff = diff_commits(repository, commit, parents[0], **kwargs)
else:
diff = diff_commits(repository, commit, **kwargs)
return diff
def get_commit_by_sha(repository, sha):
try:
commit = repository[sha]
except (ValueError, KeyError, TypeError):
raise JagareError("Commit '%s' is invalid." % sha)
if commit and commit.type == GIT_OBJ_COMMIT:
return commit
|
Python
| 0.000001
|
@@ -2809,9 +2809,9 @@
ts)
-%3E
+=
= 1:
@@ -2873,32 +2873,232 @@
s%5B0%5D, **kwargs)%0A
+ elif len(parents) == 2:%0A diff = diff_commits(repository, parents%5B0%5D, parents%5B-1%5D, **kwargs)%0A elif len(parents) %3E 2:%0A diff = diff_commits(repository, commit, parents%5B0%5D, **kwargs)%0A
else:%0A
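Decoded, the fixed diff_commit distinguishes the parent cases: a single parent or an octopus merge is diffed against the first parent, a two-parent merge is diffed parent-against-parent, and a parentless root commit keeps the old empty-tree behaviour. Reconstructed from the hunks, the function reads:

def diff_commit(repository, commit, **kwargs):
    ''' one commit, default diff with parent '''
    parents = commit.parents
    if len(parents) == 1:
        diff = diff_commits(repository, commit, parents[0], **kwargs)
    elif len(parents) == 2:
        diff = diff_commits(repository, parents[0], parents[-1], **kwargs)
    elif len(parents) > 2:
        diff = diff_commits(repository, commit, parents[0], **kwargs)
    else:  # root commit: diff against the empty tree
        diff = diff_commits(repository, commit, **kwargs)
    return diff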
|
bf2f6a43271c5cd082c1d122937d7b84ec8885c7
|
fix sprite __init__
|
cocos/sprite.py
|
cocos/sprite.py
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Action Sprite
Animating a sprite
==================
To execute any action you need to create an action::
move = MoveBy( (50,0), 5 )
In this case, ``move`` is an action that will move the sprite
50 pixels to the right (``x`` coordinate) and 0 pixel in the ``y`` coordinate
in 5 seconds.
And now tell the sprite to execute it::
sprite.do( move )
'''
__docformat__ = 'restructuredtext'
import cocosnode
from batch import *
import pyglet
from pyglet import image
from pyglet.gl import *
__all__ = [ 'Sprite', # Sprite class
]
class Sprite( BatchableNode, pyglet.sprite.Sprite):
'''Sprites are sprites that can execute actions.
Example::
sprite = Sprite('grossini.png')
'''
def __init__( self, image, position=(0,0), rotation=0, scale=1, opacity = 255, color=(255,255,255), anchor = None ):
'''Initialize the sprite
:Parameters:
`image` : string or image
name of the image resource or a pyglet image.
`position` : tuple
position of the anchor. Defaults to (0,0)
`rotation` : float
the rotation (degrees). Defaults to 0.
`scale` : float
the zoom factor. Defaults to 1.
`opacity` : int
the opacity (0=transparent, 255=opaque). Defaults to 255.
`color` : tuple
the color to colorize the child (RGB 3-tuple). Defaults to (255,255,255).
`anchor` : (float, float)
(x,y)-point from where the image will be positions, rotated and scaled in pixels. For example (image.width/2, image.height/2) is the center (default).
'''
if isinstance(image, str):
image = pyglet.resource.image(image)
pyglet.sprite.Sprite.__init__(self, image)
cocosnode.CocosNode.__init__(self)
if anchor is None:
if isinstance(self.image, pyglet.image.Animation):
anchor = (image.frames[0].image.width / 2,
image.frames[0].image.height / 2)
else:
anchor = image.width / 2, image.height / 2
self.image_anchor = anchor
self.anchor = (0, 0)
#: group.
#: XXX what is this?
self.group = None
#: children group.
#: XXX what is this ?
self.children_group = None
#: position of the sprite in (x,y) coordinates
self.position = position
#: rotation degrees of the sprite. Default: 0 degrees
self.rotation = rotation
#: scale of the sprite where 1.0 the default value
self.scale = scale
#: opacity of the sprite where 0 is transparent and 255 is solid
self.opacity = opacity
#: color of the sprite in R,G,B format where 0,0,0 is black and 255,255,255 is white
self.color = color
def contains(self, x, y):
'''Test whether this (untransformed) Sprite contains the pixel coordinates
given.
'''
sx, sy = self.position
ax, ay = self.image_anchor
sx -= ax
sy -= ay
if x < sx or x > sx + self.width: return False
if y < sy or y > sy + self.height: return False
return True
def _set_anchor_x(self, value):
if isinstance(self.image, pyglet.image.Animation):
for img in self.image.frames:
img.image.anchor_x = value
self._texture.anchor_x = value
else:
self.image.anchor_x = value
self._update_position()
def _get_anchor_x(self):
if isinstance(self.image, pyglet.image.Animation):
return self.image.frames[0].image.anchor_x
else:
return self.image.anchor_x
image_anchor_x = property(_get_anchor_x, _set_anchor_x)
def _set_anchor_y(self, value):
if isinstance(self.image, pyglet.image.Animation):
for img in self.image.frames:
img.image.anchor_y = value
self._texture.anchor_y = value
else:
self.image.anchor_y = value
self._update_position()
def _get_anchor_y(self):
if isinstance(self.image, pyglet.image.Animation):
return self.image.frames[0].image.anchor_y
else:
return self.image.anchor_y
image_anchor_y = property(_get_anchor_y, _set_anchor_y)
def _set_anchor(self, value):
self._set_anchor_x(value[0])
self._set_anchor_y(value[1])
def _get_anchor(self):
return (self._get_anchor_x(), self._get_anchor_y())
image_anchor = property(_get_anchor, _set_anchor)
def draw(self):
self._group.set_state()
if self._vertex_list is not None:
self._vertex_list.draw(GL_QUADS)
self._group.unset_state()
Sprite.supported_classes = Sprite
|
Python
| 0.000005
|
@@ -3681,23 +3681,17 @@
-cocosnode.Cocos
+Batchable
Node
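The one-line hunk swaps the base-class initialiser. Sprite inherits from BatchableNode, which (judging by the `from batch import *` import) wraps CocosNode, so calling the direct parent keeps the whole initialisation chain intact. Abridged, the fixed constructor becomes:

class Sprite(BatchableNode, pyglet.sprite.Sprite):
    def __init__(self, image, position=(0, 0), rotation=0, scale=1,
                 opacity=255, color=(255, 255, 255), anchor=None):
        if isinstance(image, str):
            image = pyglet.resource.image(image)
        pyglet.sprite.Sprite.__init__(self, image)
        BatchableNode.__init__(self)  # was cocosnode.CocosNode.__init__(self)
        # ... remaining attribute setup unchanged ...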
|
f1a84b0dc2d14b25b7d6e2e3f2f2d68002600d1a
|
Update routes
|
proptrunk/proptrunk.py
|
proptrunk/proptrunk.py
|
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def hello_world():
    return render_template('templates/index.html', title='Prop Trunk')
@app.route('/inventory')
def inventory():
return 'Inventory'
@app.route('/checkout')
def checkout():
return 'Checkout'
@app.route('/checkin')
def checkin():
return 'Checkin'
@app.route('/users')
def user():
return 'Users'
@app.route('/reports')
def reports():
    return 'Report'
@app.errorhandler(404)
def not_found(error):
return render_template('error.html'), 404
if __name__ == '__main__':
app.run()
|
Python
| 0.000001
|
@@ -81,18 +81,35 @@
oute('/'
+, methods=%5B'GET'%5D
)%0A
-
def hell
@@ -214,22 +214,43 @@
ventory'
+, methods=%5B'GET'%5D
)%0Adef
+get_
inventor
@@ -278,16 +278,172 @@
ntory'%0A%0A
+@app.route('/inventory', methods=%5B'POST'%5D)%0Adef post_inventory():%0A # Process post body (JSON)%0A # Insert%0A # return object id%0A return 'Inventory'%0A%0A
@app.rou
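Decoded, the patch pins explicit HTTP methods onto the routes and splits the inventory endpoint into GET and POST handlers. A runnable sketch of the resulting pattern (the record only stubs the JSON handling as comments):

from flask import Flask, request

app = Flask(__name__)

@app.route('/inventory', methods=['GET'])
def get_inventory():
    return 'Inventory'

@app.route('/inventory', methods=['POST'])
def post_inventory():
    item = request.get_json()  # process post body (JSON), insert, return object id
    return 'Inventory'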
|
f434e0b58f2040650f773dff65b667f6703948de
|
Version bump
|
proscatter/__init__.py
|
proscatter/__init__.py
|
__version__ = '0.0.1a'
|
Python
| 0.000001
|
@@ -16,8 +16,7 @@
0.0.
-1a
+2
'%0A
|
cbe84c3220a3da4db36f10d84c63dc08231b3b9f
|
fix indentation
|
src/cirrus/plugins/uploaders/fabric_put.py
|
src/cirrus/plugins/uploaders/fabric_put.py
|
#!/usr/bin/env python
"""
_fabric_put_
Uploader plugin that uses fabric to do a remote put
"""
from fabric.operations import put
from cirrus.fabric_helpers import FabricHelper
from cirrus.logger import get_logger
from cirrus.upload_plugins import Uploader
from cirrus.configuration import get_pypi_auth
LOGGER = get_logger()
class Pypi(Uploader):
PLUGGAGE_OBJECT_NAME = 'fabric'
def upload(self, opts, build_artifact):
"""
upload to pypi via fabric over ssh
"""
pypi_conf = self.package_conf.pypi_config()
pypi_auth = get_pypi_auth()
if opts.pypi_url:
pypi_url = opts.pypi_url
else:
pypi_url = pypi_conf['pypi_url']
if pypi_auth['ssh_username'] is not None:
pypi_user = pypi_auth['ssh_username']
else:
pypi_user = pypi_auth['username']
package_dir = pypi_conf['pypi_upload_path']
LOGGER.info("Uploading {0} to {1}".format(build_artifact, pypi_url))
with FabricHelper(
pypi_url,
pypi_user,
pypi_auth['ssh_key']):
# fabric put the file onto the pypi server
put(build_artifact, package_dir, use_sudo=opts.pypi_sudo)
|
Python
| 0.000358
|
@@ -1025,77 +1025,28 @@
per(
-%0A pypi_url,%0A pypi_user,%0A
+pypi_url, pypi_user,
pyp
@@ -1065,17 +1065,16 @@
key'%5D):%0A
-%0A
|
a4b5ac03d693f438dc77b138f007083ec62cc189
|
set on_delete on FKs to be Django>2 compliant
|
shop/models/defaults/commodity.py
|
shop/models/defaults/commodity.py
|
# -*- coding: utf-8 -*-
"""
In django-SHOP, a Commodity product-model is considered a very basic product without any attributes,
which can be used on a generic CMS page to describe anything.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models.fields import PlaceholderField
from filer.fields import image
from djangocms_text_ckeditor.fields import HTMLField
from polymorphic.query import PolymorphicQuerySet
from shop.conf import app_settings
from shop.models.product import BaseProduct, BaseProductManager, CMSPageReferenceMixin, AvailableProductMixin
from shop.models.defaults.mapping import ProductPage
from shop.money.fields import MoneyField
class CommodityMixin(AvailableProductMixin):
"""
Common methods used by both default Commodity models.
"""
def get_price(self, request):
return self.unit_price
if settings.USE_I18N:
assert 'parler' in settings.INSTALLED_APPS, "Requires `django-parler`, if configured as multilingual project"
from parler.managers import TranslatableManager, TranslatableQuerySet
from parler.models import TranslatableModelMixin, TranslatedFieldsModel
from parler.fields import TranslatedField
class ProductQuerySet(TranslatableQuerySet, PolymorphicQuerySet):
pass
class ProductManager(BaseProductManager, TranslatableManager):
queryset_class = ProductQuerySet
@python_2_unicode_compatible
class Commodity(CMSPageReferenceMixin, TranslatableModelMixin, CommodityMixin, BaseProduct):
"""
Generic Product Commodity to be used whenever the merchant does not require product specific
        attributes and just requires a placeholder field to add arbitrary data.
"""
# common product fields
product_code = models.CharField(
_("Product code"),
max_length=255,
unique=True,
)
unit_price = MoneyField(
_("Unit price"),
decimal_places=3,
help_text=_("Net price for this product"),
)
# controlling the catalog
order = models.PositiveIntegerField(
verbose_name=_("Sort by"),
db_index=True,
)
cms_pages = models.ManyToManyField(
'cms.Page',
through=ProductPage,
help_text=_("Choose list view this product shall appear on."),
)
sample_image = image.FilerImageField(
verbose_name=_("Sample Image"),
blank=True,
null=True,
help_text=_("Sample image used in the catalog's list view."),
)
show_breadcrumb = models.BooleanField(
_("Show Breadcrumb"),
default=True,
help_text=_("Shall the detail page show the product's breadcrumb."),
)
placeholder = PlaceholderField("Commodity Details")
quantity = models.PositiveIntegerField(
_("Quantity"),
default=0,
validators=[MinValueValidator(0)],
help_text=_("Available quantity in stock")
)
# translatable fields for the catalog's list- and detail views
product_name = TranslatedField()
slug = TranslatedField()
caption = TranslatedField()
# filter expression used to search for a product item using the Select2 widget
lookup_fields = ['product_code__startswith', 'product_name__icontains']
objects = ProductManager()
class Meta:
app_label = app_settings.APP_LABEL
ordering = ['order']
verbose_name = _("Commodity")
verbose_name_plural = _("Commodities")
def __str__(self):
return self.product_code
class CommodityTranslation(TranslatedFieldsModel):
master = models.ForeignKey(
Commodity,
related_name='translations',
null=True,
)
product_name = models.CharField(
max_length=255,
verbose_name=_("Product Name"),
)
slug = models.SlugField(verbose_name=_("Slug"))
caption = HTMLField(
verbose_name=_("Caption"),
blank=True,
null=True,
help_text=_("Short description for the catalog list view."),
)
class Meta:
app_label = app_settings.APP_LABEL
unique_together = [('language_code', 'master')]
else:
@python_2_unicode_compatible
class Commodity(CMSPageReferenceMixin, CommodityMixin, BaseProduct):
"""
Generic Product Commodity to be used whenever the merchant does not require product specific
        attributes and just requires a placeholder field to add arbitrary data.
"""
# common product fields
product_name = models.CharField(
max_length=255,
verbose_name=_("Product Name"),
)
product_code = models.CharField(
_("Product code"),
max_length=255,
unique=True,
)
unit_price = MoneyField(
_("Unit price"),
decimal_places=3,
help_text=_("Net price for this product"),
)
# controlling the catalog
order = models.PositiveIntegerField(
verbose_name=_("Sort by"),
db_index=True,
)
cms_pages = models.ManyToManyField(
'cms.Page',
through=ProductPage,
help_text=_("Choose list view this product shall appear on."),
)
sample_image = image.FilerImageField(
verbose_name=_("Sample Image"),
blank=True,
null=True,
help_text=_("Sample image used in the catalog's list view."),
)
show_breadcrumb = models.BooleanField(
_("Show Breadcrumb"),
default=True,
help_text=_("Shall the detail page show the product's breadcrumb."),
)
placeholder = PlaceholderField("Commodity Details")
quantity = models.PositiveIntegerField(
_("Quantity"),
default=0,
validators=[MinValueValidator(0)],
help_text=_("Available quantity in stock")
)
# common fields for the catalog's list- and detail views
slug = models.SlugField(verbose_name=_("Slug"))
caption = HTMLField(
verbose_name=_("Caption"),
blank=True,
null=True,
help_text=_("Short description for the catalog list view."),
)
# filter expression used to search for a product item using the Select2 widget
lookup_fields = ['product_code__startswith', 'product_name__icontains']
objects = BaseProductManager()
class Meta:
app_label = app_settings.APP_LABEL
ordering = ('order',)
verbose_name = _("Commodity")
verbose_name_plural = _("Commodities")
def __str__(self):
return self.product_code
|
Python
| 0.000001
|
@@ -2732,32 +2732,100 @@
null=True,%0A
+ default=None,%0A on_delete=models.SET_DEFAULT,%0A
help
@@ -4165,16 +4165,54 @@
tions',%0A
+ on_delete=models.CASCADE,%0A
@@ -5985,32 +5985,100 @@
null=True,%0A
+ default=None,%0A on_delete=models.SET_DEFAULT,%0A
help
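Django 2.0 made on_delete a required argument of ForeignKey, which is what these hunks retrofit: the nullable sample_image gains default=None with on_delete=models.SET_DEFAULT, and the translation's master cascades. In isolation the pattern is:

from django.db import models

class CommodityTranslation(models.Model):
    master = models.ForeignKey(
        'Commodity',
        related_name='translations',
        on_delete=models.CASCADE,  # translations are deleted with their master
    )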
|
e7b27982bbad6461db0e85c14df875815397f353
|
fix test
|
proso_user/api_test.py
|
proso_user/api_test.py
|
from proso.django.test import TestCase
import json
class UserAPITest(TestCase):
def test_anonymous_profile(self):
response = self.client.get('/user/profile/')
self.assertEqual(response.status_code, 404, 'There is no profile for anonymous user.')
def test_signup_and_profile(self):
# signup
response = self.client.post('/user/signup/', json.dumps({
'username': 'new_user',
'email': 'new_user@domain.com',
'password': 'some_password',
'password_check': 'some_password',
}), content_type='application/json')
        self.assertEqual(response.status_code, 201, 'The user is successfully created.')
expected_profile = {
"send_emails": True,
"user": {
"username": "new_user",
"first_name": "",
"last_name": "",
"object_type": "user",
"email": "new_user@domain.com",
"id": 1
},
"object_type": "user_profile",
"id": 1,
"public": False
}
self.assertEqual(
json.loads(response.content)['data'], expected_profile,
'The given profile has been created.'
)
# check profile
response = self.client.get('/user/profile/')
self.assertEqual(response.status_code, 200, 'There is a profile for user logged in.')
self.assertEqual(
json.loads(response.content)['data'], expected_profile,
'The profile matches.'
)
# update profile
response = self.client.post('/user/profile/', json.dumps({
'public': True,
'user': {
'first_name': 'Kvido'
}
}), content_type='application/json')
expected_profile['public'] = True
expected_profile['user']['first_name'] = 'Kvido'
self.assertEqual(response.status_code, 202, 'The profile can be updated.')
self.assertEqual(
json.loads(response.content)['data'], expected_profile,
'The updated profile matches.'
)
def test_signup_without_email(self):
response = self.client.post('/user/signup/', json.dumps({
'username': 'new_user',
'password': 'some_password',
'password_check': 'some_password',
}), content_type='application/json')
self.assertEqual(response.status_code, 400, 'The user without e-mail can not be registered.')
self.assertEqual(json.loads(response.content)['error_type'], 'email_empty')
def test_signup_wrong_password_check(self):
response = self.client.post('/user/signup/', json.dumps({
'username': 'new_user',
'email': 'new_user@domain.com',
'password': 'some_password',
'password_check': 'some_password_wrong',
}), content_type='application/json')
self.assertEqual(response.status_code, 400, 'The user with wrong password check can not be registered.')
self.assertEqual(json.loads(response.content)['error_type'], 'password_not_match')
    def test_session(self):
# check session
response = self.client.get('/user/session/')
self.assertEqual(response.status_code, 200, 'There is session available.')
content = json.loads(response.content)['data']
keys = ['display_height', 'display_width', 'http_user_agent', 'location', 'user_id', 'id', 'object_type']
for key in keys:
self.assertTrue(key in content, '"%s" is in the session' % key)
# update session
update = {
'locale': 'cs_CZ',
'display_width': 666,
'display_height': 777,
'time_zone': 'Prague'
}
response = self.client.post('/user/session/', json.dumps(update), content_type='application/json')
self.assertEqual(response.status_code, 202, 'The session can be modified.')
response = self.client.get('/user/session/')
self.assertEqual(response.status_code, 200, 'There is session available.')
content = json.loads(response.content)['data']
for k, v in update.iteritems():
msg = '"%s" is correct after session is updated.' % k
if k == 'time_zone':
self.assertEqual(content[k]['content'], v, msg)
else:
self.assertEqual(content[k], v, msg)
|
Python
| 0.000004
|
@@ -987,16 +987,49 @@
%22id%22: 1
+,%0A %22staff%22: False,
%0A
|
d332f6586a11d9bb122146155bbd5c981dbf8bd4
|
Add error handling for missing parameters for codeModule image
|
endpoint/image.py
|
endpoint/image.py
|
import os
import magic
import falcon
from sqlalchemy.exc import SQLAlchemyError
from .profile import ALLOWED_MIME_TYPES
from db import session
import model
import util
class Image(object):
def on_get(self, req, resp, context, id):
if context == 'profile':
try:
user = session.query(model.User).\
filter(model.User.id == int(id)).\
first()
except SQLAlchemyError:
session.rollback()
raise
if not user or not user.profile_picture:
resp.status = falcon.HTTP_404
return
image = user.profile_picture
elif context == 'codeExecution':
try:
execution = session.query(model.CodeExecution).get(id)
except SQLAlchemyError:
session.rollback()
raise
if not execution:
resp.status = falcon.HTTP_400
return
if not req.get_param('file'):
resp.status = falcon.HTTP_400
return
image = os.path.join(
util.programming.code_execution_dir(execution.user, execution.module),
os.path.basename(req.get_param('file')))
elif context == 'codeModule':
if not req.get_param('file'):
resp.status = falcon.HTTP_400
return
user_id = int(req.get_param('user'))
module_id = int(req.get_param('module'))
filename = os.path.basename(req.get_param('file'))
image = os.path.join(
util.programming.code_execution_dir(user_id, module_id),
filename
)
else:
resp.status = falcon.HTTP_400
return
if not os.path.isfile(image):
resp.status = falcon.HTTP_400
return
resp.content_type = magic.Magic(mime=True).from_file(image)
resp.stream_len = os.path.getsize(image)
resp.stream = open(image, 'rb')
|
Python
| 0
|
@@ -1358,32 +1358,92 @@
et_param('file')
+ or not req.get_param('module') or not req.get_param('user')
:%0A
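The hunk widens the guard so all three query parameters must be present before the path is built. An equivalent formulation (REQUIRED_PARAMS is a hypothetical name; falcon's get_param returns None for a missing parameter):

REQUIRED_PARAMS = ('file', 'module', 'user')

if any(req.get_param(name) is None for name in REQUIRED_PARAMS):
    resp.status = falcon.HTTP_400
    return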
|
3b053c1f1c490ee9426bf65c5032e24e9f184568
|
Increase delay
|
broken_links/broken_links/settings.py
|
broken_links/broken_links/settings.py
|
# -*- coding: utf-8 -*-
# Scrapy settings for broken_links project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'broken_links'
SPIDER_MODULES = ['broken_links.spiders']
NEWSPIDER_MODULE = 'broken_links.spiders'
# Custom useragent to enable easy server side monitoring
USER_AGENT = "scrapy_link_spider"
# Allow saving non-200 status codes.
HTTPERROR_ALLOW_ALL = True
# Make sure the crawler doesn't cause problems for the servers.
# http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 10.0 # default is 5
AUTOTHROTTLE_MAX_DELAY = 120.0 # default is 60
|
Python
| 0.000002
|
@@ -706,11 +706,9 @@
Y =
-10.
+2
0 #
@@ -750,11 +750,13 @@
= 1
-20.
+0 * 6
0 #
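Decoded, the two hunks leave the throttle settings as below; both values do increase, the start delay from 10 s to 20 s and the maximum from 120 s to 600 s:

AUTOTHROTTLE_START_DELAY = 20  # default is 5
AUTOTHROTTLE_MAX_DELAY = 10 * 60  # default is 60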
|
67a13cbfd66e1d7b023ff26d6160117680b8883e
|
Remove unneeded semi-colons.
|
src/pysws.py
|
src/pysws.py
|
#!/usr/bin/python2.7
'''
Written by Derek Steinmoeller, 2015.
'''
import numpy as np
from numpy.fft import fft, ifft, fftshift
import matplotlib.pyplot as plt
# Run-specific constants.
LX = 3.0e3
LY = 3.0e3
G = 9.81
F = 0.0
FINAL_TIME = 300.0
NX = 128
NY = 128
CFL = 0.45
CONST_DEPTH = 10.0
LOGGING_INTERVAL = 10
ONLINE_VISUALIZATION = True
# Some module config.
np.set_printoptions(threshold='nan', precision=4)
plt.ioff()
# Build physical grid and grid in spectral space.
x_1d, dx = np.linspace(0, LX, NX, False, True)
y_1d, dy = np.linspace(0, LY, NY, False, True)
x, y = np.meshgrid(x_1d, y_1d)
k_1d = fftshift(np.array(range(0 ,NX))*((2*np.pi)/LX))
l_1d = fftshift(np.array(range(0, NY))*((2*np.pi)/LY))
k, l = np.meshgrid(k_1d, l_1d)
# Declare background depth profile.
background_depth = CONST_DEPTH - 3.0*np.exp(-np.square((x - 0.5*LX)/(LX/5.0)))
# Compute bed slopes.
background_depth_xderiv = np.real(ifft(1.j*k*fft(background_depth, axis=1), axis=1))
background_depth_yderiv = np.real(ifft(1.j*l*fft(background_depth, axis=0), axis=0))
# Declare initial conditions.
eta_initial = 1.e-1*np.exp(-np.square((x - 0.5*LX)/(LX/15.0)) -
np.square((y - 0.5*LY)/(LY/15.0)))
u_initial = np.zeros([NY, NX])
v_initial = np.zeros([NY, NX])
# Initialize all storage needed for SW equations.
NUM_FIELDS = 3
q = np.zeros([NY, NX, NUM_FIELDS])
rhs_q = np.zeros([NY, NX, NUM_FIELDS])
res_q = np.zeros([NY, NX, NUM_FIELDS])
flux_qx = np.zeros([NY, NX, NUM_FIELDS])
flux_qy = np.zeros([NY, NX, NUM_FIELDS])
div_q = np.zeros([NY, NX, NUM_FIELDS])
source = np.zeros([NY, NX, NUM_FIELDS])
# Set initial conditions for SW.
q[:,:,0] = background_depth + eta_initial # h
q[:,:,1] = q[:,:,0]*u_initial # hu
q[:,:,2] = q[:,:,0]*v_initial # hv
# Define time-stepper: LSERK4.
LSERK4_STAGES = range(0,5)
RK4A = [ 0.0,
-567301805773.0/1357537059087.0,
-2404267990393.0/2016746695238.0,
-3550918686646.0/2091501179385.0,
-1275806237668.0/842570457699.0];
RK4B = [ 1432997174477.0/9575080441755.0,
5161836677717.0/13612068292357.0,
1720146321549.0/2090206949498.0,
3134564353537.0/4481467310338.0,
2277821191437.0/14882151754819.0];
time = 0.0
count = 0
while time < FINAL_TIME:
# calculate shallow water time step.
max_wave_speed = np.sqrt(G*np.max(q[:,:,0]))
dt = (np.min(np.array([dx, dy])) / max_wave_speed)*CFL
# Standard logging output
if ((count % LOGGING_INTERVAL) == 0) or count == 1:
print "t: ", time, ", dt: ", dt, " max h: ", np.max(q[:,:,0]), " max hu: ", np.max(q[:,:,1])
if ONLINE_VISUALIZATION == True:
plt.clf()
plt.pcolor(x, y, q[:,:,0] - background_depth)
plt.colorbar()
plt.draw()
plt.show(block=False)
# Loop through Runge-Kutta stages.
for intrk in LSERK4_STAGES:
# compute right hand side of sw equations
# Compute shallow water flux vectors and source term.
# [ hu [ hv
# hu^2 + 0.5*g*h^2 & huv
# huv ] hv^2 + 0.5*g*h^2 ]
flux_qx[:,:,0] = q[:,:,1]
flux_qx[:,:,1] = q[:,:,1]*q[:,:,1]/q[:,:,0] + 0.5*G*q[:,:,0]*q[:,:,0]
flux_qx[:,:,2] = q[:,:,1]*q[:,:,2]/q[:,:,0]
flux_qy[:,:,0] = q[:,:,2]
flux_qy[:,:,1] = q[:,:,1]*q[:,:,2]/q[:,:,0]
flux_qy[:,:,2] = q[:,:,2]*q[:,:,2]/q[:,:,0] + 0.5*G*q[:,:,0]*q[:,:,0]
source[:,:,0] = np.zeros([NY, NX])
source[:,:,1] = G*q[:,:,0]*background_depth_xderiv - F*q[:,:,2]
source[:,:,2] = G*q[:,:,0]*background_depth_yderiv + F*q[:,:,1]
for i in range(0, NUM_FIELDS):
# Compute divergence of flux (axis=1 is x, axis=0 is y).
div_q[:,:,i] = np.real(ifft((1.j*k)*fft(flux_qx[:,:,i], axis=1), axis=1)) + np.real(ifft((1.j*l)*fft(flux_qy[:,:,i], axis=0), axis=0))
# Compute RHS of PDE (LHS contains only dq/dt).
rhs_q = -div_q + source
# Compute Runge-Kutta residual.
res_q = RK4A[intrk]*res_q + dt*rhs_q;
# Update fields.
q += RK4B[intrk]*res_q;
# prepare for next time-step.
time += dt
count += 1
print ("======================================================="
"=======================")
print "Simulation complete."
plt.show()
|
Python
| 0.000138
|
@@ -4259,17 +4259,16 @@
dt*rhs_q
-;
%0A%0A
@@ -4316,17 +4316,16 @@
k%5D*res_q
-;
%0A %0A
|
7ccd320aede6ad1674915cd8367206ee1ea3dff3
|
Remove unused import
|
smoothtest/autotest/TestRunner.py
|
smoothtest/autotest/TestRunner.py
|
# -*- coding: utf-8 -*-
'''
Smoothtest
Copyright (c) 2014 Juju. Inc
Code Licensed under MIT License. See LICENSE file.
'''
import importlib
import unittest
import rel_imp; rel_imp.init()
from .base import ChildBase
from smoothtest.autotest.base import TestResult, TestException
class TestRunner(ChildBase):
'''
    Responsibilities
- Import the Test Class
- Run test over all methods or specific methods
- Report any errors
'''
def __init__(self, webdriver=None):
super(TestRunner, self).__init__()
self._set_webdriver(webdriver)
self._already_setup = set()
def _set_webdriver(self, webdriver):
if webdriver:
from ..webunittest.TestCase import TestBase
TestBase.set_webdriver(webdriver)
def test(self, test_paths, argv=[], smoke=False):
'''
:param test_paths: iterable like ['package.module.test_class.test_method', ...]
'''
results = []
if smoke or not test_paths:
self.log.i('Ignoring %r \n (smoke mode or no tests found)'%list(test_paths))
return results
for tpath in test_paths:
class_ = self._import_test(tpath)
if isinstance(class_, TestException):
results.append((tpath, class_))
else:
result = self._run_test(tpath, argv, class_)
results.append((tpath, result))
return results
def io_loop(self, conn, stdin=None, stdout=None, stderr=None):
while True:
self._dispatch_cmds(conn)
def _run_test(self, test_path, argv, class_):
try:
_, _, methstr = self._split_path(test_path)
suite = unittest.TestSuite()
suite.addTest(class_(methstr))
runner = unittest.TextTestRunner()
if (hasattr(class_, 'setUpProcess')
and test_path not in self._already_setup):
class_.setUpProcess(argv)
self._already_setup.add(test_path)
result = runner.run(suite)
return self.to_pickable_result(result)
except Exception as e:
return self.reprex(e)
def _split_path(self, test_path):
return self.split_test_path(test_path, meth=True)
def _import_test(self, test_path):
modstr, clsstr, _ = self._split_path(test_path)
try:
module = importlib.import_module(modstr)
module = reload(module)
class_ = getattr(module, clsstr)
return class_
except Exception as e:
return self.reprex(e)
def smoke_test_module():
from .base import AutotestCmd
test_paths = ['smoothtest.tests.example.Example']
tr = TestRunner()
class DummyIpc(object):
def recv(self):
cmds = [
AutotestCmd('test', (test_paths,), dict(smoke=True)),
]
self.recv = self.recv2
return cmds
def recv2(self):
cmds = [
AutotestCmd(TestRunner._kill_command, (0,), {}),
]
self.recv = lambda : []
return cmds
def send(self, msg):
print 'Sending:', msg
return 1
def close(self):
pass
try:
tr.io_loop(DummyIpc())
except SystemExit as e:
tr.log.i(e)
if __name__ == "__main__":
smoke_test_module()
|
Python
| 0.000001
|
@@ -249,20 +249,8 @@
port
- TestResult,
Tes
|
3e1971b8b61a375db6f379f155437788ed524f81
|
Use Django's new import_string()
|
snorky/backend/django/__init__.py
|
snorky/backend/django/__init__.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import json
from snorky.backend import SnorkyHTTPTransport, SnorkyBackend, SnorkyError
from django.conf import settings
from django.utils.module_loading import import_by_path
from django.db.models.signals import pre_save, post_save, pre_delete, \
post_delete
SNORKY_DATASYNC_SERVICE = getattr(settings, "SNORKY_DATASYNC_SERVICE",
"datasync_backend")
"""The name of the DataSyncService instance registered in Snorky."""
SNORKY_JSON_ENCODER = getattr(settings, 'SNORKY_JSON_ENCODER',
json.JSONEncoder)
SNORKY_JSON_ENCODER = import_by_path(SNORKY_JSON_ENCODER)
"""JSON encoder class used to send messages to Snorky, by default it is
`json.JSONEncoder`."""
class JSONModule(object):
@staticmethod
def dumps(obj, *args, **kwargs):
kwargs['cls'] = SNORKY_JSON_ENCODER
return json.dumps(obj, *args, **kwargs)
@staticmethod
def loads(obj, *args, **kwargs):
return json.loads(obj, *args, **kwargs)
http_transport = SnorkyHTTPTransport(url=settings.SNORKY_BACKEND_URL,
key=settings.SNORKY_API_KEY)
snorky_backend = SnorkyBackend(http_transport, json=JSONModule)
def publish_deltas(deltas):
"""Send deltas to the Snorky server."""
snorky_backend.call(SNORKY_DATASYNC_SERVICE,
"publishDeltas", deltas=deltas)
def authorize_subscription(items):
"""Authorize a subscription with the specified items.
:param items: A list of dictionaries containing the subscription
requests. Each dictionary must specify the fields ``dealer`` and
``query``.
"""
try:
response = snorky_backend.call(SNORKY_DATASYNC_SERVICE,
"authorizeSubscription",
items=items)
return response
except SnorkyError as err:
raise RuntimeError("Error from Snorky server: %s" % err.args[0])
def handle_post_save(sender, instance, created, raw, using, update_fields,
**kwargs):
"""Called after a subscribable model is saved.
If the item was created it publishes an insertion delta with the current
data (which will include also the ``id`` field even if it was not assigned
before saving).
    If the item was updated, it publishes the update delta stored in the
    ``_snorky_delta`` property.
"""
if created:
publish_deltas([{
"type": "insert",
"model": sender.__name__,
"data": instance.jsonify(),
}])
else:
        # Send a saved delta now that it's been saved
publish_deltas([instance._snorky_delta])
instance._snorky_delta = None
def handle_pre_save(sender, instance, raw, using, update_fields, **kwargs):
"""Called when a subscribable model is about to be saved.
Queries the database to get the old data. If no data is found the item is assumed to be new.
If the item did exist before, stores an update delta in ``_snorky_delta``.
"""
existent_object_query = sender.objects.filter(id=instance.id)
created = (len(existent_object_query) == 0)
if not created:
new_data = instance.jsonify()
old_data = existent_object_query[0].jsonify()
delta = {
"type": "update",
"model": sender.__name__,
"newData": new_data,
"oldData": old_data,
}
# post_save will send this delta
instance._snorky_delta = delta
def handle_pre_delete(sender, instance, using, **kwargs):
"""Called when a subscribable model is about to be deleted.
Fetches the current data of the object from the databases and stores it in
an internal property within the model, ``_snorky_delta``."""
data = sender.objects.get(id=instance.id).jsonify()
delta = {
"type": "delete",
"model": sender.__name__,
"data": data,
}
instance._snorky_delta = delta
def handle_post_delete(sender, instance, using, **kwargs):
"""Called after a subscribable model is removed.
Publishes a deletion delta, using the data stored in ``_snorky_delta``.
"""
publish_deltas([instance._snorky_delta])
instance._snorky_delta = None
def rest_framework_jsonify(self):
"""Serialize a model with the default serializer class of Django REST
Framework."""
from rest_framework.settings import api_settings
from rest_framework.renderers import UnicodeJSONRenderer
default_serializer_base = api_settings.DEFAULT_MODEL_SERIALIZER_CLASS
class DefaultSerializer(default_serializer_base):
class Meta:
model = self.__class__
data = DefaultSerializer(self).data
return data
def subscribable(model_class):
"""Decorator that adds signals to make a Django model class emit change
notifications automatically."""
pre_save.connect(receiver=handle_pre_save, sender=model_class)
post_save.connect(receiver=handle_post_save, sender=model_class)
pre_delete.connect(receiver=handle_pre_delete, sender=model_class)
post_delete.connect(receiver=handle_post_delete, sender=model_class)
# Default jsonify method
if not hasattr(model_class, "jsonify"):
model_class.jsonify = rest_framework_jsonify
return model_class
|
Python
| 0
|
@@ -352,16 +352,25 @@
ettings%0A
+try:%0A
from dja
@@ -416,16 +416,94 @@
by_path%0A
+except ImportError:%0A from django.utils.module_loading import import_string%0A
from dja
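Decoded, the patch wraps the deprecated import in a try/except so the module imports on both old and new Django. A common variant of the idiom aliases the old name so call sites stay unchanged (the alias is an assumption; the record itself keeps both names):

try:
    # Django < 1.7 (import_by_path was removed in 1.9)
    from django.utils.module_loading import import_by_path as import_string
except ImportError:
    # Django >= 1.7
    from django.utils.module_loading import import_string

encoder_cls = import_string('json.JSONEncoder')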
|
e2d29e9260528c2ab055d204d0ab547acd8a9560
|
Use correct section name.
|
zipline/pipeline/pipeline.py
|
zipline/pipeline/pipeline.py
|
from zipline.utils.preprocess import expect_types, optional
from .term import Term
from .filters import Filter
from .graph import TermGraph
class Pipeline(object):
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
`Term` instances, and 'screen', a Filter representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
`attach_pipeline` in their `initialize` function to register that the
pipeline should be computed each trading day. The outputs of a pipeline on
a given day can be accessed by calling `pipeline_outputs` in `handle_data`
or `before_trading_start`.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.term.Filter, optional
Initial screen.
Methods
-------
add
remove
set_screen
Attributes
----------
columns
screen
"""
__slots__ = ('_columns', '_screen', '__weakref__')
@expect_types(
columns=optional(dict),
screen=optional(Filter),
)
def __init__(self, columns=None, screen=None):
if columns is None:
columns = {}
self._columns = columns
self._screen = screen
@property
def columns(self):
"""
The columns currently applied to this pipeline.
"""
return self._columns
@property
def screen(self):
"""
The screen applied to the rows of this pipeline.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
column : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
self._columns[name] = term
@expect_types(name=str)
def remove(self, name):
"""
Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.term.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter)
def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameter
---------
filter : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
def to_graph(self, screen_name, default_screen):
"""
Compile into a TermGraph.
Parameters
----------
screen_name : str
Name to supply for self.screen.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
"""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[screen_name] = screen
return TermGraph(columns)
|
Python
| 0
|
@@ -3171,16 +3171,17 @@
arameter
+s
%0A
@@ -3182,32 +3182,33 @@
---------
+-
%0A filter
|
e4b4f808e984da8d98f9c6bd9f2816bed4d04a50
|
Move stream splitting outside of exception "else" clause
|
yturl.py
|
yturl.py
|
#!/usr/bin/env python
try:
from urllib.request import urlopen
from urllib.parse import parse_qsl, urlparse
from itertools import chain, zip_longest
except ImportError: # Python 2 fallback
from urllib import urlopen
from urlparse import parse_qsl, urlparse
from itertools import chain, izip_longest as zip_longest
itags = {
# itag v-dimensions v-bitrate a-bitrate a-samplerate v-encoding
"5": (400*240, 0.25, 64, 22.05, "h263"),
"6": (480*270, 0.8, 64, 22.05, "h263"),
"13": (176*144, 0.5, 64, 22.05, "mp4v"),
"17": (176*144, 2, 64, 22.05, "mp4v"),
"18": (640*360, 0.5, 96, 44.1, "h264"),
"22": (1280*720, 2.9, 192, 44.1, "h264"),
"34": (640*360, 0.5, 128, 44.1, "h264"),
"35": (854*480, 1, 128, 44.1, "h264"),
"36": (320*240, 0.17, 38, 44.1, "mp4v"),
"37": (1920*1080, 2.9, 192, 44.1, "h264"),
"38": (4096*3072, 5, 192, 44.1, "h264"),
"43": (640*360, 0.5, 128, 44.1, "vp8"),
"44": (854*480, 1, 128, 44.1, "vp8"),
"45": (1280*720, 2, 192, 44.1, "vp8"),
"46": (1920*1080, 2, 192, 44.1, "vp8"),
}
itags_by_quality = sorted(itags, reverse=True, key=lambda itag: itags[itag])
def video_id_from_url(url):
"""
Parse a video ID from a YouTube URL.
:param url: a YouTube URL or string containing a video ID
:returns: the video ID contained in the URL or string
"""
parsed_url = urlparse(url)
url_params = dict(parse_qsl(parsed_url.query))
return url_params.get("v", parsed_url.path.split("/")[-1])
def itags_by_similarity(desired_itag):
"""
Return itags ordered by the similarity to the desired one. Similarity is
determined by seeking outwards from the index of the desired itag in the
sorted list of known itags.
:param desired_itag: the itag most desired
:returns: itags in order of similarity to the desired one
"""
desired_index = itags_by_quality.index(desired_itag)
pairs_by_distance = zip_longest(
itags_by_quality[desired_index::-1],
itags_by_quality[desired_index+1:],
)
return chain(*pairs_by_distance)
def itags_for_video(video_id):
"""
Return the available itags for a video with their associated URLs.
:param video_id: the video ID to get itags for
:returns: tuples of itags and their media URL
"""
url = "http://youtube.com/get_video_info?hl=en&video_id=" + video_id
res_handle = urlopen(url)
res_data = dict(parse_qsl(res_handle.read().decode("utf8")))
try:
streams_raw = res_data["url_encoded_fmt_stream_map"]
except KeyError:
raise LookupError(res_data["reason"])
else:
streams = streams_raw.split(",")
for stream in streams:
video = dict(parse_qsl(stream))
yield video["itag"], video["url"]
def itag_from_quality(group):
"""
Return the itag representing a quality group name, or if the quality is a
known itag, return that itag.
:param name: the name of the quality group to be parsed
:returns: the associated itag, or None if the group is unknown
"""
groups = {
"low": -1,
"medium": len(itags_by_quality) // 2,
"high": 0,
}
try:
return itags_by_quality[groups[group]]
except KeyError:
if group in itags_by_quality:
return group
def most_similar_available_itag(itags_by_similarity, itags_for_video):
"""
Return the most similar available itag to the desired itag. See
itags_by_similarity for information about how "similarity" is determined.
:param itags_by_similarity: a list of itags, from the most desired to least
desired
:param itags_for_video: the itags available for this video
:returns: the most similar available itag
"""
for itag in itags_by_similarity:
if itag in itags_for_video:
return itag
|
Python
| 0.000003
|
@@ -2961,22 +2961,9 @@
%22%5D)%0A
- else:%0A
+%0A
@@ -2995,20 +2995,16 @@
it(%22,%22)%0A
-
for
@@ -3022,20 +3022,16 @@
treams:%0A
-
@@ -3062,20 +3062,16 @@
tream))%0A
-
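Decoded, the patch hoists the stream handling out of the else clause, so the try/except guards only the dictionary lookup and the generator body reads linearly; abridged, with parse_qsl and urlopen as imported in the record:

def itags_for_video(video_id):
    url = "http://youtube.com/get_video_info?hl=en&video_id=" + video_id
    res_data = dict(parse_qsl(urlopen(url).read().decode("utf8")))
    try:
        streams_raw = res_data["url_encoded_fmt_stream_map"]
    except KeyError:
        raise LookupError(res_data["reason"])

    for stream in streams_raw.split(","):
        video = dict(parse_qsl(stream))
        yield video["itag"], video["url"]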
|
c906f44d905fd318d6ce018a6159212e82483513
|
Use the ssh_connection section for scp_if_ssh
|
lib/ansible/constants.py
|
lib/ansible/constants.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pwd
import sys
import ConfigParser
def get_config(p, section, key, env_var, default):
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key)
except:
return default
return default
def load_config_file():
p = ConfigParser.ConfigParser()
path1 = os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/.ansible.cfg"))
path2 = os.getcwd() + "/ansible.cfg"
path3 = "/etc/ansible/ansible.cfg"
if os.path.exists(path1):
p.read(path1)
elif os.path.exists(path2):
p.read(path2)
elif os.path.exists(path3):
p.read(path3)
else:
return None
return p
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
if path:
path = os.path.expanduser(path)
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# Needed so the RPM can call setup.py and have modules land in the
# correct location. See #1277 for discussion
if getattr(sys, "real_prefix", None):
DIST_MODULE_PATH = os.path.join(sys.prefix, 'share/ansible/')
else:
DIST_MODULE_PATH = '/usr/share/ansible/'
# sections in config file
DEFAULTS='defaults'
# configurable things
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts'))
DEFAULT_MODULE_PATH = shell_expand_path(get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', DIST_MODULE_PATH))
DEFAULT_REMOTE_TMP = shell_expand_path(get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp'))
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False)
DEFAULT_REMOTE_PORT = int(get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', 22))
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'paramiko')
DEFAULT_SCP_IF_SSH = get_config(p, DEFAULTS, 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_ACTION_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'action_plugins', None, '/usr/share/ansible_plugins/action_plugins'))
DEFAULT_CALLBACK_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'callback_plugins', None, '/usr/share/ansible_plugins/callback_plugins'))
DEFAULT_CONNECTION_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'connection_plugins', None, '/usr/share/ansible_plugins/connection_plugins'))
DEFAULT_LOOKUP_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'lookup_plugins', None, '/usr/share/ansible_plugins/lookup_plugins'))
DEFAULT_VARS_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'vars_plugins', None, '/usr/share/ansible_plugins/vars_plugins'))
DEFAULT_FILTER_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'filter_plugins', None, '/usr/share/ansible_plugins/filter_plugins'))
# non-configurable things
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ZEROMQ_PORT = int(get_config(p, 'fireball', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099))
|
Python
| 0.000007
|
@@ -4017,32 +4017,40 @@
t_config(p,
-DEFAULTS
+'ssh_connection'
, 'scp_if_ss
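After the patch, scp_if_ssh is read from the ssh_connection section instead of defaults, matching how ANSIBLE_SSH_ARGS is read further down. The resulting line, with the ansible.cfg stanza a user would now write shown as a comment:

# [ssh_connection]
# scp_if_ssh = True
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh',
                                'ANSIBLE_SCP_IF_SSH', False)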
|
51771a5a0365c4f70df77ecd5d76ed51b2be1fe2
|
add some more messages for tests
|
csp/test/basic.py
|
csp/test/basic.py
|
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from csp.test_helpers import async
from csp import Channel, put, take, go, sleep, stop
from csp import put_then_callback, take_then_callback
class Putting(TestCase):
@async
def test_immediate_taken(self):
ch = Channel()
def taking():
yield take(ch)
go(taking())
self.assertEqual((yield put(ch, 42)), True)
@async
def test_immediate_buffered(self):
ch = Channel(1)
self.assertEqual((yield put(ch, 42)), True)
@async
def test_immediate_closed(self):
ch = Channel()
ch.close()
self.assertEqual((yield put(ch, 42)), False)
@async
def test_parked_taken(self):
ch = Channel()
def taking():
yield sleep(0.005)
yield take(ch)
go(taking())
self.assertEqual((yield put(ch, 42)), True)
@async
def test_parked_closed(self):
ch = Channel()
def closing():
yield sleep(0.005)
ch.close()
go(closing())
self.assertEqual((yield put(ch, 42)), False)
def test_parked_buffered(self):
d = Deferred()
ch = Channel(1)
var = {"count": 0}
def inc(ok):
var["count"] += 1
put_then_callback(ch, 42, inc)
put_then_callback(ch, 42, inc)
def taken(value):
def checking():
yield None
self.assertEqual(var["count"], 2)
d.callback(None)
go(checking())
take_then_callback(ch, taken)
return d
class Goroutine(TestCase):
@async
def test_yielding_normal_value(self):
values = [42, [42], (42,), {"x": 42}, None, True, False, lambda: None]
for value in values:
self.assertEqual((yield value), value)
@async
def test_returning_value(self):
def ident(x):
yield stop(x)
ch = go(ident(42), chan=True)
self.assertEqual((yield take(ch)), 42, "returned value is delivered")
self.assertEqual(ch.is_closed(), True, "output channel is closed")
|
Python
| 0
|
@@ -1524,16 +1524,50 @@
unt%22%5D, 2
+, %22second (buffered) put succeeds%22
)%0A
@@ -1914,16 +1914,59 @@
), value
+, %22yielded value is bounced back untouched%22
)%0A%0A @
|
6f6bfa4601ecbba57b7dbce32f337141fb4c9add
|
long_description
|
src/setup.py
|
src/setup.py
|
# Copyright (c) 2015, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
import dws
setup(name='drop',
version=dws.__version__,
author='The DjaoDjin Team',
author_email='support@djaodjin.com',
packages=['dws'],
url='https://github.com/djaodjin/drop/',
download_url='https://github.com/djaodjin/drop/tarball/%s' \
% dws.__version__,
license='BSD',
description='DjaoDjin workspace management',
long_description=open('README.md').read(),
)
|
Python
| 0.999367
|
@@ -1758,16 +1758,19 @@
n=open('
+../
README.m
|
27d5f63aa4b460aca54eb0b3f12c86e1733b1cf4
|
version bump to 0.0.2
|
src/setup.py
|
src/setup.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = 'service-deployment-tools',
version = '0.0.1',
provides = ["service_deployment_tools"],
author = 'Kyle Anderson',
author_email = 'kwa@yelp.com',
description = 'Tools for Yelps SOA infrastructure',
packages = find_packages(exclude=["tests"]),
setup_requires = ['setuptools'],
include_package_data=True,
install_requires = [
'isodate',
'service-configuration-lib >= 0.5.0',
'marathon',
'argparse'
],
scripts = [
'service_deployment_tools/setup_marathon_job.py',
'service_deployment_tools/setup_chronos_jobs.py',
]
)
|
Python
| 0.000001
|
@@ -168,9 +168,9 @@
0.0.
-1
+2
',%0A
|
794af1c9c3368af21eab23dcc39818edae53e022
|
Simplify the trajectory in MacSmoother.
|
src/prpy/planning/mac_smoother.py
|
src/prpy/planning/mac_smoother.py
|
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <mkoval@cs.cmu.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import CopyTrajectory
from base import BasePlanner, PlanningError, PlanningMethod, UnsupportedPlanningError
from openravepy import Planner, PlannerStatus, RaveCreatePlanner, openrave_exception

class MacSmoother(BasePlanner):
    def __init__(self):
        super(MacSmoother, self).__init__()

        self.blender = RaveCreatePlanner(self.env, 'PrSplineMacBlender')
        if self.blender is None:
            raise UnsupportedPlanningError(
                'Unable to create PrSplineMacBlender planner. Is or_pr_spline'
                ' installed and in your OPENRAVE_PLUGIN path?'
            )

        self.retimer = RaveCreatePlanner(self.env, 'PrSplineMacTimer')
        if self.retimer is None:
            raise UnsupportedPlanningError(
                'Unable to create PrSplineMacRetimer planner. Is or_pr_spline'
                ' installed and in your OPENRAVE_PLUGIN path?'
            )

    @PlanningMethod
    def RetimeTrajectory(self, robot, path):
        # Copy the input trajectory into the planning environment. This is
        # necessary for two reasons: (1) the input trajectory may be in another
        # environment and/or (2) the retimer modifies the trajectory in-place.
        output_traj = CopyTrajectory(path, env=self.env)

        # Blend the piecewise-linear input trajectory. The blender outputs a
        # collision-free path, consisting of piecewise-linear segments and
        # quintic blends through waypoints.
        try:
            params = Planner.PlannerParameters()
            self.blender.InitPlan(robot, params)
            status = self.blender.PlanPath(output_traj)
            if status not in [ PlannerStatus.HasSolution,
                               PlannerStatus.InterruptedWithSolution ]:
                raise PlanningError('Blending trajectory failed.')
        except openrave_exception as e:
            raise PlanningError('Blending trajectory failed: ' + str(e))

        # Find the time-optimal trajectory that follows the blended path
        # subject to joint velocity and acceleration limits.
        try:
            params = Planner.PlannerParameters()
            self.retimer.InitPlan(robot, params)
            status = self.retimer.PlanPath(output_traj)
            if status not in [ PlannerStatus.HasSolution,
                               PlannerStatus.InterruptedWithSolution ]:
                raise PlanningError('Timing trajectory failed.')
        except openrave_exception as e:
            raise PlanningError('Timing trajectory failed: ' + str(e))

        return output_traj
|
Python
| 0.000001
|
@@ -1652,16 +1652,36 @@
ajectory
+, SimplifyTrajectory
%0Afrom ba
@@ -2926,16 +2926,77 @@
elf.env)
+%0A output_traj = SimplifyTrajectory(output_traj, robot)
%0A%0A
|
5bda2697254744dc42420751c1b49219f65be380
|
add head 'animation'
|
src/snake.py
|
src/snake.py
|
#!/usr/bin/python

from lib_common import *

class Snake:
	def __init__(self, x=0, y=0, size=0):
		self.position = Vec2(x,y)
		self.speed = Vec2(0,0)
		self.body_parts = list()
		self.body_parts.append(self.position)
		self.turning_points = list()
		self.turning_points.append(self.position)
		for i in xrange(1, size+1):
			self.body_parts.append(Vec2(x-i, y))

	def turn(self, x,y):
		self.speed.x = x
		self.speed.y = y

	def update(self):
		for part in self.body_parts:
			part.add(self.speed)

	def size(self):
		return len(self.body_parts)

	def toString(self):
		msg = "SNAKE size: %d\n" % self.size()
		msg += "pos: (%s) $ speed (%s)" %(self.position.toString(), self.speed.toString())

	def draw(self):
		draw_cur(self.getX(), self.getY(), "$")

		for piece in self.body_parts[1:]:
			draw_cur(piece.x, piece.y, "=")

	def getX(self):
		return self.position.x

	def getY(self):
		return self.position.y

		return msg
|
Python
| 0.003916
|
@@ -547,16 +547,427 @@
parts)%0A%0A
+%0A%09def draw(self):%0A%09%09head_symbol = self.get_head_symbol()%0A%09%09draw_cur(self.getX(), self.getY(), head_symbol)%0A%09%09%0A%09%09for piece in self.body_parts%5B1:%5D:%0A%09%09%09draw_cur(piece.x, piece.y, %22=%22)%0A%0A%09def get_head_symbol(self):%0A%09%09speed_x = self.getSpeedX()%0A%09%09speed_y = self.getSpeedY()%0A%09%09%0A%09%09if speed_x %3E 0:%0A%09%09%09return %22%3E%22%0A%0A%09%09elif speed_x %3C 0:%0A%09%09%09return %22%3C%22%0A%0A%09%09elif speed_y %3E 0:%0A%09%09%09return %22%5E%22%0A%0A%09%09elif speed_y %3C 0:%0A%09%09%09return %22V%22%0A%0A%0A%0A
%09def toS
@@ -1106,26 +1106,40 @@
ing())%0A%0A
+%09%09return msg%0A%0A
%0A%09def
-draw
+getX
(self):%0A
@@ -1144,124 +1144,74 @@
:%0A%09%09
-draw_cur(self.getX(), self.getY(), %22$%22)%0A%09%09%0A%09%09for piece in self.body_parts%5B1:%5D:%0A%09%09%09draw_cur(piece.x, piece.y, %22=%22)%0A%09%09
+return self.position.x%0A%0A%0A%09def getY(self):%0A%09%09return self.position.y
%0A%0A%09d
@@ -1208,32 +1208,37 @@
tion.y%0A%0A%09def get
+Speed
X(self):%0A%09%09retur
@@ -1244,37 +1244,33 @@
rn self.
-position
+speed
.x%0A%0A
-%0A
%09def get
Y(self):
@@ -1253,32 +1253,37 @@
peed.x%0A%0A%09def get
+Speed
Y(self):%0A%09%09retur
@@ -1293,29 +1293,16 @@
elf.
-position.y%0A%0A%09%09return msg
+speed.y%0A%0A%09%09
%0A
|
075877c728e2db439b53a894fc66dff7d35922ba
|
Add search in landing
|
cultures/views.py
|
cultures/views.py
|
from django.shortcuts import get_object_or_404

from django.views.generic import(
    ListView,
    DetailView,
    TemplateView
)

from .models import (
    Culture,
    God,
    Temple,
    Museum,
    CultureHasPeriod,
)


class LandingView(ListView):
    template_name = 'prueba.html'
    model = Culture


class AdventureView(DetailView):
    context_object_name = 'culture'
    template_name = 'landing-maya.html'
    model = Culture

    def get_object(self):
        culture = get_object_or_404(Culture, name__icontains=self.kwargs['name']) # NOQA
        return culture

    def get_context_data(self, **kwargs):
        context = super(AdventureView, self).get_context_data(**kwargs)
        context['gods'] = God.objects.filter(culture=self.get_object().pk)
        context['temples'] = Temple.objects.filter(culture=self.get_object().pk) # NOQA
        context['museums'] = Museum.objects.filter(cultures=self.get_object().pk) # NOQA
        context['periods'] = CultureHasPeriod.objects.filter(culture=self.get_object().pk).order_by('pk') # NOQA
        return context
|
Python
| 0.000001
|
@@ -41,16 +41,48 @@
or_404%0A%0A
+from django.db.models import Q%0A%0A
from dja
@@ -335,16 +335,588 @@
ulture%0A%0A
+ def get_queryset(self):%0A queryset = self.queryset%0A q = self.request.GET.get('q', None)%0A if q is not None:%0A queryset = Culture.objects.filter(%0A Q(name__icontains=q) %7C Q(summary__icontains=q) %7C%0A Q(religion__name__icontains=q) %7C Q(religion__description__icontains=q) %7C # NOQA%0A Q(region__name__icontains=q) %7C Q(region__country__name__icontains=q) %7C # NOQA%0A Q(region__description__icontains=q)%0A )%0A return queryset%0A else:%0A return queryset%0A%0A
%0Aclass A
|
c5a30d1a3c15c5c35c81c0e561dc2b700d309e13
|
Make objgraph optional
|
src/mcedit2/util/objgraphwidget.py
|
src/mcedit2/util/objgraphwidget.py
|
"""
objgraphwidget
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import contextlib
import inspect
import os
import tempfile
from PySide import QtGui
import logging
from PySide.QtCore import Qt
from PySide.QtGui import QWidget
import gc
from mcedit2.rendering import rendergraph
from mcedit2.widgets.layout import Column, Row
log = logging.getLogger(__name__)
import objgraph
class ObjGraphWidget(QWidget):
def __init__(self, *a, **kw):
super(ObjGraphWidget, self).__init__(*a, **kw)
self.inputWidget = QtGui.QLineEdit()
self.listWidget = QtGui.QListWidget()
self.scrollArea = QtGui.QScrollArea()
self.imageView = QtGui.QLabel()
#self.scrollArea.setMinimumSize(300, 300)
self.scrollArea.setWidget(self.imageView)
for name, count in objgraph.most_common_types(100):
item = QtGui.QListWidgetItem()
item.setText("%s (%d)" % (name, count))
item.setData(Qt.UserRole, name)
self.listWidget.addItem(item)
self.listWidget.itemSelectionChanged.connect(self.itemChanged)
refsButton = QtGui.QPushButton("Refs", clicked=self.showRefs)
backrefsButton = QtGui.QPushButton("Backrefs", clicked=self.showBackrefs)
graphButton = QtGui.QPushButton("Graph", clicked=self.showGraph)
garbageButton = QtGui.QPushButton("Garbage", clicked=self.showGarbage)
inputRow = Row(self.inputWidget, refsButton, backrefsButton, garbageButton, graphButton)
self.widthLimitBox = QtGui.QSpinBox(value=14)
self.depthLimitBox = QtGui.QSpinBox(value=7)
limitRow = Row(QtGui.QLabel("Graph Width"), self.widthLimitBox, QtGui.QLabel("Graph Depth"), self.depthLimitBox)
self.setLayout(Column(inputRow, limitRow, self.listWidget, (self.scrollArea, 1)))
self.setMinimumSize(800, 600)
def itemChanged(self):
items = self.listWidget.selectedItems()
if len(items) == 0:
return
objType = items[0].data(Qt.UserRole)
self.inputWidget.setText(objType)
self.showBackrefs()
@contextlib.contextmanager
def showTempImage(self):
fn = tempfile.mktemp('chain.png')
#fn = "graph.png"
yield fn
image = QtGui.QImage(fn)
self.imageView.setPixmap(QtGui.QPixmap(image))
self.imageView.setFixedSize(image.size())
os.unlink(fn)
def showGarbage(self):
with self.showTempImage() as fn:
objgraph.show_refs(gc.garbage, filename=fn)
def showRefs(self):
objType = str(self.inputWidget.text())
with self.showTempImage() as fn:
objgraph.show_refs(objgraph.by_type(objType), filename=fn)
def showBackrefs(self):
objType = str(self.inputWidget.text())
with self.showTempImage() as fn:
objgraph.show_chain(objgraph.find_backref_chain(objgraph.by_type(objType)[0],
objgraph.is_proper_module),
filename=fn)
def showGraph(self):
from mcedit2 import editorapp
editorApp = editorapp.MCEditApp.app
objName = str(self.inputWidget.text()) or "editorApp"
obj = eval(objName)
if isinstance(obj, rendergraph.RenderNode):
def edge_func(x):
return x.children
else:
def edge_func(x):
return gc.get_referents(x)
with self.showTempImage() as fn:
objgraph.show_graph(obj, edge_func=edge_func, swap_source_target=True,
extra_ignore=(str, int),
max_depth=self.depthLimitBox.value(),
too_many=self.widthLimitBox.value(), filename=fn, filter=lambda x: not inspect.isclass(x))
"editorApp.sessions[0].editorTab.views[5].worldView.renderGraph"
|
Python
| 0.000017
|
@@ -235,41 +235,8 @@
Qt%0A
-from PySide.QtGui import QWidget%0A
impo
@@ -366,16 +366,25 @@
ame__)%0A%0A
+try:%0A
import o
@@ -390,16 +390,56 @@
objgraph
+%0Aexcept ImportError:%0A objgraph = None
%0A%0Aclass
@@ -453,16 +453,22 @@
hWidget(
+QtGui.
QWidget)
@@ -555,24 +555,250 @@
(*a, **kw)%0A%0A
+ if objgraph is None:%0A self.setLayout(Row(QtGui.QLabel(%22objgraph is not installed (andyou probably don't have GraphViz %22%0A %22either...) %22), None))%0A return%0A%0A
self
|
1d59da17f9b938805a37adcf5eeb39ef2545a9f4
|
Store issue label with activity entry
|
src/sentry/plugins/bases/issue.py
|
src/sentry/plugins/bases/issue.py
|
"""
sentry.plugins.bases.issue
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.utils.html import escape
from django.utils.safestring import mark_safe
from social_auth.models import UserSocialAuth
from sentry.models import GroupMeta, Activity
from sentry.plugins import Plugin
from sentry.utils.auth import get_auth_providers
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
class NewIssueForm(forms.Form):
title = forms.CharField(max_length=200, widget=forms.TextInput(attrs={'class': 'span9'}))
description = forms.CharField(widget=forms.Textarea(attrs={'class': 'span9'}))
class IssueTrackingPlugin(Plugin):
# project_conf_form = BaseIssueOptionsForm
new_issue_form = NewIssueForm
create_issue_template = 'sentry/plugins/bases/issue/create_issue.html'
not_configured_template = 'sentry/plugins/bases/issue/not_configured.html'
needs_auth_template = 'sentry/plugins/bases/issue/needs_auth.html'
auth_provider = None
def _get_group_body(self, request, group, event, **kwargs):
result = []
for interface in event.interfaces.itervalues():
output = safe_execute(interface.to_string, event)
if output:
result.append(output)
return '\n\n'.join(result)
def _get_group_description(self, request, group, event):
output = [
absolute_uri(group.get_absolute_url()),
]
body = self._get_group_body(request, group, event)
if body:
output.extend([
'',
'```',
body,
'```',
])
return '\n'.join(output)
def _get_group_title(self, request, group, event):
return event.error()
def is_configured(self, request, project, **kwargs):
raise NotImplementedError
def get_auth_for_user(self, user, **kwargs):
"""
Return a ``UserSocialAuth`` object for the given user based on this plugins ``auth_provider``.
"""
assert self.auth_provider, 'There is no auth provider configured for this plugin.'
if not user.is_authenticated():
return None
try:
return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0]
except IndexError:
return None
def needs_auth(self, request, project, **kwargs):
"""
Return ``True`` if the authenticated user needs to associate an auth service before
performing actions with this plugin.
"""
if self.auth_provider is None:
return False
if not request.user.is_authenticated():
return True
return bool(not UserSocialAuth.objects.filter(user=request.user, provider=self.auth_provider).exists())
def get_new_issue_title(self, **kwargs):
"""
Return a string for the "Create new issue" action label.
"""
return 'Create %s Issue' % self.get_title()
def get_new_issue_form(self, request, group, event, **kwargs):
"""
Return a Form for the "Create new issue" page.
"""
return self.new_issue_form(request.POST or None, initial=self.get_initial_form_data(request, group, event))
def get_issue_url(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return an absolute URL to the issue's details
page.
"""
raise NotImplementedError
def get_issue_label(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return a string representing the issue.
e.g. GitHub represents issues as GH-XXX
"""
return '#%s' % issue_id
def create_issue(self, request, group, form_data, **kwargs):
"""
Creates the issue on the remote service and returns an issue ID.
"""
raise NotImplementedError
def get_initial_form_data(self, request, group, event, **kwargs):
return {
'description': self._get_group_description(request, group, event),
'title': self._get_group_title(request, group, event),
}
def has_auth_configured(self, **kwargs):
if not self.auth_provider:
return True
return self.auth_provider in get_auth_providers()
def view(self, request, group, **kwargs):
has_auth_configured = self.has_auth_configured()
if not (has_auth_configured and self.is_configured(project=group.project, request=request)):
if self.auth_provider:
required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider]
else:
required_auth_settings = None
return self.render(self.not_configured_template, {
'title': self.get_title(),
'project': group.project,
'has_auth_configured': has_auth_configured,
'required_auth_settings': required_auth_settings,
})
if self.needs_auth(project=group.project, request=request):
return self.render(self.needs_auth_template, {
'title': self.get_title(),
'project': group.project,
})
if GroupMeta.objects.get_value(group, '%s:tid' % self.get_conf_key(), None):
return None
prefix = self.get_conf_key()
event = group.get_latest_event()
form = self.get_new_issue_form(request, group, event)
if form.is_valid():
try:
issue_id = self.create_issue(
group=group,
form_data=form.cleaned_data,
request=request,
)
except forms.ValidationError as e:
form.errors['__all__'] = [u'Error creating issue: %s' % e]
if form.is_valid():
GroupMeta.objects.set_value(group, '%s:tid' % prefix, issue_id)
issue_information = {
'title': form.cleaned_data['title'],
'provider': self.get_title(),
'location': self.get_issue_url(group, issue_id),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
return self.redirect(group.get_absolute_url())
context = {
'form': form,
'title': self.get_new_issue_title(),
}
return self.render(self.create_issue_template, context)
def actions(self, request, group, action_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return action_list
prefix = self.get_conf_key()
if not GroupMeta.objects.get_value(group, '%s:tid' % prefix, None):
action_list.append((self.get_new_issue_title(), self.get_url(group)))
return action_list
def tags(self, request, group, tag_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return tag_list
prefix = self.get_conf_key()
issue_id = GroupMeta.objects.get_value(group, '%s:tid' % prefix)
if not issue_id:
return tag_list
tag_list.append(mark_safe('<a href="%s">%s</a>' % (
self.get_issue_url(group=group, issue_id=issue_id),
escape(self.get_issue_label(group=group, issue_id=issue_id)),
)))
return tag_list
def get_issue_doc_html(self, **kwargs):
return ""
IssuePlugin = IssueTrackingPlugin
|
Python
| 0
|
@@ -6324,32 +6324,111 @@
oup, issue_id),%0A
+ 'label': self.get_issue_label(group=group, issue_id=issue_id),%0A
%7D%0A
|
944ba46e135aea3b58dad63550aeba6cb97bf05e
|
Set manual episode limit to FUNimation episode feed
|
src/services/stream/funimation.py
|
src/services/stream/funimation.py
|
# All shows: http://www.funimation.com/feeds/ps/shows?limit=100000
# Single show: http://www.funimation.com/feeds/ps/videos?ut=FunimationSubscriptionUser&show_id=7556914

from logging import debug, info, warning, error
from datetime import datetime

from .. import AbstractServiceHandler
from data.models import Episode

class ServiceHandler(AbstractServiceHandler):
	_show_url = "http://funimation.com/shows/{id}"
	_episode_feed = "http://funimation.com/feeds/ps/videos?ut=FunimationSubscriptionUser&show_id={id}"
	_episode_url = "http://www.funimation.com/shows/{show_slug}/videos/official/{ep_slug}?watch=sub"

	def __init__(self):
		super().__init__("funimation", "FUNimation", False)

	def get_latest_episode(self, stream, **kwargs):
		episodes = self._get_feed_episodes(stream.show_id, **kwargs)
		if not episodes or len(episodes) == 0:
			debug("No episodes found")
			return None

		# Hope the episodes were parsed in order and iterate down looking for the latest episode
		# The show-specific feed was likely used, but not guaranteed
		for episode in episodes:
			if _is_valid_episode(episode, stream.show_id):
				return self._digest_episode(episode, stream)

		debug("Episode not found")
		return None

	def get_stream_link(self, stream):
		# Just going to assume it's the correct service
		return self._show_url.format(id=stream.show_key)

	def _get_feed_episodes(self, show_id, **kwargs):
		"""
		Always returns a list.
		"""
		info("Getting episodes for Funimation/{}".format(show_id))

		# Send request
		url = self._episode_feed.format(id=show_id)
		response = self.request(url, json=True, **kwargs)
		if response is None:
			error("Cannot get latest show for Funimation/{}".format(show_id))
			return list()

		# Parse RSS feed
		if not _verify_feed(response):
			warning("Parsed feed could not be verified, may have unexpected results")
		#print(rss)
		return response["videos"]

	def _digest_episode(self, feed_episode, stream):
		debug("Digesting episode")

		# Get data
		num = feed_episode["number"]
		debug(" num={}".format(num))
		name = feed_episode["show_name"]
		debug(" name={}".format(name))
		link = self._episode_url.format(show_slug=stream.show_key, ep_slug=feed_episode["url"])
		debug(" link={}".format(link))
		date = datetime.strptime(feed_episode["releaseDate"], "%Y/%m/%d")
		debug(" date={}".format(date))

		return Episode(num, name, link, date)

	def get_seasonal_streams(self, year=None, season=None, **kwargs):
		return list()

# Helpers

def _verify_feed(feed):
	debug("Verifying feed")
	if "videos" not in feed:
		debug(" Feed doesn't contain videos")
		return False
	return True

def _is_valid_episode(feed_episode, show_id):
	def get(key, default):
		if key in feed_episode:
			return feed_episode[key]
		return default

	# Ignore dubs (HA!)
	if get("has_subtitles", "false") != "true" or get("dub_sub", "dub") != "sub":
		debug("Is dub, ignoring")
		return False

	# Sanity check
	if get("show_id", "-1") != show_id:
		debug("Wrong ID")
		return False

	return True
|
Python
| 0
|
@@ -162,16 +162,29 @@
=7556914
+&limit=100000
%0A%0Afrom l
@@ -515,24 +515,37 @@
show_id=%7Bid%7D
+&limit=100000
%22%0A%09_episode_
|
446fe02ce407e3d14c1dddd5e58c7b3f54b2ccf9
|
delete functions
|
src/video.py
|
src/video.py
|
#
#  video.py
#  Created by pira on 2017/07/31.
#

#coding: utf-8
u"""For video processing."""

import numpy as np
import cv2

def showVideo(filename):
	pass

def video2image(filename, n=0):
	u"""Read mpeg video and divide into jpeg images.
	@param filename:video filename
	@param n :number of export images (if n=0, this function exports all images in video.)
	@return count :number of exported images
	"""
	count = 1
	fnin = filename[:filename.rfind('.')] # get the filename with its extension stripped
	cap = cv2.VideoCapture(filename)
	if n == 0:
		n = int(cap.get(7)) #CV_CAP_PROP_FRAME_COUNT
	fps = round(cap.get(5)) #CV_CAP_PROP_FPS
	height = int(cap.get(4)) #CV_CAP_PROP_FRAME_HEIGHT
	width = int(cap.get(3)) #CV_CAP_PROP_FRAME_WIDTH
	print('frame num =', n)
	print('fps =', fps)
	print('height =', height)
	print('width =', width, '\n')
	for i in np.arange(n):
		count = i+1
		fnout = '%06d' % count
		fnout = fnin + fnout + '.jpg'
		ret, frame = cap.read()
		cv2.imwrite(fnout, frame)
	print('Export', count, 'jpeg Images.')
	return count

def image2video():
	pass

#filename = 'test.mov'
#n = video2image(filename, )
#print(n)
|
Python
| 0.000006
|
@@ -122,40 +122,8 @@
v2%0A%0A
-def showVideo(filename):%0A%09pass%0A%0A
def
@@ -1018,34 +1018,8 @@
nt%0A%0A
-def image2video():%0A%09pass%0A%0A
#fil
|
f43450ac6ecff870693bf3fcb0d638fcf334d492
|
Update template file.
|
src/project_euler/P000_Template.py
|
src/project_euler/P000_Template.py
|
# Description: Write Here

"""
Technique
- Write Here

Note
- Write Here

Instrumentation
- System Details: 8x Intel Core i7-3630QM CPU @ 2.40GHz, 16GB RAM, Ubuntu 14.04
- Input Details: Write Here
- Time for 100 runs: Minimum - NA sec, Average - NA sec, Maximum NA sec
- Write Here
"""

def function_name():
    pass

# Main
def main():
    """Main function to test the above implementation. """

    result = function_name()
    print 'Write Here {0} '.format(result)

# Call Main
main()
|
Python
| 0
|
@@ -20,16 +20,32 @@
e Here%0A%0A
+import logging%0A%0A
%22%22%22%0ATech
@@ -406,16 +406,129 @@
n. %22%22%22%0A%0A
+ # Set logging level from DEBUG, INFO, WARNING. ERROR, CRITICAL%0A logging.basicConfig(level=logging.DEBUG)%0A%0A
resu
|
7c9abfc0e9c0187378c0d2f8be0af87c5dbd145a
|
Update mapplot.py
|
src/pyensae/graphhelper/mapplot.py
|
src/pyensae/graphhelper/mapplot.py
|
"""
@file
@brief Plotting maps.
"""
def plot_map_france(ax=None, scale='50m'):
"""
Creates a map for France using :epkg:`cartopy`.
@param ax existing axes or None to create ones
@param scale scale in (`10m`, `50m`, `110m`)
@return ax
.. plot::
from matplotlib import pyplot as plt
from pyensae.graphhelper import plot_map_france
plot_map_france()
plt.show()
"""
if ax is None: # pragma: no cover
import matplotlib.pyplot as plt # pylint: disable=C0415
import cartopy.crs as ccrs # pylint: disable=C0415
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
# The following line may make the program crash on debian + python3.9.1.
ax.set_extent([-5, 10, 38, 52])
import cartopy.feature as cfeature # pylint: disable=C0415
ax.add_feature(cfeature.OCEAN.with_scale(scale))
ax.add_feature(cfeature.RIVERS.with_scale(scale))
ax.add_feature(cfeature.BORDERS.with_scale(scale), linestyle=':')
return ax
def plot_map_france_polygon(geometry, colors, ax=None, scale='50m'):
"""
Plots polygons into a map for France.
@param geometry series of polygons
@param colors colors
@param scale scale, see @see fn map_france
@param ax existing axes, None to create one
@return ax
.. plot::
from matplotlib import pyplot as plt
import cartopy.crs as ccrs
from pyensae.datasource import load_french_departements
from pyensae.graphhelper import plot_map_france, plot_map_france_polygon
# loads the French departments
df = load_french_departements()
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.set_extent([-5, 10, 38, 52])
N = float(df.shape[0])
plot_map_france_polygon(
ax=ax, geometry=df['geometry'],
colors=[(i/N, i/N, i/N) for i in range(df.shape[0])])
plt.show()
"""
from geopandas.plotting import plot_polygon_collection # pylint: disable=C0415
ax = plot_map_france(scale=scale, ax=ax)
plot_polygon_collection(
ax, geometry, facecolor=colors, values=None, edgecolor='black')
return ax
|
Python
| 0.000001
|
@@ -817,32 +817,34 @@
on3.9.1.%0A
+ #
ax.set_extent(%5B
|
8ae8329b9a143d3dc7d2ca6161ce7c3731997212
|
add l2_product to discretization
|
src/pymor/discretizers/elliptic.py
|
src/pymor/discretizers/elliptic.py
|
# This file is part of the pyMor project (http://www.pymor.org).
# Copyright Holders: Felix Albrecht, Rene Milk, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function

from pymor.analyticalproblems import EllipticProblem
from pymor.domaindiscretizers import discretize_domain_default
from pymor.operators.cg import DiffusionOperatorP1, L2ProductFunctionalP1
from pymor.operators.affine import LinearAffinelyDecomposedOperator
from pymor.operators import add_operators
from pymor.discretizations import StationaryLinearDiscretization
from pymor.grids import TriaGrid, OnedGrid, EmptyBoundaryInfo
from pymor.la import induced_norm


def discretize_elliptic_cg(analytical_problem, diameter=None, domain_discretizer=None,
                           grid=None, boundary_info=None):
    '''Discretize an `EllipticProblem` using finite elements.

    Since operators are not assembled during instatiation, calling this function is
    cheap if the domain discretization proceeds quickly.

    Parameters
    ----------
    analytical_problem
        The `EllipticProblem` to discretize.
    diameter
        If not None, is passed to the domain_discretizer.
    domain_discretizer
        Discretizer to be used for discretizing the analytical domain. This has
        to be function `domain_discretizer(domain_description, diameter=...)`.
        If further arguments should be passed to the discretizer, use
        functools.partial. If None, `discretize_domain_default` is used.
    grid
        Instead of using a domain discretizer, the grid can be passed directly.
    boundary_info
        A `BoundaryInfo` specifying the boundary types of the grid boundary
        entities. Must be provided is `grid` is provided.

    Returns
    -------
    discretization
        The discretization that has been generated.
    data
        Dict with the following entries:

            grid
                The generated grid.
            boundary_info
                The generated `BoundaryInfo`.
    '''
    assert isinstance(analytical_problem, EllipticProblem)
    assert grid is None or boundary_info is not None
    assert boundary_info is None or grid is not None
    assert grid is None or domain_discretizer is None

    if grid is None:
        domain_discretizer = domain_discretizer or discretize_domain_default
        if diameter is None:
            grid, boundary_info = domain_discretizer(analytical_problem.domain)
        else:
            grid, boundary_info = domain_discretizer(analytical_problem.domain, diameter=diameter)

    assert isinstance(grid, (OnedGrid, TriaGrid))

    Operator = DiffusionOperatorP1
    Functional = L2ProductFunctionalP1

    p = analytical_problem

    if p.diffusion_functionals is not None or len(p.diffusion_functions) > 1:
        L0 = Operator(grid, boundary_info, diffusion_constant=0, name='diffusion_boundary_part')

        Li = tuple(Operator(grid, boundary_info, diffusion_function=df, dirichlet_clear_diag=True,
                            name='diffusion_{}'.format(i))
                   for i, df in enumerate(p.diffusion_functions))

        if p.diffusion_functionals is None:
            L = LinearAffinelyDecomposedOperator(Li, L0, name='diffusion')
            L.rename_parameter({'.coefficients': '.diffusion_coefficients'})
        else:
            L = LinearAffinelyDecomposedOperator(Li, L0, p.diffusion_functionals, name='diffusion')
    else:
        L = Operator(grid, boundary_info, diffusion_function=p.diffusion_functions[0],
                     name='diffusion')

    F = Functional(grid, p.rhs, boundary_info, dirichlet_data=p.dirichlet_data)

    import matplotlib.pyplot as pl

    if isinstance(grid, TriaGrid):
        def visualize(U):
            assert len(U) == 1
            pl.tripcolor(grid.centers(2)[:, 0], grid.centers(2)[:, 1], grid.subentities(0, 2), U.data.ravel())
            pl.colorbar()
            pl.show()
    else:
        def visualize(U):
            assert len(U) == 1
            pl.plot(grid.centers(1), U.data.ravel())
            pl.show()
        pass

    discretization = StationaryLinearDiscretization(L, F, visualizer=visualize, name='{}_CG'.format(p.name))

    discretization.h1_product = Operator(grid, boundary_info)
    discretization.h1_norm = induced_norm(discretization.h1_product)

    if hasattr(p, 'parameter_space'):
        discretization.parameter_space = p.parameter_space

    return discretization, {'grid': grid, 'boundary_info': boundary_info}
|
Python
| 0.000001
|
@@ -454,16 +454,29 @@
tionalP1
+, L2ProductP1
%0Afrom py
@@ -4416,16 +4416,66 @@
product)
+%0A discretization.l2_product = L2ProductP1(grid)
%0A%0A if
|
0d04ec0a0298011740190faf1b110860ef85a668
|
Change JVM defaults to use Temurin JDK distribution (#15009)
|
src/python/pants/jvm/subsystems.py
|
src/python/pants/jvm/subsystems.py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from pants.option.option_types import DictOption, StrListOption, StrOption
from pants.option.subsystem import Subsystem
from pants.util.strutil import softwrap


class JvmSubsystem(Subsystem):
    options_scope = "jvm"
    help = softwrap(
        """
        Options for general JVM functionality.

        JDK strings will be passed directly to Coursier's `--jvm` parameter.
        Run `cs java --available` to see a list of available JVM versions on your platform.

        If the string 'system' is passed, Coursier's `--system-jvm` option will be used
        instead, but note that this can lead to inconsistent behavior since the JVM version
        will be whatever happens to be found first on the system's PATH.
        """
    )

    tool_jdk = StrOption(
        "--tool-jdk",
        default="adopt:1.11",
        help=softwrap(
            """
            The JDK to use when building and running Pants' internal JVM support code and other
            non-compiler tools. See `jvm` help for supported values.
            """
        ),
        advanced=True,
    )
    jdk = StrOption(
        "--jdk",
        default="adopt:1.11",
        help=softwrap(
            """
            The JDK to use.

            This string will be passed directly to Coursier's `--jvm` parameter.
            Run `cs java --available` to see a list of available JVM versions on your platform.

            If the string 'system' is passed, Coursier's `--system-jvm` option will be used
            instead, but note that this can lead to inconsistent behavior since the JVM version
            will be whatever happens to be found first on the system's PATH.
            """
        ),
        advanced=True,
    )
    resolves = DictOption(
        "--resolves",
        default={"jvm-default": "3rdparty/jvm/default.lock"},
        # TODO: expand help message
        help="A dictionary mapping resolve names to the path of their lockfile.",
    )
    default_resolve = StrOption(
        "--default-resolve",
        default="jvm-default",
        help=softwrap(
            """
            The default value used for the `resolve` and `compatible_resolves` fields.

            The name must be defined as a resolve in `[jvm].resolves`.
            """
        ),
    )
    debug_args = StrListOption(
        "--debug-args",
        help=softwrap(
            """
            Extra JVM arguments to use when running tests in debug mode.

            For example, if you want to attach a remote debugger, use something like
            ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005']
            """
        ),
    )
|
Python
| 0
|
@@ -960,37 +960,39 @@
default=%22
-adopt
+temurin
:1.11%22,%0A
@@ -1302,13 +1302,15 @@
lt=%22
-adopt
+temurin
:1.1
|
1ae42227d9df745420c1a3db11893589d91ba83e
|
Add search and search_auto_paging_iter abstract methods (#873)
|
stripe/api_resources/abstract/searchable_api_resource.py
|
stripe/api_resources/abstract/searchable_api_resource.py
|
from __future__ import absolute_import, division, print_function

from stripe.api_resources.abstract.api_resource import APIResource


class SearchableAPIResource(APIResource):
    @classmethod
    def _search(
        cls,
        search_url,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "get",
            search_url,
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
|
Python
| 0
|
@@ -572,8 +572,205 @@
)%0A
+%0A @classmethod%0A def search(cls, *args, **kwargs):%0A raise NotImplementedError%0A%0A @classmethod%0A def search_auto_paging_iter(cls, *args, **kwargs):%0A raise NotImplementedError%0A
|
96a567e8e2d7fe351e90d2b5408f9d1a4cd3a499
|
Make sure that tracing is always cleaned up for v8_gc_times page_test
|
telemetry/telemetry/unittest_util/page_test_test_case.py
|
telemetry/telemetry/unittest_util/page_test_test_case.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Provide a TestCase base class for PageTest subclasses' unittests."""

import unittest

from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal import story_runner
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.results import results_options
from telemetry.unittest_util import options_for_unittests


class BasicTestPage(page_module.Page):
  def __init__(self, url, page_set, base_dir):
    super(BasicTestPage, self).__init__(url, page_set, base_dir)

  def RunPageInteractions(self, action_runner):
    with action_runner.CreateGestureInteraction('ScrollAction'):
      action_runner.ScrollPage()


class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
  def __init__(self):
    super(EmptyMetadataForTest, self).__init__('')


class PageTestTestCase(unittest.TestCase):
  """A base class to simplify writing unit tests for PageTest subclasses."""

  def CreatePageSetFromFileInUnittestDataDir(self, test_filename):
    ps = self.CreateEmptyPageSet()
    page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir)
    ps.AddUserStory(page)
    return ps

  def CreateEmptyPageSet(self):
    base_dir = util.GetUnittestDataDir()
    ps = page_set_module.PageSet(file_path=base_dir)
    return ps

  def RunMeasurement(self, measurement, ps,
                     expectations=test_expectations.TestExpectations(),
                     options=None):
    """Runs a measurement against a pageset, returning the rows its outputs."""
    if options is None:
      options = options_for_unittests.GetCopy()
    assert options
    temp_parser = options.CreateParser()
    story_runner.AddCommandLineArgs(temp_parser)
    defaults = temp_parser.get_default_values()
    for k, v in defaults.__dict__.items():
      if hasattr(options, k):
        continue
      setattr(options, k, v)
    measurement.CustomizeBrowserOptions(options.browser_options)
    options.output_file = None
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    options.output_trace_tag = None
    story_runner.ProcessCommandLineArgs(temp_parser, options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(measurement, ps, expectations, options, results)
    return results

  def TestTracingCleanedUp(self, measurement_class, options=None):
    ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
    start_tracing_called = [False]
    stop_tracing_called = [False]

    class BuggyMeasurement(measurement_class):
      def __init__(self, *args, **kwargs):
        measurement_class.__init__(self, *args, **kwargs)

      # Inject fake tracing methods to tracing_controller
      def TabForPage(self, page, browser):
        ActualStartTracing = browser.platform.tracing_controller.Start
        def FakeStartTracing(*args, **kwargs):
          ActualStartTracing(*args, **kwargs)
          start_tracing_called[0] = True
          raise exceptions.IntentionalException
        browser.StartTracing = FakeStartTracing

        ActualStopTracing = browser.platform.tracing_controller.Stop
        def FakeStopTracing(*args, **kwargs):
          result = ActualStopTracing(*args, **kwargs)
          stop_tracing_called[0] = True
          return result
        browser.platform.tracing_controller.Stop = FakeStopTracing

        return measurement_class.TabForPage(self, page, browser)

    measurement = BuggyMeasurement()
    try:
      self.RunMeasurement(measurement, ps, options=options)
    except page_test.TestNotSupportedOnPlatformError:
      pass
    if start_tracing_called[0]:
      self.assertTrue(stop_tracing_called[0])
|
Python
| 0.000004
|
@@ -3311,28 +3311,49 @@
browser.
-StartTracing
+platform.tracing_controller.Start
= FakeS
@@ -3863,34 +3863,28 @@
est.
-TestNotSupportedOnPlatform
+MultiTabTestAppCrash
Erro
|
5f7bf4f2e885c2343b89bc180b7aaad634b3a011
|
Remove TestConnectRemote decorator for FreeBSD
|
test/functionalities/connect_remote/TestConnectRemote.py
|
test/functionalities/connect_remote/TestConnectRemote.py
|
"""
Test lldb 'process connect' command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class ConnectRemoteTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureFreeBSD('llvm.org/pr18313')
def test_connect_remote(self):
"""Test "process connect connect:://localhost:12345"."""
# First, we'll start a fake debugserver (a simple echo server).
fakeserver = pexpect.spawn('./EchoServer.py')
# Turn on logging for what the child sends back.
if self.TraceOn():
fakeserver.logfile_read = sys.stdout
# Schedule the fake debugserver to be shutting down during teardown.
def shutdown_fakeserver():
fakeserver.close()
self.addTearDownHook(shutdown_fakeserver)
# Wait until we receive the server ready message before continuing.
fakeserver.expect_exact('Listening on localhost:12345')
# Connect to the fake server....
self.runCmd("process connect -p gdb-remote connect://localhost:12345")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Python
| 0.000001
|
@@ -207,56 +207,8 @@
_)%0A%0A
- @expectedFailureFreeBSD('llvm.org/pr18313')%0A
|
26398ad93df10949861a0611d3c4f10330553f8b
|
Fix crash if no instances returned
|
ssha/menu.py
|
ssha/menu.py
|
import collections
import curses
from curses import panel

from . import ec2


Item = collections.namedtuple('Item', field_names=('label', 'value'))


class Menu(object):

    def __init__(self, title, items, stdscreen):
        self.window = stdscreen.subwin(0, 0)
        self.window.timeout(1000)
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.title = title
        self.position = 0
        self.items = items
        self.offset_x = 2

    def addstr(self, y, x, string, attr):
        try:
            self.window.addstr(y, x, string, attr)
        except curses.error:
            # Curses will error on the last line even when it works.
            # https://stackoverflow.com/questions/7063128/last-character-of-a-window-in-python-curses
            if y == self.max_y - 1:
                pass
            else:
                raise

    def addline(self, y, string, attr):
        """
        Displays a string on the screen. Handles truncation and borders.
        """
        if y >= self.max_y:
            return

        # Display the left blank border.
        self.addstr(
            y=y,
            x=0,
            string=' ' * self.offset_x,
            attr=curses.A_NORMAL,
        )

        # Remove trailing spaces so the truncate logic works correctly.
        string = string.rstrip()

        # Truncate the string if it is too long.
        if self.offset_x + len(string) + self.offset_x > self.max_x:
            string = string[:self.max_x - self.offset_x - self.offset_x - 2] + '..'

        # Add whitespace between the end of the string and the edge of the
        # screen. This is required when scrolling, to blank out characters
        # from other lines that had been displayed here previously.
        string += ' ' * (self.max_x - self.offset_x - len(string) - self.offset_x)

        # Display the string.
        self.addstr(
            y=y,
            x=self.offset_x,
            string=string,
            attr=attr,
        )

        # Display the right blank border.
        self.addstr(
            y=y,
            x=self.max_x - self.offset_x,
            string=' ' * self.offset_x,
            attr=curses.A_NORMAL,
        )

    def navigate(self, n):
        self.position += n
        if self.position < 0:
            self.position = len(self.items) - 1
        elif self.position >= len(self.items):
            self.position = 0

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()

        while True:

            self.window.refresh()
            curses.doupdate()

            self.max_y, self.max_x = self.window.getmaxyx()

            # Display the menu title.
            if self.title:
                self.addline(1, self.title)
                self.addline(2, '-' * len(self.title))
                offset_top = 3
            else:
                offset_top = 1
            offset_bottom = 1

            window_height = max(self.max_y - offset_top - offset_bottom - 1, 0)
            if self.position < window_height:
                window = (0, window_height)
            else:
                window = (self.position - window_height, self.position)

            row = 0
            for index, item in enumerate(self.items):

                if index < window[0] or index > window[1]:
                    continue

                # Highlight the selected item.
                if index == self.position:
                    mode = curses.A_REVERSE
                else:
                    mode = curses.A_NORMAL

                # Display the item.
                self.addline(
                    offset_top + row,
                    item.label,
                    mode,
                )

                row += 1

            # Blank bottom lines if screen was resized
            for y in range(offset_bottom):
                self.addline(
                    self.max_y - y - 1,
                    '',
                    curses.A_NORMAL,
                )

            # Because window.timeout was called,
            # this returns -1 if nothing was pressed.
            key = self.window.getch()

            if key in [curses.KEY_ENTER, ord('\n')]:
                return self.items[self.position].value
            elif key in (curses.KEY_UP, ord('k')):
                self.navigate(-1)
            elif key in (curses.KEY_DOWN, ord('j')):
                self.navigate(1)
            elif key in (ord('q'), ord('Q')):
                raise KeyboardInterrupt

        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()


def _display(screen, items, title=None):
    curses.curs_set(0)
    menu = Menu(title, items, screen)
    return menu.display()


def choose_config(names, search):
    if search:
        return search
    elif len(names) > 1:
        items = [Item(label=name, value=name) for name in names]
        return curses.wrapper(_display, items)
    elif names:
        return names[0]
    else:
        return None


def _find_each_column_width(table):
    columns_size = [0] * len(table[0])
    for row in table:
        for j, column_element in enumerate(row):
            columns_size[j] = max(columns_size[j], len(column_element))
    return columns_size


def choose_instance(instances, search):
    labels = [ec2.label(inst) for inst in instances]
    columns_width = _find_each_column_width(labels)
    items = []
    for i, inst in enumerate(instances):
        formatted_labels = [label.ljust(columns_width[j]) for j, label in enumerate(labels[i])]
        items.append(Item(label=' '.join(formatted_labels), value=inst))

    if search:
        search = search.lower()
        items = [item for item in items if search in item.label.lower()]
        if len(items) == 1:
            return items[0].value

    if not items:
        return None

    return curses.wrapper(_display, items)
|
Python
| 0.000006
|
@@ -5168,24 +5168,61 @@
dth(table):%0A
+ if not table:%0A return %5B%5D%0A%0A
columns_
|
2c967d42f141f744e0d84c552dd66ea8e3d75db1
|
fix end timestep for flows
|
stockflow.py
|
stockflow.py
|
import numpy as np

class simulation:
    def __init__(self, tstep):
        self.stocks = {}
        self.flows = {}
        self.tstep = tstep

    def __getattr__(self,key):
        return self.stocks[key] if key in self.stocks else self.flows[key]['vals']

    def validate_key(self,key):
        if key in self.stocks or key in self.flows:
            raise NameError("Variable " + key + " already defined.")

    def init_stocks(self,icdict):
        for k,v in icdict.items():
            self.stock(k,v)

    def stock(self, key, IC):
        self.validate_key(key)
        self.stocks[key] = np.full((self.tstep,), IC) # init time series of stock

    def flow(self, key, f, start=None, end=None):
        self.validate_key(key)
        self.flows[key] = {'start': start, 'end': end, 'f': f, 'vals': np.zeros((self.tstep,))}

    def run(self):
        for t in xrange(1,self.tstep):
            for stock in self.stocks.itervalues(): # initialize stocks at prior values
                stock[t] = stock[t-1]
            for flow in self.flows.itervalues(): # calculate flows only once. distribute to stocks.
                flow['vals'][t-1] = flow['f'](t-1)
                if flow['start'] is not None:
                    self.stocks[flow['start']][t] -= flow['vals'][t-1]
                if flow['end'] is not None:
                    self.stocks[flow['end']][t] += flow['vals'][t-1]
|
Python
| 0.000002
|
@@ -11,16 +11,35 @@
py as np
+%0Aimport collections
%0A%0Aclass
@@ -113,26 +113,49 @@
elf.flows =
-%7B%7D
+collections.OrderedDict()
%0A self.ts
@@ -1285,28 +1285,167 @@
'%5D%5D%5Bt%5D += flow%5B'vals'%5D%5Bt-1%5D%0A
+%0A for flow in self.flows.itervalues(): # calculate flows at final timestep%0A flow%5B'vals'%5D%5Bself.tstep-1%5D = flow%5B'f'%5D(self.tstep-1)%0A
|
5746233789e87dec900ada1f1d4d1f4f35a50dd0
|
fix hardcode forward slashes for file paths
|
Sound.py
|
Sound.py
|
import sublime, sublime_plugin
from subprocess import check_output, call
import threading, random


class EventSound(sublime_plugin.EventListener):
    def __init__(self, *args, **kwargs):
        super(EventSound, self).__init__(*args, **kwargs)
        if sublime.platform() == "osx":
            self.play = self.osx_play
            self.random_play = self.osx_random_play
        elif sublime.platform() == "linux":
            pass # TODO
        elif sublime.platform() == "windows":
            pass # TODO

    def osx_play(self, filename):
        threading.Thread(target=lambda: self._osx_play(filename)).start()

    def osx_random_play(self, dirname):
        threading.Thread(target=lambda: self._osx_random_play(dirname)).start()

    def _osx_play(self, filename):
        self.on_play_flag = False
        call(["afplay", "{0}/Sublime-Sound/sounds/{1}.mp3".format(sublime.packages_path(), filename)])

    def _osx_random_play(self, dirname):
        self.on_play_flag = False
        num_files = sublime.load_settings("Sound.sublime-settings").get("random_sounds")["on_modify"]["num_files"]
        call(["afplay", "{0}/Sublime-Sound/random_sounds/{1}/{2}.mp3".format(sublime.packages_path(), dirname, random.randrange(1, num_files))])

    def on_new_async(self, view):
        # Called when a new buffer is created. Runs in a separate thread, and does not block the application.
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False # TODO: use decorator
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.play("on_new"), 100)

    def on_clone_async(self, view):
        # Called when a view is cloned from an existing one. Runs in a separate thread, and does not block the application.
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.play("on_clone"), 100)

    def on_load_async(self, view):
        # Called when the file is finished loading. Runs in a separate thread, and does not block the application.
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.play("on_load"), 100)

    def on_close(self, view):
        # Called when a view is closed (note, there may still be other views into the same buffer).
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.play("on_close"), 100)

    def on_pre_save_async(self, view):
        # Called after a view has been saved. Runs in a separate thread, and does not block the application.
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.play("on_save"), 100)

    def on_modified_async(self, view):
        # Called after changes have been made to a view. Runs in a separate thread, and does not block the application.
        if not hasattr(self, "on_play_flag"): self.on_play_flag = False
        if self.on_play_flag: return
        self.on_play_flag = True
        sublime.set_timeout(lambda: self.random_play("on_modify"), 100)
|
Python
| 0.000002
|
@@ -90,16 +90,20 @@
, random
+, os
%0A%0Aclass
@@ -822,37 +822,67 @@
-call(%5B%22afplay%22, %22%7B0%7D/
+file_path = os.path.join(sublime.packages_path(), %22
Sublime-
@@ -890,66 +890,73 @@
ound
-/
+%22, %22
sounds
-/%7B1%7D.mp3%22.format(sublime.packages_path()
+%22, filename) + %22.mp3%22%0A call(%5B%22afplay%22
, file
-name)
+_path
%5D)%0A%0A
@@ -1157,29 +1157,59 @@
-call(%5B%22afplay%22, %22%7B0%7D/
+file_path = os.path.join(sublime.packages_path(), %22
Subl
@@ -1217,17 +1217,20 @@
me-Sound
-/
+%22, %22
random_s
@@ -1238,52 +1238,9 @@
unds
-/%7B1%7D/%7B2%7D.mp3%22.format(sublime.packages_path()
+%22
, di
@@ -1246,16 +1246,20 @@
irname,
+str(
random.r
@@ -1281,16 +1281,60 @@
_files))
+) + %22.mp3%22%0A call(%5B%22afplay%22, file_path
%5D)%0A%0A
|
4791a4a97200a62c195931475c1ee3d7bb3731f8
|
Add Table to reverb/__init__.py.
|
reverb/__init__.py
|
reverb/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Reverb."""

# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-import-order
from reverb.platform.default import ensure_tf_install

ensure_tf_install.ensure_tf_version()

# Cleanup symbols to avoid polluting namespace.
del ensure_tf_install
# pylint: enable=g-bad-import-order

from reverb import distributions
from reverb import item_selectors as selectors
from reverb import rate_limiters
from reverb.client import Client
from reverb.client import Writer
from reverb.errors import ReverbError
from reverb.errors import TimeoutError
from reverb.replay_sample import ReplaySample
from reverb.replay_sample import SampleInfo
from reverb.server import PriorityTable
from reverb.server import Server
from reverb.tf_client import ReplayDataset
from reverb.tf_client import TFClient
|
Python
| 0.000157
|
@@ -1098,16 +1098,58 @@
rbError%0A
+# TODO(b/157210924): Rename TimeoutError.%0A
from rev
@@ -1178,16 +1178,53 @@
outError
+ # pylint: disable=redefined-builtin
%0A%0Afrom r
@@ -1380,16 +1380,48 @@
t Server
+%0Afrom reverb.server import Table
%0A%0Afrom r
|
f82f41e8c14d665eef6dedc94dfef84054970e9d
|
modify how we define trusted root basedir
|
geni/util/trustedroot.py
|
geni/util/trustedroot.py
|
import os

from gid import *

class TrustedRootList():
    def __init__(self, dir="./trusted_roots"):
        self.basedir = dir

        # create the directory to hold the files
        try:
            os.makedirs(self.basedir)
        # if the path already exists then pass
        except OSError, (errno, strerr):
            if errno == 17:
                pass

    def add_gid(self, gid):
        fn = os.path.join(self.basedir, gid.get_hrn() + ".gid")
        gid.save_to_file(fn)

    def get_list(self):
        gid_list = []

        file_list = os.listdir(self.basedir)
        for gid_file in file_list:
            fn = os.path.join(self.basedir, gid_file)
            if os.path.isfile(fn):
                gid = GID(filename = fn)
                gid_list.append(gid)

        return gid_list
|
Python
| 0
|
@@ -21,16 +21,52 @@
import *
+%0Afrom geni.util.config import Config
%0A%0Aclass
@@ -115,28 +115,149 @@
dir=
-%22./trusted_roots%22):%0A
+None):%0A if not dir:%0A config = Config()%0A self.basedir = config.path + os.sep + 'trusted_roots'%0A else:%0A
@@ -279,17 +279,16 @@
r = dir%0A
-%0A
|
62f40f2121f15421cfcb5aef1cad75bce7208fb1
|
Fix bug with HiddenImageCropWidget
|
image_cropping/widgets.py
|
image_cropping/widgets.py
|
import logging
import inspect
import warnings

from django.db.models import get_model, ObjectDoesNotExist
from django import forms
from django.contrib.admin.widgets import AdminFileWidget, ForeignKeyRawIdWidget
from django.conf import settings
from easy_thumbnails.files import get_thumbnailer

logger = logging.getLogger(__name__)


def thumbnail(image_path):
    thumbnailer = get_thumbnailer(image_path)
    thumbnail_options = {
        'detail': True,
        'size': getattr(settings, 'IMAGE_CROPPING_THUMB_SIZE', (300, 300)),
    }
    thumb = thumbnailer.get_thumbnail(thumbnail_options)
    return thumb.url


def get_attrs(image, name):
    try:
        return {
            'class': "crop-thumb",
            'data-thumbnail-url': thumbnail(image),
            'data-field-name': name,
            'data-org-width': image.width,
            'data-org-height': image.height,
        }
    except ValueError:
        # can't create thumbnail from image
        return {}


class CropWidget(object):
    class Media:
        js = (
            getattr(settings, 'JQUERY_URL',
                    'https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js'),
            "image_cropping/js/jquery.Jcrop.min.js",
            "image_cropping/image_cropping.js",
        )
        css = {'all': ("image_cropping/css/jquery.Jcrop.min.css",)}


class ImageCropWidget(AdminFileWidget, CropWidget):
    def render(self, name, value, attrs=None):
        if not attrs:
            attrs = {}
        if value:
            attrs.update(get_attrs(value, name))
        return super(AdminFileWidget, self).render(name, value, attrs)


class HiddenImageCropWidget(forms.HiddenInput, CropWidget):
    def render(self, name, value, attrs=None):
        if not attrs:
            attrs = {}
        # we need to hide it the whole field by JS because the admin
        # doesn't yet support hidden fields:
        # https://code.djangoproject.com/ticket/11277
        attrs['data-hide-field'] = True
        if value:
            attrs.update(get_attrs(value, name))
        return super(HiddenImageCropWidget, self).render(name, value, attrs)


class CropForeignKeyWidget(ForeignKeyRawIdWidget, CropWidget):
    def __init__(self, *args, **kwargs):
        self.field_name = kwargs.pop('field_name')
        # Django versions 1.4+ need the admin site passed in
        if 'admin_site' in inspect.getargspec(ForeignKeyRawIdWidget.__init__)[0]:
            # Django 1.4+
            if 'admin_site' not in kwargs:
                warnings.warn('Please use the ImageCroppingMixin in your ModelAdmin '
                              'instead of the CropForeignKey.', DeprecationWarning)
                from django.contrib.admin.sites import site
                kwargs['admin_site'] = site
        elif 'admin_site' in kwargs:
            # Django < 1.4 and admin_site passed in from ImageCroppingMixin
            del kwargs['admin_site']
        super(CropForeignKeyWidget, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        if value:
            app_name = self.rel.to._meta.app_label
            model_name = self.rel.to._meta.object_name.lower()
            try:
                image = getattr(
                    get_model(app_name, model_name).objects.get(pk=value),
                    self.field_name,
                )
                attrs.update(get_attrs(image, name))
            except ObjectDoesNotExist:
                logger.error("Can't find object: %s.%s with primary key %s "
                             "for cropping." % (app_name, model_name, value))
            except AttributeError:
                logger.error("Object %s.%s doesn't have an attribute named '%s'." % (
                    app_name, model_name, self.field_name))
        return super(CropForeignKeyWidget, self).render(name, value, attrs)
|
Python
| 0
|
@@ -103,33 +103,8 @@
ist%0A
-from django import forms%0A
from
@@ -1639,27 +1639,13 @@
get(
-forms.HiddenInput,
+Image
Crop
@@ -1957,75 +1957,8 @@
rue%0A
- if value:%0A attrs.update(get_attrs(value, name))%0A
|
dcc9c2cd5cd66797d4d6f5438bcf74089c667f3c
|
Modify to make category directory
|
imagenet/image_crawler.py
|
imagenet/image_crawler.py
|
import os
import time
import requests

IMAGE_URL_API = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid='
OUTPUT_DIR = "images"
MAX_NUM_IMAGES_PER_CATEGORY = 1


def download_image(url, filename):
    try:
        r = requests.get(url)
    except Exception:
        return False

    if not r.ok:
        return False

    with open(filename, 'wb') as fp:
        fp.write(r.content)

    return True


if __name__ == '__main__':
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    # build the category => WordNet ID dictionaries
    cat2wnid = dict()
    wnid2cat = dict()
    with open('words.txt', 'r') as fp:
        for line in fp.readlines():
            line = line.rstrip()
            wnid, category = line.split('\t')
            cat2wnid[category] = wnid
            wnid2cat[wnid] = category

    # load the list of categories to collect images for
    # the 1000 categories of ILSVRC2014
    # http://image-net.org/challenges/LSVRC/2014/browse-synsets
    target_categories = []
    with open('class1000.txt', 'r') as fp:
        for line in fp.readlines():
            line = line.rstrip()
            target_categories.append(line)

    # convert the category list to a list of WordNet IDs
    target_wnid = []
    for cat in target_categories:
        target_wnid.append(cat2wnid[cat])

    # collect images for each category
    for wnid in target_wnid:
        print("*** wnid = %s (%s)" % (wnid, wnid2cat[wnid]))
        r = requests.get(IMAGE_URL_API + wnid)
        if not r.ok:
            print("WARNING: cannot get image list: wnid = %s" % wnid)
            continue

        page = r.text
        image_url_list = page.rstrip().split('\r\n')

        num_ok = 0
        for image_url in image_url_list:
            print("%s ... " % image_url, end="")
            filename = image_url.split('/')[-1]
            ret = download_image(image_url, os.path.join(OUTPUT_DIR, filename))
            if ret == True:
                print("OK")
                num_ok += 1
                if num_ok == MAX_NUM_IMAGES_PER_CATEGORY:
                    break
            else:
                print("NG")

        # consecutive URLs may hit the same domain, so sleep between requests
        time.sleep(3)
|
Python
| 0
|
@@ -15,16 +15,30 @@
rt time%0A
+import random%0A
import r
@@ -45,16 +45,16 @@
equests%0A
-
%0AIMAGE_U
@@ -182,16 +182,19 @@
GORY = 1
+00%0A
%0A%0Adef do
@@ -410,16 +410,133 @@
ntent)%0A%0A
+ # flickr error image%0A if os.path.getsize(filename) == 2051:%0A os.remove(filename)%0A return False%0A%0A
retu
@@ -1428,16 +1428,50 @@
t_wnid:%0A
+ category = wnid2cat%5Bwnid%5D%0A
@@ -1510,25 +1510,20 @@
id,
-wnid2cat%5Bwnid%5D
+category
))%0A
+%0A
@@ -1752,16 +1752,109 @@
('%5Cr%5Cn')
+%0A random.shuffle(image_url_list)%0A%0A os.mkdir(os.path.join(OUTPUT_DIR, category))
%0A%0A
@@ -1950,16 +1950,8 @@
_url
-, end=%22%22
)%0A%0A
@@ -2074,24 +2074,24 @@
filename))%0A%0A
+
@@ -2100,16 +2100,8 @@
ret
- == True
:%0A
|
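The diff above makes the crawler shuffle candidate URLs, create one subdirectory per category, and discard flickr's fixed-size "photo unavailable" placeholder. A minimal sketch of the reworked download helper, assuming the 2051-byte sentinel taken from the diff (everything else is illustrative):

import os
import requests

FLICKR_ERROR_IMAGE_SIZE = 2051  # byte size of flickr's placeholder image, per the diff

def download_image(url, filename):
    """Fetch url into filename; reject HTTP failures and flickr placeholders."""
    try:
        r = requests.get(url)
    except Exception:
        return False
    if not r.ok:
        return False
    with open(filename, 'wb') as fp:
        fp.write(r.content)
    # flickr serves a fixed-size error image for removed photos
    if os.path.getsize(filename) == FLICKR_ERROR_IMAGE_SIZE:
        os.remove(filename)
        return False
    return True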
6168b224249f68170f4c41eaff83e9a9dd379bfb
|
remove add_bindings method
|
kaarmebot/dispatcher.py
|
kaarmebot/dispatcher.py
|
import collections
Message = collections.namedtuple('Message', ['source', 'target', 'contents'])
class MessageDispatcher:
def __init__(self):
self.routing_classes = {}
def add_binding(self, routing_class, predicate, handler):
rc = self.routing_classes.get(routing_class)
if rc:
rc.append((predicate, handler))
else:
self.routing_classes[routing_class] = [(predicate, handler)]
def add_bindings(self, *bindings):
for binding in bindings:
self.add_binding(*binding)
def remove_binding(self, routing_class, predicate, handler):
rc = self.routing_classes.get(routing_class)
if rc:
rc.remove((predicate, handler))
def get_handlers_for_message(self, message):
rc = self.routing_classes.get(message.__class__)
if rc:
for predicate, handler in rc:
if predicate(message):
yield handler
def dispatch(self, message):
handler_generator = self.get_handlers_for_message(message)
return [handler(message) for handler in handler_generator]
|
Python
| 0.000001
|
@@ -444,120 +444,8 @@
)%5D%0A%0A
- def add_bindings(self, *bindings):%0A for binding in bindings:%0A self.add_binding(*binding)%0A%0A
|
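With `add_bindings` removed, callers register each (predicate, handler) pair through `add_binding` directly; the loop is trivial to write at the call site. A hedged usage sketch against the classes above (the predicates and handlers are made up):

dispatcher = MessageDispatcher()
pairs = [
    (lambda m: m.target == '#chan', lambda m: print('channel:', m.contents)),
    (lambda m: m.target == 'bot', lambda m: print('private:', m.contents)),
]
for predicate, handler in pairs:
    dispatcher.add_binding(Message, predicate, handler)

dispatcher.dispatch(Message(source='nick', target='#chan', contents='hi'))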
51d435f5815b5e85e09a547b0dab5debb940aad5
|
drop damaged data
|
xbaydns/tools/master/idcview.py
|
xbaydns/tools/master/idcview.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
idcview.py
Created by QingFeng on 2008-03-17.
Copyright (c) 2007 xBayDNS Team. All rights reserved.
"""
from decimal import Decimal
from operator import itemgetter
import os, sys, time
from xbaydns.conf import sysconf
def convfiles(files):
agents = []
data = {}
for filename in files:
agent_name = os.path.basename(filename).split('_')[0]
agents.append(agent_name)
file_obj = open(filename, "r")
line = file_obj.readline()
try:
preip, pingtype, latency, datetime = line.split(',')
except Exception, e:
print line
if preip not in data:
data[preip] = {}
latency_sum = Decimal(latency)
record_count = 1
for line in file_obj:
ip, pingtype, latency, datetime = line.split(',')
if preip != ip:
if preip not in data:
data[preip] = {}
data[preip][agent_name] = latency_sum/record_count
preip = ip
latency_sum = Decimal(latency)
record_count = 1
else:
latency_sum += Decimal(latency)
record_count += 1
if preip not in data:
data[preip] = {}
data[preip][agent_name] = latency_sum/record_count
return (agents, data)
def main():
if len(sys.argv) == 1:
logdir = os.path.join(sysconf.xbaydnsdb, 'iplatency')
if os.path.isdir(logdir) == False:
print "No such a directory %s"%logdir
sys.exit(1)
else:
agentfiles = os.listdir(logdir)
agentfiles = map(lambda x:os.path.join(logdir, x), agentfiles)
else:
agentfiles = sys.argv[1:]
if len(agentfiles) == 0:
print "No logs in the directory %s"%sysconf.xbaydnsdb
sys.exit(1)
outputdir = os.path.join(sysconf.xbaydnsdb, 'idcview')
if os.path.isdir(outputdir) == False:
try:
os.mkdir(outputdir)
except OSError, e:
print e.strerror
sys.exit(1)
outputfile = "%s/idcview.%s"%(outputdir, time.strftime("%Y-%m-%d"))
agents, data = convfiles(agentfiles)
outputfile_obj = open(outputfile, "w")
# write header
header = ""
for agent_name in agents:
header += "%s,"%agent_name
outputfile_obj.write("%s\n"%header[:-1])
for ip, latency_agents in data.items():
outputfile_obj.write("%s"%ip)
for agent_name in agents:
if agent_name not in latency_agents:
outputfile_obj.write(",-1")
else:
outputfile_obj.write(",%.2f"%latency_agents[agent_name])
outputfile_obj.write("\n")
outputfile_obj.close()
outputlink = "%s/idcview.current"%outputdir
if os.path.islink(outputlink) == True:
os.remove(outputlink)
try:
os.symlink(outputfile, outputlink)
except OSError, e:
print e.strerror
sys.exit(1)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -792,16 +792,37 @@
le_obj:%0A
+ try:%0A
@@ -867,32 +867,77 @@
line.split(',')%0A
+ except:%0A continue%0A
if p
|
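The fix wraps the per-line `split(',')` in a try/except so one damaged row no longer aborts the whole agent file. The same tolerant-parsing pattern in isolation (field names mirror the record; the diff uses a bare `except`, while `ValueError` is what a bad split actually raises):

def iter_latency_rows(file_obj):
    """Yield (ip, pingtype, latency, datetime) tuples, dropping damaged rows."""
    for line in file_obj:
        try:
            ip, pingtype, latency, timestamp = line.split(',')
        except ValueError:  # wrong number of fields: skip the damaged row
            continue
        yield ip, pingtype, latency, timestamp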
ee51286fe1e0b77853d63ffcc9547a753eb78b0d
|
Reset login_failure var when login OK
|
t411/t411.py
|
t411/t411.py
|
# coding: utf8
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from couchpotato.core.media.movie.providers.base import MovieProvider
from datetime import datetime
import traceback
class T411(TorrentProvider, MovieProvider):
"""
    Couchpotato plugin to search movie torrents using the T411 APIs. More
    information about the T411 APIs is available at https://api.t411.ch.
"""
url_scheme = 'https'
url_netloc_api = 'api.t411.ch'
url_netloc_www = 'www.t411.ch'
token_ttl = 90 # T411 authentication token TTL = 90 days
token_timestamp = None
http_time_between_calls = 0
log = CPLog(__name__)
def __init__(self):
"""
Default constructor
"""
TorrentProvider.__init__(self)
MovieProvider.__init__(self)
path_www = self.url_scheme+'://'+self.url_netloc_www
path_api = self.url_scheme+'://'+self.url_netloc_api
self.urls = {
'login': path_api+'/auth',
'search': path_api+'/torrents/search/{0} {1}?{2}',
'url': path_api+'/torrents/download/',
'detail_url': path_www+'/torrents/?id='
}
self.headers = {
'Authorization': None
}
def loginDownload(self, url='', nzb_id=''):
"""
It appends a T411 HTTP authentication header to the download request.
.. seealso:: YarrProvider.loginDownload
"""
result = None
try:
if self.login():
result = self.urlopen(url, headers=self.headers)
except:
self.log.error('Failed getting release from {0}: {1}'.
format(self.getName(), traceback.format_exc()))
return result
def formatQuality(self, quality):
"""
        Generate a snippet of a T411 search request by adding the current
        quality term and its alternatives. For more information see
http://www.t411.ch/faq/#300.
"""
result = [quality.get('identifier')]
for alt in quality.get('alternative'):
if isinstance(alt, basestring):
result.append(alt)
else:
result.append('({0})'.format('&'.join(alt)))
return '|'.join(result)
def login(self):
"""
        Log in to the T411 torrents provider and store the HTTP authentication
header token.
.. seealso:: YarrProvider.login
"""
result = True
now = datetime.now()
if (self.token_timestamp is None) or ((now - self.token_timestamp).
days >= self.token_ttl):
data = {
'username': self.conf('username'),
'password': self.conf('password')
}
try:
data = self.getJsonData(self.urls.get('login'), data=data)
self.headers['Authorization'] = data['token']
self.token_timestamp = now
except:
if data and ('error' in data):
self.log.error('T411 error code {0}: {1}'.
format(data['code'], data['error']))
else:
self.log.error('Failed to login {0}: {1}'.
format(self.getName(),
traceback.format_exc()))
self.login_failures += 1
if self.login_failures >= 3:
self.disableAccount()
result = False
return result
def _searchOnTitle(self, title, media, quality, results):
"""
Do a T411 search based on possible titles.
.. seealso:: YarrProvider.search
"""
try:
params = {
'cid': 210, # Movie/Video category
'offset': 0,
'limit': 50 # We only select the 50 firsts results
}
url = self.urls['search'].format(simplifyString(title),
self.formatQuality(quality),
tryUrlencode(params))
data = self.getJsonData(url, headers=self.headers)
now = datetime.now()
for torrent in data['torrents']:
added = datetime.strptime(torrent['added'],
'%Y-%m-%d %H:%M:%S')
# Convert size from byte to kilobyte
size = int(torrent['size'])/1024
result = {
'id': int(torrent['id']),
'name': torrent['name'],
'seeders': int(torrent['seeders']),
'leechers': int(torrent['leechers']),
'size': self.parseSize(str(size)+'kb'),
'age': (now - added).days,
'url': self.urls['url']+torrent['id'],
'detail_url': self.urls['detail_url']+torrent['id'],
'verified': bool(int(torrent['isVerified']))
}
self.log.debug('{0}|{1}'.format(result.get('id'),
simplifyString(result.get('name'))))
results.append(result)
except:
self.log.error('Failed searching release from {0}: {1}'.
format(self.getName(), traceback.format_exc()))
|
Python
| 0.000004
|
@@ -3085,16 +3085,56 @@
p = now%0A
+ self.login_failures = 0%0A
|
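Resetting `login_failures` on a successful login means the account is only disabled after three consecutive failures, not three failures over the plugin's lifetime. The counter logic in miniature (an illustrative class, not the plugin's real structure):

class LoginThrottle(object):
    """Disable an account only after consecutive login failures."""
    MAX_FAILURES = 3

    def __init__(self):
        self.login_failures = 0
        self.disabled = False

    def on_success(self):
        self.login_failures = 0  # the fix: count consecutive, not cumulative, failures

    def on_failure(self):
        self.login_failures += 1
        if self.login_failures >= self.MAX_FAILURES:
            self.disabled = True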
1069a819043f12ed423ef461aacac479c52adc8d
|
make self.server an attribute which reads self.network.local
|
merc/application.py
|
merc/application.py
|
import asyncio
import datetime
import logging
import signal
import yaml
import passlib.context
from merc import config
from merc import config_format
from merc import channel
from merc import feature
from merc import protocol
from merc import server
from merc import user
from merc import util
logger = logging.getLogger(__name__)
class Application(object):
def __init__(self, config_filename, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
self.creation_time = datetime.datetime.now()
self.features = feature.FeatureLoader(self)
self.users = user.UserStore(self)
self.channels = channel.ChannelStore(self)
self.network = server.Network(self)
self.server = None
self.crypt_context = None
self.config = None
self.config_filename = config_filename
self.reload_config()
self.register_signal_handlers()
def check_config(self, cfg):
config.validate(cfg, config_format.Config)
def reload_config(self):
self.features.unload_all()
with open(self.config_filename, "r") as f:
config = yaml.safe_load(f)
try:
self.check_config(config)
for feature_name in config["features"]:
self.features.load(feature_name)
self.features.check_config(config)
except:
logger.critical("Configuration invalid.")
self.features.unload_all()
if self.config:
logger.critical("Reloading old configuration.")
for feature_name in self.config["features"]:
self.features.load(feature_name)
raise
else:
self.config = config
finally:
if self.config:
self.update_from_config()
def update_from_config(self):
self.network.update_local(
self.loop,
self.config["server"]["name"],
self.config["server"]["description"],
self.config["server"]["sid"])
self.server = self.network.local
self.crypt_context = passlib.context.CryptContext(
schemes=self.config["crypto"]["hash_schemes"])
def rehash(self):
@asyncio.coroutine
def coro():
yield from self.unbind()
self.reload_config()
yield from self.bind()
return asyncio.async(coro(), loop=self.loop)
@asyncio.coroutine
def bind(self):
yield from self.network.local.bind(self, self.config["bind"])
@asyncio.coroutine
def unbind(self):
yield from self.network.local.unbind()
@property
def version(self):
return util.get_version()
@property
def network_name(self):
return self.config["server"]["network_name"]
@property
def admin_location(self):
return self.config["admin"]["location"]
@property
def admin_location_fine(self):
return self.config["admin"]["location_fine"]
@property
def admin_name(self):
return self.config["admin"]["name"]
@property
def admin_email(self):
return self.config["admin"]["email"]
def register_signal_handlers(self):
signal.signal(signal.SIGHUP, lambda signum, frame: self.rehash())
def run_hooks(self, hook_name, *args, **kwargs):
for feature in self.features.all():
feature.run_hooks(hook_name, self, *args, **kwargs)
def get_feature_locals(self, feature):
return self.features[feature.NAME].server_locals
def start(self):
logger.info("Welcome to merc-{}, running for {} ({}) on network {}.".format(
util.get_version(), self.config["server"]["name"],
self.config["server"]["sid"], self.config["server"]["network_name"]))
self.loop.run_until_complete(self.bind())
self._autoconnect_links()
try:
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.loop.run_until_complete(self.unbind())
self.loop.close()
def _autoconnect_links(self):
for server_name, link_spec in self.config["links"].items():
if link_spec["autoconnect"]:
self.network.connect(server_name)
def main():
import argparse
import coloredlogs
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--config", "-c", help="file to load configuration from",
default="merc.conf")
parser.add_argument("--verbose", "-v", help="enable verbose (debug) logging",
action="store_true", default=False)
args = parser.parse_args()
coloredlogs.install(level=logging.DEBUG if args.verbose else logging.INFO)
logging.getLogger("asyncio").setLevel(logging.WARN)
try:
app = Application(args.config)
app.start()
except config.ParseError as e:
logger.fatal('Could not load configuration file, aborting.')
logger.fatal(e)
except Exception as e:
logger.fatal('Could not initialize merc, aborting.')
logger.fatal(e)
|
Python
| 0.000465
|
@@ -715,31 +715,8 @@
lf)%0A
- self.server = None%0A
@@ -871,16 +871,78 @@
lers()%0A%0A
+ @property%0A def server(self):%0A return self.network.local%0A
%0A def c
@@ -1917,45 +1917,8 @@
d%22%5D)
-%0A self.server = self.network.local
%0A%0A
|
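Turning `self.server` into a property removes a second copy of `self.network.local` that had to be kept in sync manually. The pattern reduced to its essentials (class bodies trimmed for illustration):

class Network(object):
    def __init__(self):
        self.local = None  # updated elsewhere, e.g. by update_local()

class Application(object):
    def __init__(self):
        self.network = Network()

    @property
    def server(self):
        # always reflects the current local server; no manual resync needed
        return self.network.local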
98f33028451122b39c49a89d367a406dfd641dc0
|
use PID class
|
src/boat_pid_control/src/boat_pid_control/rudderPID.py
|
src/boat_pid_control/src/boat_pid_control/rudderPID.py
|
"""
PID control for the sailing robot
controlling sail position
based on goal sail direction
Inputs:
- current heading
- goal heading
Output:
- Change in motor position/motor position
TODO:
consider tack and jibe
"""
import rospy
PROPORTIONAL_GAIN = 0.1
INTEGRAL_GAIN = 0
DERIVATIVE_GAIN = 0
currentHeading = 23
goalHeading = 35
def get_pid(currentHeading, goalHeading):
# with new ROS input for goal or current heading
# Error calculation for angular error!
error = currentHeading - goalHeading
p = error * PROPORTIONAL_GAIN
i = 0
d = 0
correction = p + i + d
rudder_position = 2
#translate correction to servo change ...
return rudder_position
|
Python
| 0.000001
|
@@ -227,16 +227,52 @@
t rospy%0A
+from pid_controller_class import PID
%0A%0APROPOR
@@ -327,16 +327,136 @@
AIN = 0%0A
+INTEGRAL_LIMIT = 1%0A%0Acontroller = PID(PROPORTIONAL_GAIN, INTEGRAL_GAIN, DERIVATIVE_GAIN, INTEGRAL_LIMIT, -INTEGRAL_LIMIT)
%0A%0Acurren
@@ -588,16 +588,21 @@
g%0A%0A #
+ TODO
Error c
@@ -676,91 +676,53 @@
ing%0A
-%0A%0A p = error * PROPORTIONAL_GAIN%0A i = 0%0A d = 0%0A%0A correction = p + i + d
+ correction = controller.update_PID(error)
%0A
|
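The diff replaces the inline P/I/D arithmetic with a `PID` object from `pid_controller_class`, built as `PID(kp, ki, kd, i_max, i_min)` and driven via `update_PID(error)`. That module is not part of this record, so the following is only a plausible sketch matching the constructor and call used in the diff, with integral clamping:

class PID(object):
    """Minimal PID controller with a clamped integral term (illustrative)."""

    def __init__(self, kp, ki, kd, i_max, i_min):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.i_max, self.i_min = i_max, i_min
        self.integral = 0.0
        self.prev_error = None

    def update_PID(self, error):
        # clamp the accumulated error to avoid integral wind-up
        self.integral = min(self.i_max, max(self.i_min, self.integral + error))
        derivative = 0.0 if self.prev_error is None else error - self.prev_error
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative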
ce51f1c3829ac44e67c51a8fc97577f5f897d9e7
|
bump version
|
require_i18n/__init__.py
|
require_i18n/__init__.py
|
# Copyright Collab 2015
# shortcuts
from .util import extract_tower_json
# version information
__version__ = (1, 0, 0, 'b1')
#: For example: `2.0.0`
short_version = '.'.join([str(x) for x in __version__[:3]])
#: For example: `2.0.0a1`
version = '{}{}'.format('.'.join([str(x) for x in __version__[:-1]]),
__version__[-1])
|
Python
| 0
|
@@ -116,17 +116,17 @@
0, 0, 'b
-1
+2
')%0A%0A#: F
|
f8f9453569667fba4aabe7aaa1669cbfe9e6eb0a
|
Add a link to the password change page.
|
DjangoPlugin/tracdjangoplugin/__init__.py
|
DjangoPlugin/tracdjangoplugin/__init__.py
|
from trac.core import Component, implements
from trac.web.chrome import INavigationContributor
from trac.web.api import IRequestFilter, IRequestHandler
from trac.wiki.web_ui import WikiModule
from trac.util import Markup
class CustomWikiModule(WikiModule):
"""Works in combination with the CustomNavigationBar and replaces
the default wiki module. Has a different logic for active item
handling.
"""
def get_active_navigation_item(self, req):
pagename = req.args.get('page')
if pagename == 'Reports':
return 'custom_reports'
return 'wiki'
class CustomNewTicket(Component):
"""Hide certain options for the new ticket page"""
implements(IRequestFilter, IRequestHandler)
hidden_fields = frozenset(['stage', 'needs_tests', 'needs_docs',
'needs_better_patch'])
def match_request(self, req):
return req.path_info == '/simpleticket'
def process_request(self, req):
req.redirect(req.href.newticket())
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if req.path_info == '/newticket' and not data.get('preview_mode', False):
simple_interface = 'TICKET_BATCH_MODIFY' not in req.perm
if simple_interface:
data['fields'] = [f for f in data['fields']
if f['name'] not in self.hidden_fields]
data['simple_interface'] = simple_interface
template = 'custom_ticket.html'
return template, data, content_type
class CustomNavigationBar(Component):
"""Implements some more items for the navigation bar."""
implements(INavigationContributor)
def get_active_navigation_item(self, req):
return ''
def get_navigation_items(self, req):
items = []
if req.authname == 'anonymous':
items.append(('metanav', 'register',
Markup('<a href="https://www.djangoproject.com/accounts/register/">Register</a>')))
items.append(('metanav', 'reset_password',
Markup('<a href="https://www.djangoproject.com/accounts/password/reset/">Forgot your password?</a>')))
items.append(('mainnav', 'custom_reports', Markup('<a href="%s">Reports</a>' % req.href.wiki('Reports'))))
return items
try:
# Provided by https://github.com/aaugustin/trac-github
from tracext.github import GitHubBrowser
except ImportError:
pass
else:
from genshi.builder import tag
class GitHubBrowserWithSVNChangesets(GitHubBrowser):
def _format_changeset_link(self, formatter, ns, chgset, label,
fullmatch=None):
# Dead-simple version for SVN changesets
if chgset.isnumeric():
href = formatter.href.changeset(chgset, None, '/')
return tag.a(label, class_="changeset", href=href)
            # Fall back to the default implementation
return (super(GitHubBrowserWithSVNChangesets,self)
._format_changeset_link(formatter, ns, chgset, label, fullmatch))
|
Python
| 0
|
@@ -2246,32 +2246,220 @@
ssword?%3C/a%3E')))%0A
+ else:%0A items.append(('metanav', 'change_password',%0A Markup('%3Ca href=%22https://www.djangoproject.com/accounts/password/change/%22%3EChange your password%3C/a%3E'))%0A
items.ap
|
3995a031b919705a721cfcd1362076ddf8ffa797
|
Simplify TestingDumpedMockTask
|
buildlet/tests/test_dumpedmocktask.py
|
buildlet/tests/test_dumpedmocktask.py
|
"""
Same as test_cachedtask but mock data is dumped in datastore.
This test module serves as a functional test for the datastore and a
basis for parallel-runner (such as `multiprocessing`) testing.
"""
from ..datastore.inmemory import (
DataStoreNestableInMemory, DataValuePickledInMemory)
# Avoid importing the test case at top level, to prevent duplicated test discovery
from . import test_cachedtask
class TestingDumpedMockTask(test_cachedtask.TestingCachedTask):
def run(self):
super(TestingDumpedMockTask, self).run()
self.datastore.get_valuestore('mock').set(self.mock)
def load(self):
super(TestingDumpedMockTask, self).load()
self.mock = self.datastore.get_valuestore('mock').get()
class DumpedMockRootTask(test_cachedtask.CachedRootTask,
TestingDumpedMockTask):
pass
class DataStoreNestableCopiedInMemory(DataStoreNestableInMemory):
# To be compatible with file-based store:
default_valuestore_type = DataValuePickledInMemory
# # I don't need to worry about this because valuestore is not
# # used in BaseCachedTask.
# class TestCachedTaskCopiedInMemory(test_cachedtask.TestCachedTask):
# DataStoreClass = DataStoreNestableCopiedInMemory
class TestDumpedMockTask(test_cachedtask.TestCachedTask):
TaskClass = DumpedMockRootTask
ParentTaskClass = TestingDumpedMockTask
DataStoreClass = DataStoreNestableCopiedInMemory
def test_rerun_new_instance(self):
self.test_simple_run()
self.task = self.TaskClass(**self.get_taskclass_kwds())
for func in self.TaskClass.mock_methods:
self.assert_run_num(0, func=func)
def getmocks(t):
return [t.mock] + [p.mock for p in t.get_parents()]
def setmocks(t, mocks):
t.mock = mocks[0]
for (p, m) in zip(t.get_parents(), mocks[1:]):
p.mock = m
newmocks = getmocks(self.task)
self.runner.run(self.task)
loadedmocks = getmocks(self.task)
setmocks(self.task, newmocks)
for (nm, lm) in zip(newmocks[1:], loadedmocks[1:]):
assert nm is lm, "Mocks for the parent task should not be loaded."
pnum_is_zero = 0 # see TestCachedTask
# Until `run`, the numbers are the same as in TestCachedTask
self.assert_run_num(0, pnum_is_zero)
self.assert_run_num(1, pnum_is_zero, func='load')
self.assert_run_num(1, pnum_is_zero, func='pre_run')
# New mock is not touched at all after `run`/`load`
self.assert_run_num(0, pnum_is_zero, func='post_success_run')
self.assert_run_num(0, pnum_is_zero, func='post_error_run')
setmocks(self.task, loadedmocks)
self.assert_run_num(1, pnum_is_zero)
self.assert_run_num(0, pnum_is_zero, func='load')
self.assert_run_num(1, pnum_is_zero, func='pre_run')
# post_success_run mock is called in the first run and the
# second run.
self.assert_run_num(2, pnum_is_zero, func='post_success_run')
self.assert_run_num(0, pnum_is_zero, func='post_error_run')
|
Python
| 0.000002
|
@@ -448,16 +448,20 @@
def
+pre_
run(self
@@ -475,56 +475,112 @@
-super(TestingDumpedMockTask, self).run()%0A
+# Load mock always at the very first stage.%0A if 'mock' in self.datastore:%0A self.mock =
sel
@@ -618,113 +618,206 @@
k').
-set(self.mock)%0A%0A def load(self):%0A super(TestingDumpedMockTask, self).load()%0A self.mock =
+get()%0A super(TestingDumpedMockTask, self).pre_run()%0A%0A def post_success_run(self):%0A super(TestingDumpedMockTask, self).post_success_run()%0A # Save mock at the very end:%0A
sel
@@ -851,20 +851,29 @@
'mock').
-g
+s
et(
+self.mock
)%0A%0A%0Aclas
|
48e136cea0886fa00bc99d9b380bf3e8db7a25b6
|
Move import of resource module
|
kolibri/utils/system.py
|
kolibri/utils/system.py
|
"""
Utilities for local system calls, everything here is cross-platform.
become_daemon was originally taken from Django:
https://github.com/django/django/commit/5836a5771f2aefca83349b111f4191d6485af1d5#diff-f7d80be2ccf77f4f009d08dcac4b7736
We might want to refactor this into:
system/__init__.py
system/posix.py
system/windows.py
etc..
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import resource
import sys
import six
from django.db import connections
from .conf import KOLIBRI_HOME
from kolibri.utils.android import on_android
logger = logging.getLogger(__name__)
def _posix_pid_exists(pid):
"""Check whether PID exists in the current process table."""
import errno
if pid < 0:
return False
try:
# Send signal 0, this is harmless
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
def _windows_pid_exists(pid):
import ctypes
kernel32 = ctypes.windll.kernel32
SYNCHRONIZE = 0x100000
process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
return False
buffering = int(six.PY3) # No unbuffered text I/O on Python 3 (#20815).
def _posix_become_daemon(
our_home_dir=".", out_log="/dev/null", err_log="/dev/null", umask=0o022
):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
if sys.platform != "darwin": # This block breaks on OS X
# Fix courtesy of https://github.com/serverdensity/python-daemon/blob/master/daemon.py#L94
si = open("/dev/null", "r")
so = open(out_log, "a+", buffering)
se = open(err_log, "a+", buffering)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
def _windows_become_daemon(our_home_dir=".", out_log=None, err_log=None, umask=0o022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
old_stderr = sys.stderr
old_stdout = sys.stdout
if err_log:
sys.stderr = open(err_log, "a", buffering)
else:
sys.stderr = _WindowsNullDevice()
if out_log:
sys.stdout = open(out_log, "a", buffering)
else:
sys.stdout = _WindowsNullDevice()
# Redirect stderr and stdout
os.dup2(sys.stderr.fileno(), old_stderr.fileno())
os.dup2(sys.stdout.fileno(), old_stdout.fileno())
old_stderr.flush()
old_stdout.flush()
class _WindowsNullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
def get_free_space(path=KOLIBRI_HOME):
while path and not os.path.exists(path):
path = os.path.dirname(path) # look to parent if it doesn't exist
if not path:
raise Exception("Could not calculate free space")
if sys.platform.startswith("win"):
import ctypes
free = ctypes.c_ulonglong(0)
check = ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(free)
)
if check == 0:
raise ctypes.winError()
result = free.value
elif on_android():
# This is meant for android, which needs to interact with android API to understand free
# space. If we're somehow getting here on non-android, we've got a problem.
try:
from jnius import autoclass
StatFs = autoclass("android.os.StatFs")
AndroidString = autoclass("java.lang.String")
st = StatFs(AndroidString(path))
try:
# for api version 18+
result = st.getFreeBlocksLong() * st.getBlockSizeLong()
except Exception:
# for api versions < 18
result = st.getFreeBlocks() * st.getBlockSize()
except Exception as e:
raise e
else:
st = os.statvfs(os.path.realpath(path))
result = st.f_bavail * st.f_frsize
return result
_become_daemon_function = None
def become_daemon(**kwargs):
# close all connections before forking, to avoid SQLite corruption:
# https://www.sqlite.org/howtocorrupt.html#_carrying_an_open_database_connection_across_a_fork_
connections.close_all()
_become_daemon_function(**kwargs)
def _posix_get_fd_limit():
fd_soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
return fd_soft_limit
def _windows_get_fd_limit():
import ctypes
return ctypes.cdll._getmaxstdio()
# Utility functions
if os.name == "posix":
pid_exists = _posix_pid_exists
get_fd_limit = _posix_get_fd_limit
_become_daemon_function = _posix_become_daemon
else:
pid_exists = _windows_pid_exists
get_fd_limit = _windows_get_fd_limit
_become_daemon_function = _windows_become_daemon
|
Python
| 0
|
@@ -484,24 +484,8 @@
os%0A
-import resource%0A
impo
@@ -5171,24 +5171,45 @@
fd_limit():%0A
+ import resource%0A%0A
fd_soft_
|
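`resource` exists only on POSIX, so importing it at module scope broke the whole module on Windows; moving the import inside `_posix_get_fd_limit` defers it to the code path that actually needs it. The lazy-import pattern in isolation (the Windows branch here goes through `ctypes.cdll.msvcrt`, a small liberty relative to the record):

import os

def get_fd_limit():
    """Soft limit on open file descriptors for the current process."""
    if os.name == "posix":
        import resource  # POSIX-only; imported lazily so Windows can import this module
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        return soft_limit
    import ctypes
    return ctypes.cdll.msvcrt._getmaxstdio()  # Windows CRT equivalent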
4d46001296ad083df6827a9c97333f0f093f31bd
|
Document new evil magic, and add required var.
|
example/config.py
|
example/config.py
|
# Mnemosyne configuration
# =======================
#
# This file is a Python script. When run, the following variables will be
# defined for you; you may change or add to them as you see fit.
#
# ``entries_dir``: a Maildir containing all the blog entries.
# ``layout_dir``: the blog's layout, as a skeleton directory tree.
# ``style_dir``: empy styles used for filling layout templates.
# ``output_dir``: location where we will write the generated pages.
#
# These will be $HOME/Mnemosyne/{entries,layout,style,htdocs} respectively.
#
# ``vars``: a dict of default local variables passed to all templates.
#
# This will contain the keys __version__, __url__, __author__, and __email__.
#
# You may also define functions here to add 'magic' attributes to each entry.
# A function with a name of the form ``make_MAGIC`` (which takes a single
# argument, the entry) will be used to create an attribute ``e._MAGIC`` for
# each entry ``e``. Either a single value or a list of values may be returned.
#
# In your layout, a file or directory name containing ``__MAGIC__`` will then
# be evaluated once for each value ``make_MAGIC`` returns, with the entries
# for which ``make_MAGIC`` returns that value or a list containing it.
vars['blogname'] = 'Example Blog'
class Entry:
def get_organization(self):
return self.m.get('Organization')
|
Python
| 0
|
@@ -189,16 +189,18 @@
fit.%0A#%0A#
+ *
%60%60entri
@@ -253,16 +253,18 @@
tries.%0A#
+ *
%60%60layou
@@ -322,16 +322,18 @@
tree.%0A#
+ *
%60%60style
@@ -388,16 +388,18 @@
lates.%0A#
+ *
%60%60outpu
@@ -543,13 +543,17 @@
#%0A#
-%60%60var
+* %60%60local
s%60%60:
@@ -700,573 +700,404 @@
#%0A#
-You may also define functions here to add 'magic' attributes to each entry.%0A# A function with a name of the form %60%60make_MAGIC%60%60 (which takes a single%0A# argument, the entry) will be used to create an attribute %60%60e._MAGIC%60%60 for%0A# each entry %60%60e%60%60. Either a single value or a list of values may be returned.%0A#%0A# In your layout, a file or directory name containing %60%60__MAGIC__%60%60 will then%0A# be evaluated once for each value %60%60make_MAGIC%60%60 returns, with the entries%0A# for which %60%60make_MAGIC%60%60 returns that value or a list containing it.%0A%0Avars%5B'blogname'%5D = 'Example Blog
+* %60%60MnemosyneEntry%60%60: a class used to represent each entry passed to the%0A# templates.%0A#%0A# If you wish to extend this class, you may define a new class %60%60Entry%60%60 here,%0A# using %60%60MnemosyneEntry%60%60 as its base class. Any methods with a name of the%0A# form %60%60get_ATTRIBUTE%60%60 will be used to provide e.ATTRIBUTE at runtime.%0A%0Alocals%5B'blogname'%5D = 'Example Blog'%0Alocals%5B'base'%5D = 'http://example.invalid
'%0A%0Ac
|
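The rewritten comments promise that an `Entry` subclass's `get_ATTRIBUTE` methods surface as `e.ATTRIBUTE` at runtime. `MnemosyneEntry` itself is not in this record, so here is one plausible way such dispatch could be implemented, via `__getattr__` (all names below are illustrative):

class MnemosyneEntry(object):
    """Illustrative base: e.ATTRIBUTE falls back to e.get_ATTRIBUTE()."""

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        getter = getattr(type(self), 'get_' + name, None)
        if getter is None:
            raise AttributeError(name)
        return getter(self)

class Entry(MnemosyneEntry):
    def __init__(self, headers):
        self.m = headers  # e.g. a parsed Maildir message's headers

    def get_organization(self):
        return self.m.get('Organization')

e = Entry({'Organization': 'Example Org'})
print(e.organization)  # -> 'Example Org', resolved through get_organization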
56506fa23fb6f3f705639cad2a816919ad9a9433
|
add district cmp field to rankings
|
controllers/api/api_district_controller.py
|
controllers/api/api_district_controller.py
|
import json
import webapp2
from controllers.api.api_base_controller import ApiBaseController
from consts.district_type import DistrictType
from consts.event_type import EventType
from datetime import datetime
from google.appengine.ext import ndb
from helpers.district_helper import DistrictHelper
from helpers.event_helper import EventHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
from models.event_team import EventTeam
from models.team import Team
class ApiDistrictControllerBase(ApiBaseController):
def _set_district(self, district):
self.district_abbrev = district
self.district = DistrictType.abbrevs[self.district_abbrev]
@property
def _validators(self):
return []
class ApiDistrictListController(ApiDistrictControllerBase):
CACHE_KEY_FORMAT = "apiv2_districts_{}" # year
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiDistrictListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs["year"] or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
def _track_call(self, year=None):
if year is None:
year = datetime.now().year
self._track_call_defer('district_list', year)
def _render(self, year=None):
all_cmp_event_keys = Event.query(Event.year == int(self.year), Event.event_type_enum == EventType.DISTRICT_CMP).fetch(None, keys_only=True)
events = ndb.get_multi(all_cmp_event_keys)
district_keys = [DistrictType.type_abbrevs[event.event_district_enum] for event in events]
return json.dumps(district_keys, ensure_ascii=True)
class ApiDistrictEventsController(ApiDistrictControllerBase):
CACHE_KEY_FORMAT = "apiv2_district_events_controller_{}_{}" # (district_short, year)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiDistrictEventsController, self).__init__(*args, **kw)
self.district_abbrev = self.request.route_kwargs["district_abbrev"]
self.year = int(self.request.route_kwargs["year"] or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.district_abbrev, self.year)
def _track_call(self, district_abbrev, year=None):
if year is None:
year = datetime.now().year
self._track_call_defer('district/events', str(year)+district_abbrev)
def _render(self, district_abbrev, year=None):
self._set_district(district_abbrev)
event_keys = Event.query(Event.year == self.year, Event.event_district_enum == self.district).fetch(None, keys_only=True)
events = ndb.get_multi(event_keys)
events = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(events, ensure_ascii=True)
class ApiDistrictRankingsController(ApiDistrictControllerBase):
CACHE_KEY_FORMAT = "apiv2_district_rankings_controller_{}_{}" # (district_short, year)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiDistrictRankingsController, self).__init__(*args, **kw)
self.district_abbrev = self.request.route_kwargs["district_abbrev"]
self.year = int(self.request.route_kwargs["year"] or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.district_abbrev, self.year)
def _track_call(self, district_abbrev, year=None):
if year is None:
year = datetime.now().year
self._track_call_defer('district/rankings', str(year)+district_abbrev)
def _render(self, district_abbrev, year=None):
self._set_district(district_abbrev)
event_keys = Event.query(Event.year == self.year, Event.event_district_enum == self.district).fetch(None, keys_only=True)
events = ndb.get_multi(event_keys)
district_cmp_keys_future = Event.query(Event.year == self.year, Event.event_type_enum == EventType.DISTRICT_CMP).fetch_async(None, keys_only=True)
event_futures = ndb.get_multi_async(event_keys)
event_team_keys_future = EventTeam.query(EventTeam.event.IN(event_keys)).fetch_async(None, keys_only=True)
if self.year == 2014: # TODO: only 2014 has accurate rankings calculations
team_futures = ndb.get_multi_async(set([ndb.Key(Team, et_key.id().split('_')[1]) for et_key in event_team_keys_future.get_result()]))
events = [event_future.get_result() for event_future in event_futures]
EventHelper.sort_events(events)
district_cmp_futures = ndb.get_multi_async(district_cmp_keys_future.get_result())
if self.year == 2014: # TODO: only 2014 has accurate rankings calculations
team_totals = DistrictHelper.calculate_rankings(events, team_futures, self.year)
else:
return json.dumps([])
rankings = []
currentRank = 1
for key, points in team_totals:
point_detail = {}
point_detail["rank"] = currentRank
point_detail["team_key"] = key
point_detail["event_points"] = {}
for event in points["event_points"]:
point_detail["event_points"][event[0].key_name] = event[1]
if "rookie_bonus" in points:
point_detail["rookie_bonus"] = points["rookie_bonus"]
else:
point_detail["rookie_bonus"] = 0
point_detail["point_total"] = points["point_total"]
rankings.append(point_detail)
currentRank += 1
return json.dumps(rankings)
|
Python
| 0
|
@@ -5288,68 +5288,321 @@
-point_detail%5B%22event_points%22%5D%5Bevent%5B0%5D.key_name%5D = event%5B1%5D %0A
+event_key = event%5B0%5D.key_name%0A point_detail%5B%22event_points%22%5D%5Bevent_key%5D = event%5B1%5D %0A event_details = Event.get_by_id(event_key)%0A point_detail%5B%22event_points%22%5D%5Bevent%5B0%5D.key_name%5D%5B'district_cmp'%5D = True if event_details.event_type_enum == EventType.DISTRICT_CMP else False
@@ -5604,24 +5604,25 @@
e
+%0A
%0A
|
f818b7b8e23bae7d9ceb359ff9a0a3264c8ba69f
|
Remove unused DATABASE_URL env variable
|
example_config.py
|
example_config.py
|
"""
File to easily switch between configurations between production and
development, etc.
"""
import os
# You must set each of these in your heroku environment with the heroku
# config:set command. See README.md for more information.
HEROKU_ENV_REQUIREMENTS = ('HEROKU', 'SECRET_KEY', 'GITHUB_CLIENT_ID',
'GITHUB_SECRET', 'DATABASE_URL',
'REPO_OWNER', 'REPO_NAME',
'REPO_OWNER_ACCESS_TOKEN', 'REDISCLOUD_URL',
'REDIS_URL', 'MAILCHIMP_API_KEY',
'MAILCHIMP_LIST_ID', 'MAILCHIMP_STACKS_GROUP_NAME',
'SECONDARY_REPO_OWNER', 'SECONDARY_REPO_NAME',
'BASE_URL', 'CELERY_BROKER_URL',
'CELERY_TASK_SERIALIZER')
class Config(object):
DEBUG = False
CSRF_ENABLED = True
HEROKU = False
SECRET_KEY = 'not-a-good-value'
# Details of the repo where all articles are stored. The GITHUB_CLIENT_ID
# and GITHUB_SECRET should allow full-access to this database.
GITHUB_CLIENT_ID = 'replace-me'
GITHUB_SECRET = 'replace-me'
REPO_OWNER = None
REPO_NAME = None
REPO_OWNER_ACCESS_TOKEN = None
CELERY_TASK_SERIALIZER = 'json'
CELERY_BROKER_URL = None
# Secondary (optional) repo for articles that are not editable
SECONDARY_REPO_OWNER = None
SECONDARY_REPO_NAME = None
# For caching
REDISCLOUD_URL = None
# For celery
REDIS_URL = None
MAILCHIMP_API_KEY = None
MAILCHIMP_LIST_ID = None
MAILCHIMP_STACKS_GROUP_NAME = None
class DevelopmentConfig(Config):
DEBUG = True
|
Python
| 0.000001
|
@@ -348,51 +348,8 @@
ET',
- 'DATABASE_URL',%0A
'RE
|
549190dc81bd4e666a8ca52eed8726a5717a8dde
|
fix syntax error
|
migrate/__init__.py
|
migrate/__init__.py
|
# -*- coding: utf-8 -*-
from .migration import Migration
from importlib import import_module
import os
class Migrator(object):
def __init__(self, count, runner_path, migrations_dir, direction, *args, **kwargs):
self.count = count
self.migrations_dir = migrations_dir
self.direction = direction
runner_cls = self.runner(runner_path)
self.runner = runner_cls(*args, **kwargs)
self.current = self.runner.version()
def runner(self, path):
package, name = path.rsplit('.', 1)
        return getattr(import_module(package), name)
def migrations_to_run(self):
try:
            names = sorted(os.listdir(self.migrations_dir))
except OSError: # explicitly raising this. Deal with it!
raise
if not names:
raise ValueError('No migrations to run in %s' % self.migrations_dir)
if self.direction == 'up':
return [
                m for m in names
if self.current < m
][:self.count]
elif self.direction == 'down':
return [
                m for m in reversed(names)
if self.current >= m
][:self.count]
else:
raise ValueError('Unknown migration direction "%s"' % self.direction)
def run(self):
"put all the parts together"
names = self.migrations_to_run()
if not names:
return {'message': 'No migrations necessary!'}
for name in names:
            with open(os.path.join(self.migrations_dir, name), 'r') as mig:
migration = Migration(mig.read())
if self.direction == 'up':
self.runner.up(name, migration)
else:
self.runner.down(name, migration)
else:
raise ValueError('Unknown migration direction "%s"' % self.direction)
return {'message': 'Ran %d migrations' % len(names)}
|
Python
| 0.000047
|
@@ -1715,34 +1715,59 @@
)%0A el
-se
+if self.direction == 'down'
:%0A
|
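The "syntax error" was two `else:` clauses on the same `if` inside `run()`; the diff rewrites the first as `elif self.direction == 'down':` so the trailing `else:` can raise for unknown directions. The corrected dispatch on its own (the function form is illustrative):

def apply_migration(runner, direction, name, migration):
    """Route one migration to the runner, rejecting unknown directions."""
    if direction == 'up':
        runner.up(name, migration)
    elif direction == 'down':
        runner.down(name, migration)
    else:
        raise ValueError('Unknown migration direction "%s"' % direction)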
68b6444eed6f3a748aa3a0126c20a52a5d56e6f9
|
Comment added for missing code.
|
app/notifications/rest.py
|
app/notifications/rest.py
|
import uuid
from flask import (
Blueprint,
jsonify,
request
)
from app import (notify_alpha_client, api_user)
from app.aws_sqs import add_notification_to_queue
from app.dao import (templates_dao)
from app.schemas import (
email_notification_schema, sms_template_notification_schema)
notifications = Blueprint('notifications', __name__)
@notifications.route('/<notification_id>', methods=['GET'])
def get_notifications(notification_id):
# TODO return notification id details
return jsonify({'id': notification_id}), 200
@notifications.route('/sms', methods=['POST'])
def create_sms_notification():
resp_json = request.get_json()
notification, errors = sms_template_notification_schema.load(resp_json)
if errors:
return jsonify(result="error", message=errors), 400
add_notification_to_queue(api_user['client'], notification['template'], 'sms', notification)
# TODO data to be returned
return jsonify({}), 200
@notifications.route('/email', methods=['POST'])
def create_email_notification():
resp_json = request.get_json()
notification, errors = email_notification_schema.load(resp_json)
if errors:
return jsonify(result="error", message=errors), 400
add_notification_to_queue(api_user['client'], "admin", 'email', notification)
# TODO data to be returned
return jsonify({}), 200
@notifications.route('/sms/service/<service_id>', methods=['POST'])
def create_sms_for_service(service_id):
resp_json = request.get_json()
notification, errors = sms_template_notification_schema.load(resp_json)
if errors:
return jsonify(result="error", message=errors), 400
template_id = notification['template']
job_id = notification['job']
    # TODO: job/job_id is in notification and can be used to update job status
# TODO: remove once beta is reading notifications from the queue
template = templates_dao.get_model_templates(template_id)
if template.service.id != uuid.UUID(service_id):
message = "Invalid template: id {} for service id: {}".format(template.id, service_id)
return jsonify(result="error", message=message), 400
add_notification_to_queue(service_id, template_id, 'sms', notification)
# TODO data to be returned
return jsonify({}), 200
|
Python
| 0
|
@@ -955,33 +955,33 @@
jsonify(%7B%7D), 20
-0
+4
%0A%0A%0A@notification
@@ -1363,25 +1363,25 @@
nify(%7B%7D), 20
-0
+4
%0A%0A%0A@notifica
@@ -2293,14 +2293,14 @@
nify(%7B%7D), 20
-0
+4
%0A
|
a12e907f5548b1b571b547c1e9e059b0bcd36242
|
Fix datetime when a benchmark is the only datum in a bar.
|
zipline/gens/tradesimulation.py
|
zipline/gens/tradesimulation.py
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from itertools import chain
from logbook import Logger, Processor
import zipline.finance.trading as trading
from zipline.protocol import BarData, DATASOURCE_TYPE
from zipline.gens.utils import hash_args
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def get_hash(self):
"""
There should only ever be one TSC in the system, so
we don't bother passing args into the hash.
"""
return self.__class__.__name__ + hash_args()
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = self.sim_params.first_open
self.algo_start = self.algo_start.replace(hour=0, minute=0,
second=0,
microsecond=0)
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.snapshot_dt = None
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if not 'algo_dt' in record.extra:
record.extra['algo_dt'] = self.snapshot_dt
self.processor = Processor(inject_algo_dt)
@property
def perf_key(self):
return self.EMISSION_TO_PERF_KEY_MAP[
self.algo.perf_tracker.emission_rate]
def transform(self, stream_in):
"""
Main generator work loop.
"""
# Initialize the mkt_close
mkt_close = self.algo.perf_tracker.market_close
# Set the simulation date to be the first event we see.
peek_date, peek_snapshot = next(stream_in)
self.simulation_dt = peek_date
# Stitch back together the generator by placing the peeked
# event back in front
stream = itertools.chain([(peek_date, peek_snapshot)],
stream_in)
# inject the current algo
# snapshot time to any log record generated.
with self.processor.threadbound():
updated = False
bm_updated = False
for date, snapshot in stream:
self.algo.perf_tracker.set_date(date)
self.algo.blotter.set_date(date)
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type in (DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CUSTOM):
self.update_universe(event)
self.algo.perf_tracker.process_event(event)
else:
for event in snapshot:
if event.type in (DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CUSTOM):
self.update_universe(event)
updated = True
if event.type == DATASOURCE_TYPE.BENCHMARK:
bm_updated = True
txns, orders = self.algo.blotter.process_trade(event)
for data in chain(txns, orders, [event]):
self.algo.perf_tracker.process_event(data)
# Update our portfolio.
self.algo.set_portfolio(
self.algo.perf_tracker.get_portfolio()
)
# Send the current state of the universe
# to the user's algo.
if updated:
self.simulate_snapshot(date)
updated = False
# run orders placed in the algorithm call
# above through perf tracker before emitting
# the perf packet, so that the perf includes
# placed orders
for order in self.algo.blotter.new_orders:
self.algo.perf_tracker.process_event(order)
self.algo.blotter.new_orders = []
# The benchmark is our internal clock. When it
# updates, we need to emit a performance message.
if bm_updated:
bm_updated = False
yield self.get_message(date)
# When emitting minutely, we re-iterate the day as a
# packet with the entire days performance rolled up.
if self.algo.perf_tracker.emission_rate == 'minute':
if date == mkt_close:
daily_rollup = self.algo.perf_tracker.to_dict(
emission_type='daily'
)
daily_rollup['daily_perf']['recorded_vars'] = \
self.algo.recorded_vars
yield daily_rollup
tp = self.algo.perf_tracker.todays_performance
tp.rollover()
if mkt_close < self.algo.perf_tracker.last_close:
mkt_close = self.get_next_close(mkt_close)
self.algo.perf_tracker.handle_intraday_close()
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def get_message(self, date):
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close()
perf_message['daily_perf']['recorded_vars'] = rvars
return perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
self.algo.perf_tracker.handle_minute_close(date)
perf_message = self.algo.perf_tracker.to_dict()
perf_message['minute_perf']['recorded_vars'] = rvars
return perf_message
def get_next_close(self, mkt_close):
if mkt_close >= trading.environment.last_trading_day:
return self.sim_params.last_close
else:
return trading.environment.next_open_and_close(mkt_close)[1]
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
sid_data = self.current_data[event.sid]
sid_data.__dict__.update(event.__dict__)
def simulate_snapshot(self, date):
"""
Run the user's algo against our current snapshot and update
the algo's simulated time.
"""
# Needs to be set so that we inject the proper date into algo
# log/print lines.
self.snapshot_dt = date
self.algo.set_datetime(self.snapshot_dt)
# Update the simulation time.
self.simulation_dt = date
self.algo.handle_data(self.current_data)
|
Python
| 0
|
@@ -4427,16 +4427,77 @@
CHMARK:%0A
+ self.algo.set_datetime(event.dt)%0A
|
99821667acf52b9348ff6b4d45476ae801034bef
|
Fix issues with non-str Lua arguments.
|
mockredis/script.py
|
mockredis/script.py
|
import sys
import threading
from mockredis.exceptions import ResponseError
LuaLock = threading.Lock()
class Script(object):
"""
An executable Lua script object returned by ``MockRedis.register_script``.
"""
def __init__(self, registered_client, script, load_dependencies=True):
self.registered_client = registered_client
self.script = script
self.load_dependencies = load_dependencies
self.sha = registered_client.script_load(script)
def __call__(self, keys=[], args=[], client=None):
"""Execute the script, passing any required ``args``"""
with LuaLock:
client = client or self.registered_client
if not client.script_exists(self.sha)[0]:
self.sha = client.script_load(self.script)
return self._execute_lua(keys, args, client)
def _execute_lua(self, keys, args, client):
"""
        Sets KEYS and ARGV along with the redis.call() function in the Lua
        globals and executes the Lua redis script
"""
lua, lua_globals = Script._import_lua(self.load_dependencies)
lua_globals.KEYS = self._python_to_lua(keys)
lua_globals.ARGV = self._python_to_lua(args)
def _call(*call_args):
# redis-py and native redis commands are mostly compatible argument
# wise, but some exceptions need to be handled here:
if str(call_args[0]).lower() == 'lrem':
response = client.call(
call_args[0], call_args[1],
call_args[3], # "count", default is 0
call_args[2])
else:
response = client.call(*call_args)
return self._python_to_lua(response)
lua_globals.redis = {"call": _call}
return self._lua_to_python(lua.execute(self.script), return_status=True)
@staticmethod
def _import_lua(load_dependencies=True):
"""
Import lua and dependencies.
:param load_dependencies: should Lua library dependencies be loaded?
:raises: RuntimeError if Lua is not available
"""
try:
import lua
except ImportError:
raise RuntimeError("Lua not installed")
lua_globals = lua.globals()
if load_dependencies:
Script._import_lua_dependencies(lua, lua_globals)
return lua, lua_globals
@staticmethod
def _import_lua_dependencies(lua, lua_globals):
"""
Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib.
"""
if sys.platform not in ('darwin', 'windows'):
import ctypes
ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL)
try:
lua_globals.cjson = lua.eval('require "cjson"')
except RuntimeError:
raise RuntimeError("cjson not installed")
@staticmethod
def _lua_to_python(lval, return_status=False):
"""
Convert Lua object(s) into Python object(s), as at times Lua object(s)
are not compatible with Python functions
"""
import lua
lua_globals = lua.globals()
if lval is None:
# Lua None --> Python None
return None
if lua_globals.type(lval) == "table":
# Lua table --> Python list
pval = []
for i in lval:
if return_status:
if i == 'ok':
return lval[i]
if i == 'err':
raise ResponseError(lval[i])
pval.append(Script._lua_to_python(lval[i]))
return pval
elif isinstance(lval, long):
# Lua number --> Python long
return long(lval)
elif isinstance(lval, float):
# Lua number --> Python float
return float(lval)
elif lua_globals.type(lval) == "userdata":
# Lua userdata --> Python string
return str(lval)
elif lua_globals.type(lval) == "string":
# Lua string --> Python string
return lval
elif lua_globals.type(lval) == "boolean":
# Lua boolean --> Python bool
return bool(lval)
raise RuntimeError("Invalid Lua type: " + str(lua_globals.type(lval)))
@staticmethod
def _python_to_lua(pval):
"""
Convert Python object(s) into Lua object(s), as at times Python object(s)
are not compatible with Lua functions
"""
import lua
if pval is None:
# Python None --> Lua None
return lua.eval("")
if isinstance(pval, (list, tuple, set)):
# Python list --> Lua table
# e.g.: in lrange
# in Python returns: [v1, v2, v3]
# in Lua returns: {v1, v2, v3}
lua_list = lua.eval("{}")
lua_table = lua.eval("table")
for item in pval:
lua_table.insert(lua_list, Script._python_to_lua(item))
return lua_list
elif isinstance(pval, dict):
# Python dict --> Lua dict
# e.g.: in hgetall
# in Python returns: {k1:v1, k2:v2, k3:v3}
# in Lua returns: {k1, v1, k2, v2, k3, v3}
lua_dict = lua.eval("{}")
lua_table = lua.eval("table")
for k, v in pval.iteritems():
lua_table.insert(lua_dict, Script._python_to_lua(k))
lua_table.insert(lua_dict, Script._python_to_lua(v))
return lua_dict
elif isinstance(pval, str):
# Python string --> Lua userdata
return pval
elif isinstance(pval, bool):
# Python bool--> Lua boolean
return lua.eval(str(pval).lower())
elif isinstance(pval, (int, long, float)):
# Python int --> Lua number
lua_globals = lua.globals()
return lua_globals.tonumber(str(pval))
raise RuntimeError("Invalid Python type: " + str(type(pval)))
|
Python
| 0
|
@@ -827,26 +827,70 @@
ute_lua(
+%5Bstr(key) for key in
keys
+%5D
,
+ %5Bstr(arg) for arg in
args
+%5D
, client
|
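The fix coerces every key and argument to `str` in `__call__` before the Lua bridge sees them, since non-string values such as ints otherwise reach Lua in a form the script helpers mishandle. The coercion step on its own:

def normalize_script_inputs(keys, args):
    """Coerce KEYS/ARGV to strings, matching how redis hands them to Lua."""
    return [str(key) for key in keys], [str(arg) for arg in args]

keys, args = normalize_script_inputs(['counter'], [5, 'x'])
assert keys == ['counter'] and args == ['5', 'x']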
65e3dcb5f085a0c70c5ad4cc607e561b95b07a49
|
Replace a verbose patch call with a function that does the same thing
|
corehq/apps/users/tests/test_middleware.py
|
corehq/apps/users/tests/test_middleware.py
|
from __future__ import absolute_import
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.test.client import Client
from django_otp.middleware import OTPMiddleware
from corehq.util.test_utils import flag_enabled
from corehq.apps.users.models import CouchUser
from corehq.apps.users.middleware import Enforce2FAMiddleware
import mock
class TestTwoFactorMiddleware(TestCase):
def setUp(self):
self.account_request = self.create_request(request_url="/account/",
username="test_1@test.com",
password="123")
self.non_account_request = self.create_request(request_url="/not_account/",
username="test_2@test.com",
password="123")
@classmethod
def create_request(cls, request_url, username, password):
# Initialize request
request = Client().get(request_url).wsgi_request
# Create user
request.user = get_user_model().objects.create_user(username=username, email=username, password=password)
username = request.user.get_username()
# Create couch user
request.couch_user = CouchUser()
# Login
assert Client().login(username=username, password=password)
# Activate middleware
OTPMiddleware().process_request(request)
return request
@classmethod
def enable_two_factor_for_user(cls, request):
request.user.otp_device = "test_device"
@classmethod
def call_process_view_with_couch_mock(cls, request, disable_two_factor):
with mock.patch('corehq.apps.users.models.CouchUser.two_factor_disabled',
new_callable=mock.PropertyMock,
return_value=disable_two_factor):
response = Enforce2FAMiddleware().process_view(request, "test_view_func",
"test_view_args", "test_view_kwargs")
return response
@flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
def test_process_view_permission_denied(self):
request = self.non_account_request
with mock.patch('corehq.apps.users.models.CouchUser.two_factor_disabled',
new_callable=mock.PropertyMock,
return_value=False):
response = Enforce2FAMiddleware().process_view(request, "test_view_func",
"test_view_args", "test_view_kwargs")
self.assertEqual(response.status_code, 403)
self.assertEqual(response._request, request)
self.assertEqual(response.template_name, 'two_factor/core/otp_required.html')
@flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
def test_process_view_two_factor_enabled(self):
request = self.non_account_request
self.enable_two_factor_for_user(request)
response = self.call_process_view_with_couch_mock(request, disable_two_factor=False)
self.assertEqual(response, None)
@flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
def test_process_view_couch_user_two_factor_disabled(self):
request = self.non_account_request
response = self.call_process_view_with_couch_mock(request, disable_two_factor=True)
self.assertEqual(response, None)
@flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
def test_process_view_account_url(self):
request = self.account_request
response = self.call_process_view_with_couch_mock(request, disable_two_factor=False)
self.assertEqual(response, None)
|
Python
| 0.000178
|
@@ -2271,364 +2271,91 @@
-with mock.patch('corehq.apps.users.models.CouchUser.two_factor_disabled',%0A new_callable=mock.PropertyMock,%0A return_value=False):%0A response = Enforce2FAMiddleware().process_view(request, %22test_view_func%22,%0A %22test_view_args%22, %22test_view_kwargs%22
+response = self.call_process_view_with_couch_mock(request, disable_two_factor=False
)%0A
|
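The test now routes every `two_factor_disabled` patch through the existing `call_process_view_with_couch_mock` helper instead of repeating the three-line `mock.patch` block. The extraction pattern, generically (the target path and names here are illustrative):

import mock  # on Python 3 this could be: from unittest import mock

def with_property_mocked(target, value, func, *args, **kwargs):
    """Run func while target (a property) is patched to return value."""
    with mock.patch(target, new_callable=mock.PropertyMock, return_value=value):
        return func(*args, **kwargs)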
cf61408831d6c0a184076b2a0cbc3050c14aa241
|
Fix tar content type check (#1398)
|
insights/core/archives.py
|
insights/core/archives.py
|
#!/usr/bin/env python
import logging
import os
import tempfile
from contextlib import contextmanager
from insights.util import content_type, fs, subproc
from insights.util.content_type import from_file
logger = logging.getLogger(__name__)
COMPRESSION_TYPES = ("zip", "tar", "gz", "bz2", "xz")
class InvalidArchive(Exception):
def __init__(self, msg):
super(InvalidArchive, self).__init__(msg)
self.msg = msg
class InvalidContentType(InvalidArchive):
def __init__(self, content_type):
self.msg = 'Invalid content type: "%s"' % content_type
super(InvalidContentType, self).__init__(self.msg)
self.content_type = content_type
class ZipExtractor(object):
def __init__(self, timeout=None):
self.content_type = "application/zip"
self.timeout = timeout
self.tmp_dir = None
self.created_tmp_dir = False
def from_path(self, path, extract_dir=None, content_type=None):
self.tmp_dir = tempfile.mkdtemp(prefix="insights-", dir=extract_dir)
self.created_tmp_dir = True
command = "unzip -n -q -d %s %s" % (self.tmp_dir, path)
subproc.call(command, timeout=self.timeout)
return self
class TarExtractor(object):
def __init__(self, timeout=None):
self.timeout = timeout
self.tmp_dir = None
TAR_FLAGS = {
"application/x-xz": "-J",
"application/x-gzip": "-z",
"application/gzip": "-z",
"application/x-bzip2": "-j",
"application/x-tar": ""
}
def _archive_type(self, _input):
_type = content_type.from_file(_input)
if _type not in self.TAR_FLAGS:
raise InvalidContentType(_type)
return _type
def from_path(self, path, extract_dir=None, content_type=None):
if os.path.isdir(path):
self.tmp_dir = path
else:
self.content_type = content_type or self._archive_type(path)
tar_flag = self.TAR_FLAGS.get(self.content_type)
self.tmp_dir = tempfile.mkdtemp(prefix="insights-", dir=extract_dir)
self.created_tmp_dir = True
command = "tar %s -x --exclude=*/dev/null -f %s -C %s" % (tar_flag, path, self.tmp_dir)
logging.debug("Extracting files in '%s'", self.tmp_dir)
subproc.call(command, timeout=self.timeout)
return self
def get_all_files(path):
names = []
for root, dirs, files in os.walk(path):
for dirname in dirs:
names.append(os.path.join(root, dirname) + "/")
for filename in files:
names.append(os.path.join(root, filename))
return names
class Extraction(object):
def __init__(self, tmp_dir, content_type):
self.tmp_dir = tmp_dir
self.content_type = content_type
@contextmanager
def extract(path, timeout=None, extract_dir=None, content_type=None):
"""
Extract path into a temporary directory in `extract_dir`.
Yields an object containing the temporary path and the content type of the
original archive.
If the extraction takes longer than `timeout` seconds, the temporary path
is removed, and an exception is raised.
"""
content_type = content_type or from_file(path)
if content_type == "application/zip":
extractor = ZipExtractor(timeout=timeout)
else:
extractor = TarExtractor(timeout=timeout)
try:
ctx = extractor.from_path(path, extract_dir=extract_dir, content_type=content_type)
content_type = extractor.content_type
yield Extraction(ctx.tmp_dir, content_type)
finally:
if extractor.created_tmp_dir:
fs.remove(extractor.tmp_dir, chmod=True)
|
Python
| 0
|
@@ -124,22 +124,8 @@
port
- content_type,
fs,
@@ -133,17 +133,16 @@
subproc%0A
-%0A
from ins
@@ -181,16 +181,43 @@
rom_file
+ as content_type_from_file%0A
%0Alogger
@@ -1552,121 +1552,122 @@
ef _
+t
ar
-chive_type(self, _input):%0A _type = content_type.from_file(_input)%0A if _type not in self.TAR_FLAGS
+_flag_for_content_type(self, content_type):%0A flag = self.TAR_FLAGS.get(content_type)%0A if not flag
:%0A
@@ -1701,16 +1701,23 @@
entType(
+content
_type)%0A
@@ -1730,21 +1730,20 @@
return
-_type
+flag
%0A%0A de
@@ -1934,25 +1934,29 @@
or
-self._archive_typ
+content_type_from_fil
e(pa
@@ -1987,29 +1987,42 @@
= self.
-TAR_FLAGS.get
+_tar_flag_for_content_type
(self.co
@@ -3249,16 +3249,29 @@
type or
+content_type_
from_fil
|
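
The fix above renames _archive_type to _tar_flag_for_content_type and resolves the tar flag directly from the content type. A minimal sketch of that lookup-and-validate pattern, assuming an illustrative TAR_FLAGS mapping:

TAR_FLAGS = {
    "application/x-xz": "-J",
    "application/gzip": "-z",
    "application/x-bzip2": "-j",
    "application/x-tar": "",  # plain tar needs no compression flag
}


def tar_flag_for(content_type):
    # Resolve the flag from the mapping and fail loudly on unknown types.
    flag = TAR_FLAGS.get(content_type)
    if flag is None:
        raise ValueError('Invalid content type: "%s"' % content_type)
    return flag

Testing against None rather than truthiness keeps the empty plain-tar flag valid; the row's own diff appears to use "if not flag", which would treat application/x-tar as invalid.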
b480426bdc7f5a59523cc9e6250efedd311d0854
|
fix other minor remark
|
lib/vsc/utils/testing.py
|
lib/vsc/utils/testing.py
|
#!/usr/bin/env python
##
#
# Copyright 2014-2014 Ghent University
#
# This file is part of vsc-base,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-base
#
# vsc-base is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2 of
# the License, or (at your option) any later version.
#
# vsc-base is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
##
"""
Test utilities.
@author: Kenneth Hoste (Ghent University)
"""
import re
import sys
from unittest import TestCase
class EnhancedTestCase(TestCase):
"""Enhanced test case, provides extra functionality (e.g. an assertErrorRegex method)."""
def convert_exception_to_str(self, err):
"""Convert an Exception instance to a string."""
msg = err
if hasattr(err, 'msg'):
msg = err.msg
try:
res = str(msg)
except UnicodeEncodeError:
res = msg.encode('utf8', 'replace')
return res
def assertErrorRegex(self, error, regex, call, *args, **kwargs):
"""
Convenience method to match regex with the expected error message.
Example: self.assertErrorRegex(OSError, "No such file or directory", os.remove, '/no/such/file')
"""
try:
call(*args, **kwargs)
str_kwargs = ['='.join([k, str(v)]) for (k, v) in kwargs.items()]
str_args = ', '.join(map(str, args) + str_kwargs)
self.assertTrue(False, "Expected errors with %s(%s) call should occur" % (call.__name__, str_args))
except error, err:
msg = self.convert_exception_to_str(err)
self.assertTrue(re.search(regex, msg), "Pattern '%s' is found in '%s'" % (regex, msg))
|
Python
| 0.003494
|
@@ -2387,16 +2387,54 @@
tr(err)%0A
+ regex = re.compile(regex)%0A
@@ -2459,16 +2459,19 @@
e(re
+gex
.search(
rege
@@ -2466,23 +2466,16 @@
.search(
-regex,
msg), %22P
@@ -2512,16 +2512,24 @@
%25 (regex
+.pattern
, msg))%0A
|
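
The change above precompiles the pattern once and reports regex.pattern in the failure message. A small sketch of the same idea, with illustrative names:

import re


def assert_matches(pattern, text):
    # Compile once so both the search and the failure message reuse the
    # same object; compiled.pattern echoes the original pattern string.
    compiled = re.compile(pattern)
    if not compiled.search(text):
        raise AssertionError("Pattern '%s' not found in '%s'"
                             % (compiled.pattern, text))


assert_matches("No such file", "OSError: No such file or directory")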
5c86a27f3ff324a8a7f74e161eeff4bda62fe0ab
|
add asynchronous dump
|
integration_tests/dump.py
|
integration_tests/dump.py
|
import os.path
import sys
import base64
from app import Application
def dump(request):
body = request.body
if body is not None:
body = base64.b64encode(body).decode('ascii')
result = {
"method": request.method,
"path": request.path,
"query_string": request.query_string,
"headers": request.headers,
"match_dict": request.match_dict,
"body": body
}
return request.Response(json=result)
app = Application()
r = app.get_router()
r.add_route('/dump/{p1}/{p2}', dump)
if __name__ == '__main__':
app.serve()
|
Python
| 0.000001
|
@@ -33,16 +33,31 @@
base64%0A
+import asyncio%0A
%0A%0Afrom a
@@ -476,16 +476,149 @@
esult)%0A%0A
+%0Aasync def adump(request):%0A sleep = int(request.query.get('sleep', 0))%0A await asyncio.sleep(sleep)%0A%0A return dump(request)%0A%0A%0A
app = Ap
@@ -688,16 +688,55 @@
, dump)%0A
+r.add_route('/adump/%7Bp1%7D/%7Bp2%7D', adump)%0A
%0A%0Aif __n
|
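
The new adump handler awaits an artificial delay and then delegates to the synchronous dump. A self-contained sketch of that pattern, assuming plain asyncio rather than the row's web framework:

import asyncio


def dump_payload(name):
    # Stand-in for the synchronous handler the async variant delegates to.
    return {"name": name}


async def delayed_dump(name, sleep_s=0):
    # Await the artificial delay first, then reuse the sync code path.
    await asyncio.sleep(sleep_s)
    return dump_payload(name)


print(asyncio.run(delayed_dump("demo")))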
31c4cdf59c502cddd7770501a68457c7e9166216
|
fix checksums of 2.2.0.1 and 2.2.1.1 (#24264)
|
var/spack/repos/builtin/packages/py-pyprecice/package.py
|
var/spack/repos/builtin/packages/py-pyprecice/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyprecice(PythonPackage):
"""
This package provides python language bindings for the
C++ library preCICE.
"""
homepage = "https://www.precice.org"
git = "https://github.com/precice/python-bindings.git"
url = "https://github.com/precice/python-bindings/archive/v2.0.0.1.tar.gz"
maintainers = ["ajaust", "BenjaminRodenberg", "IshaanDesai"]
# Always prefer final version of release candidate
version("develop", branch="develop")
version('2.2.1.1', sha256='481715c9e90660d3d97e63ee590a3b74a17564cd5cac3bfceceb59788fd11b14')
version('2.2.0.2', sha256='2287185f9ad7500dced53459543d27bb66bd2438c2e4bf81ee3317e6a00513d5')
version('2.2.0.1', sha256='032fa58193cfa69e3be37557977056e8f507d89b40c490a351d17271269b25ad')
version('2.1.1.2', sha256='363eb3eeccf964fd5ee87012c1032353dd1518662868f2b51f04a6d8a7154045')
version("2.1.1.1", sha256="972f574549344b6155a8dd415b6d82512e00fa154ca25ae7e36b68d4d2ed2cf4")
version("2.1.0.1", sha256="ac5cb7412c6b96b08a04fa86ea38e52d91ea739a3bd1c209baa93a8275e4e01a")
version("2.0.2.1", sha256="c6fca26332316de041f559aecbf23122a85d6348baa5d3252be4ddcd5e94c09a")
version("2.0.1.1", sha256="2791e7c7e2b04bc918f09f3dfca2d3371e6f8cbb7e57c82bd674703f4fa00be7")
version("2.0.0.2", sha256="5f055d809d65ec2e81f4d001812a250f50418de59990b47d6bcb12b88da5f5d7")
version("2.0.0.1", sha256="96eafdf421ec61ad6fcf0ab1d3cf210831a815272984c470b2aea57d4d0c9e0e")
# Older versions of the bindings checked versions via pip. This patch
# removes the pip dependency.
# See also https://github.com/spack/spack/pull/19558
patch("deactivate-version-check-via-pip.patch", when="@:2.1.1.1")
depends_on("precice@develop", when="@develop")
depends_on("precice@2.2.1", when="@2.2.1.1:2.2.1.99")
depends_on("precice@2.2.0", when="@2.2.0.1:2.2.0.99")
depends_on("precice@2.1.1", when="@2.1.1.1:2.1.1.99")
depends_on("precice@2.1.0", when="@2.1.0.1:2.1.0.99")
depends_on("precice@2.0.2", when="@2.0.2.1:2.0.2.99")
depends_on("precice@2.0.1", when="@2.0.1.1:2.0.1.99")
depends_on("precice@2.0.0", when="@2.0.0.1:2.0.0.99")
depends_on("python@3:", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-numpy", type=("build", "run"))
depends_on("py-mpi4py", type=("build", "run"))
depends_on("py-cython@0.29:", type=("build"))
phases = ['install_lib', 'build_ext', 'install']
def build_ext_args(self, spec, prefix):
return [
"--include-dirs=" + spec["precice"].headers.directories[0],
"--library-dirs=" + spec["precice"].libs.directories[0]
]
def install(self, spec, prefix):
# Older versions of the bindings had a non-standard installation routine
# See also https://github.com/spack/spack/pull/19558#discussion_r513123239
if self.version <= Version("2.1.1.1"):
self.setup_py("install", "--prefix={0}".format(prefix))
|
Python
| 0.000002
|
@@ -724,72 +724,72 @@
56='
-481715c9e90660d3d97e63ee590a3b74a17564cd5cac3bfceceb59788fd11b14
+d96674f1ff91761c29efce34f8e09e2ec29a4862227b7204439e865dbe755a86
')%0A
@@ -920,72 +920,72 @@
56='
-032fa58193cfa69e3be37557977056e8f507d89b40c490a351d17271269b25ad
+229625e2e6df03987ababce5abe2021b0974cbe5a588b936a9cba653f4908d4b
')%0A
|
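
The diff only swaps two sha256 digests. One way such digests are recomputed is to hash the downloaded tarball and paste the hex digest into the matching version() line; Spack's own spack checksum command automates this. A minimal sketch, with an illustrative file path:

import hashlib


def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in chunks so large tarballs are not loaded into memory.
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Illustrative path; the resulting digest goes into version(..., sha256=...).
# print(sha256_of("python-bindings-2.2.1.1.tar.gz"))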
de0c0edafb3e3a128c8ad54e9841c7a188cba328
|
Format output (#85)
|
modules/src/time.py
|
modules/src/time.py
|
import requests
import config
import os
from templates.text import TextTemplate
from datetime import datetime
MAPQUEST_CONSUMER_KEY = os.environ.get('MAPQUEST_CONSUMER_KEY', config.MAPQUEST_CONSUMER_KEY)
TIME_ZONE_DB_API_KEY = os.environ.get('TIME_ZONE_DB_API_KEY', config.TIME_ZONE_DB_API_KEY)
def process(input, entities):
output = {}
try:
r = requests.get('http://open.mapquestapi.com/nominatim/v1/search.php?key=' + MAPQUEST_CONSUMER_KEY + '&format=json&q='+ entities['time_location'][0]['value'] + '&limit=1')
location_data = r.json()
r = requests.get('http://api.timezonedb.com/?lat='+ location_data[0]['lat'] + '&lng='+ location_data[0]['lon'] + '&format=json&key=' + TIME_ZONE_DB_API_KEY)
time_data = r.json()
time = datetime.utcfromtimestamp(time_data['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
output['input'] = input
output['output'] = TextTemplate('Location: ' + location_data[0]['display_name'] + '\nTime: ' + time + ' ' + time_data['abbreviation']).get_message()
output['success'] = True
except:
error_message = 'I couldn\'t get the time at the location you specified.'
error_message += '\nPlease ask me something else, like:'
error_message += '\n - time in new york'
error_message += '\n - india time'
error_message += '\n - time at paris'
output['error_msg'] = TextTemplate(error_message).get_message()
output['success'] = False
return output
|
Python
| 0.000877
|
@@ -833,15 +833,18 @@
e('%25
-Y-%25m-%25d
+a %25b %25d %25Y
%25H:
|
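
The fix changes the strftime format from the ISO-style '%Y-%m-%d %H:%M:%S' to the friendlier '%a %b %d %Y %H:%M:%S'. A quick comparison of the two formats (utcfromtimestamp mirrors the row's own usage, though it is deprecated since Python 3.12):

from datetime import datetime

ts = 0  # example epoch timestamp
# Old format from the row versus the replacement the diff introduces.
print(datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))     # 1970-01-01 00:00:00
print(datetime.utcfromtimestamp(ts).strftime('%a %b %d %Y %H:%M:%S'))  # Thu Jan 01 1970 00:00:00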
38ba5ddbadd02df7d3781526350eb5b060f702fb
|
remove one of the two softmask items in the doc
|
librosa/util/__init__.py
|
librosa/util/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities
=========
Array operations
----------------
.. autosummary::
:toctree: generated/
frame
pad_center
fix_length
fix_frames
index_to_slice
softmask
sync
softmask
axis_sort
normalize
roll_sparse
sparsify_rows
buf_to_float
tiny
Matching
--------
.. autosummary::
:toctree: generated/
match_intervals
match_events
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
localmax
peak_pick
Input validation
----------------
.. autosummary::
:toctree: generated/
valid_audio
valid_int
valid_intervals
File operations
---------------
.. autosummary::
:toctree: generated/
example_audio_file
find_files
"""
from .utils import * # pylint: disable=wildcard-import
from .files import * # pylint: disable=wildcard-import
from .matching import * # pylint: disable=wildcard-import
from .deprecation import * # pylint: disable=wildcard-import
from . import decorators
from . import exceptions
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
Python
| 0
|
@@ -236,29 +236,16 @@
sync
-%0A softmask
%0A%0A ax
|
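
The change above drops a duplicated softmask entry from the autosummary list. A throwaway check, not part of librosa, for spotting that kind of duplicate in a docstring block:

doc = """
frame
softmask
sync
softmask
"""
# Collect non-empty entries and report any that occur more than once.
entries = [line.strip() for line in doc.splitlines() if line.strip()]
dupes = {e for e in entries if entries.count(e) > 1}
print(dupes)  # prints {'softmask'}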